Merge pull request #5 from apache/dev

update
samz406 committed 5 years ago via GitHub
commit 6e1dbd894a
  1. 67    .github/workflows/ci_e2e.yml
  2. 2     .github/workflows/ci_ut.yml
  3. 26    README.md
  4. 14    README_zh_CN.md
  5. 158   ambari_plugin/common-services/DOLPHIN/1.2.1/alerts.json
  6. 144   ambari_plugin/common-services/DOLPHIN/1.2.1/configuration/dolphin-alert.xml
  7. 71    ambari_plugin/common-services/DOLPHIN/1.2.1/configuration/dolphin-application-api.xml
  8. 467   ambari_plugin/common-services/DOLPHIN/1.2.1/configuration/dolphin-application.xml
  9. 232   ambari_plugin/common-services/DOLPHIN/1.2.1/configuration/dolphin-common.xml
  10. 123  ambari_plugin/common-services/DOLPHIN/1.2.1/configuration/dolphin-env.xml
  11. 131  ambari_plugin/common-services/DOLPHIN/1.2.1/configuration/dolphin-quartz.xml
  12. 137  ambari_plugin/common-services/DOLPHIN/1.2.1/metainfo.xml
  13. 124  ambari_plugin/common-services/DOLPHIN/1.2.1/package/alerts/alert_dolphin_scheduler_status.py
  14. 61   ambari_plugin/common-services/DOLPHIN/1.2.1/package/scripts/dolphin_alert_service.py
  15. 70   ambari_plugin/common-services/DOLPHIN/1.2.1/package/scripts/dolphin_api_service.py
  16. 121  ambari_plugin/common-services/DOLPHIN/1.2.1/package/scripts/dolphin_env.py
  17. 61   ambari_plugin/common-services/DOLPHIN/1.2.1/package/scripts/dolphin_logger_service.py
  18. 61   ambari_plugin/common-services/DOLPHIN/1.2.1/package/scripts/dolphin_master_service.py
  19. 60   ambari_plugin/common-services/DOLPHIN/1.2.1/package/scripts/dolphin_worker_service.py
  20. 150  ambari_plugin/common-services/DOLPHIN/1.2.1/package/scripts/params.py
  21. 31   ambari_plugin/common-services/DOLPHIN/1.2.1/package/scripts/service_check.py
  22. 23   ambari_plugin/common-services/DOLPHIN/1.2.1/package/scripts/status_params.py
  23. 7    ambari_plugin/common-services/DOLPHIN/1.2.1/package/templates/alert.properties.j2
  24. 6    ambari_plugin/common-services/DOLPHIN/1.2.1/package/templates/application-api.properties.j2
  25. 6    ambari_plugin/common-services/DOLPHIN/1.2.1/package/templates/application.properties.j2
  26. 7    ambari_plugin/common-services/DOLPHIN/1.2.1/package/templates/common.properties.j2
  27. 119  ambari_plugin/common-services/DOLPHIN/1.2.1/package/templates/dolphin-daemon.j2
  28. 20   ambari_plugin/common-services/DOLPHIN/1.2.1/package/templates/quartz.properties.j2
  29. 26   ambari_plugin/common-services/DOLPHIN/1.2.1/quicklinks/quicklinks.json
  30. 605  ambari_plugin/common-services/DOLPHIN/1.2.1/themes/theme.json
  31. BIN  ambari_plugin/readme.pdf
  32. 26   ambari_plugin/statcks/DOLPHIN/metainfo.xml
  33. 180  dockerfile/Dockerfile
  34. 311  dockerfile/README.md
  35. 306  dockerfile/README_zh_CN.md
  36. 32   dockerfile/conf/dolphinscheduler/alert.properties.tpl
  37. 17   dockerfile/conf/dolphinscheduler/application-api.properties.tpl
  38. 60   dockerfile/conf/dolphinscheduler/application.properties.tpl
  39. 69   dockerfile/conf/dolphinscheduler/common.properties.tpl
  40. 49   dockerfile/conf/dolphinscheduler/conf/alert_logback.xml
  41. 60   dockerfile/conf/dolphinscheduler/conf/apiserver_logback.xml
  42. 80   dockerfile/conf/dolphinscheduler/conf/combined_logback.xml
  43. 35   dockerfile/conf/dolphinscheduler/conf/common/hadoop/hadoop.properties
  44. 252  dockerfile/conf/dolphinscheduler/conf/i18n/messages.properties
  45. 252  dockerfile/conf/dolphinscheduler/conf/i18n/messages_en_US.properties
  46. 250  dockerfile/conf/dolphinscheduler/conf/i18n/messages_zh_CN.properties
  47. 17   dockerfile/conf/dolphinscheduler/conf/mail_templates/alert_mail_template.ftl
  48. 52   dockerfile/conf/dolphinscheduler/conf/master_logback.xml
  49. 33   dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/AccessTokenMapper.xml
  50. 47   dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/AlertGroupMapper.xml
  51. 26   dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/AlertMapper.xml
  52. 43   dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/CommandMapper.xml
  53. 79   dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/DataSourceMapper.xml
  54. 30   dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/DataSourceUserMapper.xml
  55. 36   dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/ErrorCommandMapper.xml
  56. 96   dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/ProcessDefinitionMapper.xml
  57. 43   dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/ProcessInstanceMapMapper.xml
  58. 182  dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/ProcessInstanceMapper.xml
  59. 68   dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/ProjectMapper.xml
  60. 36   dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/ProjectUserMapper.xml
  61. 42   dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/QueueMapper.xml
  62. 74   dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/ResourceMapper.xml
  63. 32   dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/ResourceUserMapper.xml
  64. 58   dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/ScheduleMapper.xml
  65. 32   dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/SessionMapper.xml
  66. 129  dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/TaskInstanceMapper.xml
  67. 41   dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/TenantMapper.xml
  68. 29   dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/UDFUserMapper.xml
  69. 71   dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/UdfFuncMapper.xml
  70. 31   dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/UserAlertGroupMapper.xml
  71. 72   dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/UserMapper.xml
  72. 40   dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/WorkerGroupMapper.xml
  73. 79   dockerfile/conf/dolphinscheduler/conf/worker_logback.xml
  74. 42   dockerfile/conf/dolphinscheduler/conf/zookeeper.properties
  75. 26   dockerfile/conf/dolphinscheduler/env/dolphinscheduler_env
  76. 21   dockerfile/conf/dolphinscheduler/quartz.properties.tpl
  77. 263  dockerfile/conf/maven/settings.xml
  78. 4    dockerfile/conf/nginx/dolphinscheduler.conf
  79. 31   dockerfile/hooks/build
  80. 56   dockerfile/hooks/build.bat
  81. 27   dockerfile/hooks/check
  82. 2    dockerfile/hooks/push
  83. 23   dockerfile/hooks/push.bat
  84. 100  dockerfile/startup-init-conf.sh
  85. 223  dockerfile/startup.sh
  86. 4    dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/Constants.java
  87. 43   dolphinscheduler-api/pom.xml
  88. 12   dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessInstanceController.java
  89. 8    dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/TaskInstanceController.java
  90. 137  dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/log/LogClient.java
  91. 40   dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/BaseDAGService.java
  92. 10   dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/DataAnalysisService.java
  93. 3    dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/DataSourceService.java
  94. 42   dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ExecutorService.java
  95. 36   dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/LoggerService.java
  96. 108  dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java
  97. 61   dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessInstanceService.java
  98. 4    dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java
  99. 22   dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/SchedulerService.java
  100. 33  dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/TaskInstanceService.java

Some files were not shown because too many files have changed in this diff.

67
.github/workflows/ci_e2e.yml

@@ -0,0 +1,67 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
on: ["push", "pull_request"]
env:
  DOCKER_DIR: ./docker
  LOG_DIR: /tmp/dolphinscheduler
name: e2e Test
jobs:
  build:
    name: Test
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v1
        with:
          submodules: true
      - uses: actions/cache@v1
        with:
          path: ~/.m2/repository
          key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
          restore-keys: |
            ${{ runner.os }}-maven-
      - name: Build Image
        run: |
          export VERSION=`cat $(pwd)/pom.xml| grep "SNAPSHOT</version>" | awk -F "-SNAPSHOT" '{print $1}' | awk -F ">" '{print $2}'`
          sh ./dockerfile/hooks/build
      - name: Docker Run
        run: |
          VERSION=`cat $(pwd)/pom.xml| grep "SNAPSHOT</version>" | awk -F "-SNAPSHOT" '{print $1}' | awk -F ">" '{print $2}'`
          docker run -dit -e POSTGRESQL_USERNAME=test -e POSTGRESQL_PASSWORD=test -p 8888:8888 dolphinscheduler:$VERSION all
      - name: Check Server Status
        run: sh ./dockerfile/hooks/check
      - name: Prepare e2e env
        run: |
          sudo apt-get install -y libxss1 libappindicator1 libindicator7 xvfb unzip
          wget https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb
          sudo dpkg -i google-chrome*.deb
          sudo apt-get install -f -y
          wget -N https://chromedriver.storage.googleapis.com/80.0.3987.106/chromedriver_linux64.zip
          unzip chromedriver_linux64.zip
          sudo mv -f chromedriver /usr/local/share/chromedriver
          sudo ln -s /usr/local/share/chromedriver /usr/local/bin/chromedriver
      - name: Run e2e Test
        run: cd ./e2e && mvn -B clean test
      - name: Collect logs
        run: |
          mkdir -p ${LOG_DIR}
          docker logs dolphinscheduler > ${LOG_DIR}/dolphinscheduler.txt
        continue-on-error: true
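
Both the Build Image and Docker Run steps derive the image tag by stripping the -SNAPSHOT suffix from the project version in pom.xml. The grep/awk pipeline is dense, so here is a Python sketch of the same extraction (illustrative only, not part of this commit):

import re

def extract_version(pom_text):
    # "<version>1.2.1-SNAPSHOT</version>" -> "1.2.1"
    match = re.search(r">([^<>]+)-SNAPSHOT</version>", pom_text)
    return match.group(1) if match else None

assert extract_version("<version>1.2.1-SNAPSHOT</version>") == "1.2.1"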

2
.github/workflows/ci_ut.yml

@@ -20,7 +20,7 @@ env:
   DOCKER_DIR: ./docker
   LOG_DIR: /tmp/dolphinscheduler
-name: Test Coveralls Parallel
+name: Unit Test
 jobs:

26
README.md

@@ -45,17 +45,16 @@ HA is supported by itself | All process definition operations are visualized, dr
 Overload processing: Task queue mechanism, the number of schedulable tasks on a single machine can be flexibly configured, when too many tasks will be cached in the task queue, will not cause machine jam. | One-click deployment | Supports traditional shell tasks, and also support big data platform task scheduling: MR, Spark, SQL (mysql, postgresql, hive, sparksql), Python, Procedure, Sub_Process | |
 ### System partial screenshot
-![image](https://user-images.githubusercontent.com/48329107/61368744-1f5f3b00-a8c1-11e9-9cf1-10f8557a6b3b.png)
-![image](https://user-images.githubusercontent.com/48329107/61368966-9dbbdd00-a8c1-11e9-8dcc-a9469d33583e.png)
-![image](https://user-images.githubusercontent.com/48329107/61372146-f347b800-a8c8-11e9-8882-66e8934ada23.png)
+![home page](https://user-images.githubusercontent.com/15833811/75218288-bf286400-57d4-11ea-8263-d639c6511d5f.jpg)
+![dag](https://user-images.githubusercontent.com/15833811/75236750-3374fe80-57f9-11ea-857d-62a66a5a559d.png)
+![process definition list page](https://user-images.githubusercontent.com/15833811/75216886-6f479e00-57d0-11ea-92dd-66e7640a186f.png)
+![view task log online](https://user-images.githubusercontent.com/15833811/75216924-9900c500-57d0-11ea-91dc-3522a76bdbbe.png)
+![resource management](https://user-images.githubusercontent.com/15833811/75216984-be8dce80-57d0-11ea-840d-58546edc8788.png)
+![monitor](https://user-images.githubusercontent.com/59273635/75625839-c698a480-5bfc-11ea-8bbe-895b561b337f.png)
+![security](https://user-images.githubusercontent.com/15833811/75236441-bfd2f180-57f8-11ea-88bd-f24311e01b7e.png)
+![treeview](https://user-images.githubusercontent.com/15833811/75217191-3fe56100-57d1-11ea-8856-f19180d9a879.png)
 ### Document
 - <a href="https://dolphinscheduler.apache.org/en-us/docs/1.2.0/user_doc/backend-deployment.html" target="_blank">Backend deployment documentation</a>
@@ -100,16 +99,9 @@ It is because of the shoulders of these open source projects that the birth of t
 ### Get Help
 1. Submit an issue
 1. Subscribe the mail list : https://dolphinscheduler.apache.org/en-us/docs/development/subscribe.html. then send mail to dev@dolphinscheduler.apache.org
-1. Contact WeChat group manager, ID 510570367. This is for Mandarin(CN) discussion.
+1. Contact WeChat(dailidong66). This is just for Mandarin(CN) discussion.
 ### License
 Please refer to [LICENSE](https://github.com/apache/incubator-dolphinscheduler/blob/dev/LICENSE) file.

14
README_zh_CN.md

@@ -36,11 +36,19 @@ Dolphin Scheduler Official Website
 ### System partial screenshots
-![](http://geek.analysys.cn/static/upload/221/2019-03-29/0a9dea80-fb02-4fa5-a812-633b67035ffc.jpeg)
+![home page](https://user-images.githubusercontent.com/15833811/75208819-abbad000-57b7-11ea-8d3c-67e7c270671f.jpg)
-![](http://geek.analysys.cn/static/upload/221/2019-04-01/83686def-a54f-4169-8cae-77b1f8300cc1.png)
+![dag](https://user-images.githubusercontent.com/15833811/75209584-93e44b80-57b9-11ea-952e-537fb24ec72d.jpg)
-![](http://geek.analysys.cn/static/upload/221/2019-03-29/83c937c7-1793-4d7a-aa28-b98460329fe0.jpeg)
+![log](https://user-images.githubusercontent.com/15833811/75209645-c55d1700-57b9-11ea-94d4-e3fa91ab5218.jpg)
+![gantt](https://user-images.githubusercontent.com/15833811/75209640-c0986300-57b9-11ea-878e-a2098533ad44.jpg)
+![resources](https://user-images.githubusercontent.com/15833811/75209403-11f42280-57b9-11ea-9b59-d4be77063553.jpg)
+![monitor](https://user-images.githubusercontent.com/15833811/75209631-b5ddce00-57b9-11ea-8d22-cdf15cf0ee25.jpg)
+![security](https://user-images.githubusercontent.com/15833811/75209633-baa28200-57b9-11ea-9def-94bef2e212a7.jpg)
 ### Documentation

158
ambari_plugin/common-services/DOLPHIN/1.2.1/alerts.json

@@ -0,0 +1,158 @@
{
  "DOLPHIN": {
    "service": [],
    "DOLPHIN_API": [
      {
        "name": "dolphin_api_port_check",
        "label": "dolphin_api_port_check",
        "description": "dolphin_api_port_check.",
        "interval": 10,
        "scope": "ANY",
        "source": {
          "type": "PORT",
          "uri": "{{dolphin-application-api/server.port}}",
          "default_port": 12345,
          "reporting": {
            "ok": {
              "text": "TCP OK - {0:.3f}s response on port {1}"
            },
            "warning": {
              "text": "TCP OK - {0:.3f}s response on port {1}",
              "value": 1.5
            },
            "critical": {
              "text": "Connection failed: {0} to {1}:{2}",
              "value": 5.0
            }
          }
        }
      }
    ],
    "DOLPHIN_LOGGER": [
      {
        "name": "dolphin_logger_port_check",
        "label": "dolphin_logger_port_check",
        "description": "dolphin_logger_port_check.",
        "interval": 10,
        "scope": "ANY",
        "source": {
          "type": "PORT",
          "uri": "{{dolphin-common/loggerserver.rpc.port}}",
          "default_port": 50051,
          "reporting": {
            "ok": {
              "text": "TCP OK - {0:.3f}s response on port {1}"
            },
            "warning": {
              "text": "TCP OK - {0:.3f}s response on port {1}",
              "value": 1.5
            },
            "critical": {
              "text": "Connection failed: {0} to {1}:{2}",
              "value": 5.0
            }
          }
        }
      }
    ],
    "DOLPHIN_MASTER": [
      {
        "name": "DOLPHIN_MASTER_CHECK",
        "label": "check dolphin scheduler master status",
        "description": "",
        "interval": 10,
        "scope": "HOST",
        "enabled": true,
        "source": {
          "type": "SCRIPT",
          "path": "DOLPHIN/1.2.1/package/alerts/alert_dolphin_scheduler_status.py",
          "parameters": [
            {
              "name": "connection.timeout",
              "display_name": "Connection Timeout",
              "value": 5.0,
              "type": "NUMERIC",
              "description": "The maximum time before this alert is considered to be CRITICAL",
              "units": "seconds",
              "threshold": "CRITICAL"
            },
            {
              "name": "alertName",
              "display_name": "alertName",
              "value": "DOLPHIN_MASTER",
              "type": "STRING",
              "description": "alert name"
            }
          ]
        }
      }
    ],
    "DOLPHIN_WORKER": [
      {
        "name": "DOLPHIN_WORKER_CHECK",
        "label": "check dolphin scheduler worker status",
        "description": "",
        "interval": 10,
        "scope": "HOST",
        "enabled": true,
        "source": {
          "type": "SCRIPT",
          "path": "DOLPHIN/1.2.1/package/alerts/alert_dolphin_scheduler_status.py",
          "parameters": [
            {
              "name": "connection.timeout",
              "display_name": "Connection Timeout",
              "value": 5.0,
              "type": "NUMERIC",
              "description": "The maximum time before this alert is considered to be CRITICAL",
              "units": "seconds",
              "threshold": "CRITICAL"
            },
            {
              "name": "alertName",
              "display_name": "alertName",
              "value": "DOLPHIN_WORKER",
              "type": "STRING",
              "description": "alert name"
            }
          ]
        }
      }
    ],
    "DOLPHIN_ALERT": [
      {
        "name": "DOLPHIN_DOLPHIN_ALERT_CHECK",
        "label": "check dolphin scheduler alert status",
        "description": "",
        "interval": 10,
        "scope": "HOST",
        "enabled": true,
        "source": {
          "type": "SCRIPT",
          "path": "DOLPHIN/1.2.1/package/alerts/alert_dolphin_scheduler_status.py",
          "parameters": [
            {
              "name": "connection.timeout",
              "display_name": "Connection Timeout",
              "value": 5.0,
              "type": "NUMERIC",
              "description": "The maximum time before this alert is considered to be CRITICAL",
              "units": "seconds",
              "threshold": "CRITICAL"
            },
            {
              "name": "alertName",
              "display_name": "alertName",
              "value": "DOLPHIN_ALERT",
              "type": "STRING",
              "description": "alert name"
            }
          ]
        }
      }
    ]
  }
}
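
Each PORT alert above maps the measured TCP response time to a state using the warning (1.5s) and critical (5.0s) values. A small sketch of that mapping (an assumption about how Ambari applies the thresholds, not its actual code):

def port_alert_state(response_seconds, warning=1.5, critical=5.0):
    # Thresholds mirror the "warning"/"critical" values defined above.
    if response_seconds >= critical:
        return "CRITICAL"
    if response_seconds >= warning:
        return "WARNING"
    return "OK"

print(port_alert_state(0.2))  # OK
print(port_alert_state(2.0))  # WARNING
print(port_alert_state(6.0))  # CRITICAL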

144
ambari_plugin/common-services/DOLPHIN/1.2.1/configuration/dolphin-alert.xml

@@ -0,0 +1,144 @@
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<configuration>
<property>
<name>alert.type</name>
<value>EMAIL</value>
<description>alert type is EMAIL/SMS</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mail.protocol</name>
<value>SMTP</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mail.server.host</name>
<value>xxx.xxx.com</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mail.server.port</name>
<value>25</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mail.sender</name>
<value>admin</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mail.user</name>
<value>admin</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mail.passwd</name>
<value>000000</value>
<description></description>
<property-type>PASSWORD</property-type>
<value-attributes>
<type>password</type>
</value-attributes>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mail.smtp.starttls.enable</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mail.smtp.ssl.enable</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mail.smtp.ssl.trust</name>
<value>xxx.xxx.com</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>xls.file.path</name>
<value>/tmp/xls</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>enterprise.wechat.enable</name>
<value>false</value>
<description></description>
<value-attributes>
<type>value-list</type>
<entries>
<entry>
<value>true</value>
<label>Enabled</label>
</entry>
<entry>
<value>false</value>
<label>Disabled</label>
</entry>
</entries>
<selection-cardinality>1</selection-cardinality>
</value-attributes>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>enterprise.wechat.corp.id</name>
<value>wechatId</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>enterprise.wechat.secret</name>
<value>secret</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>enterprise.wechat.agent.id</name>
<value>agentId</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>enterprise.wechat.users</name>
<value>wechatUsers</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
</configuration>
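
The mail.* properties above configure the EMAIL alert channel. As a rough sketch of how such settings drive an SMTP client (a hedged illustration using Python's smtplib; DolphinScheduler's real alert sender is Java and may behave differently):

import smtplib
from email.mime.text import MIMEText

# Values mirror the placeholder properties above (mail.server.host etc.).
conf = {
    "mail.server.host": "xxx.xxx.com",
    "mail.server.port": 25,
    "mail.sender": "admin",
    "mail.user": "admin",
    "mail.passwd": "000000",
    "mail.smtp.starttls.enable": True,
}

def send_alert(subject, body, to_addrs):
    msg = MIMEText(body)
    msg["Subject"] = subject
    msg["From"] = conf["mail.sender"]
    msg["To"] = ", ".join(to_addrs)
    server = smtplib.SMTP(conf["mail.server.host"], conf["mail.server.port"])
    try:
        if conf["mail.smtp.starttls.enable"]:
            server.starttls()
        server.login(conf["mail.user"], conf["mail.passwd"])
        server.sendmail(conf["mail.sender"], to_addrs, msg.as_string())
    finally:
        server.quit()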

71
ambari_plugin/common-services/DOLPHIN/1.2.1/configuration/dolphin-application-api.xml

@@ -0,0 +1,71 @@
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<configuration>
<property>
<name>server.port</name>
<value>12345</value>
<description>
server port
</description>
<value-attributes>
<type>int</type>
</value-attributes>
</property>
<property>
<name>server.servlet.session.timeout</name>
<value>7200</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description>
</description>
</property>
<property>
<name>spring.servlet.multipart.max-file-size</name>
<value>1024</value>
<value-attributes>
<unit>MB</unit>
<type>int</type>
</value-attributes>
<description>
</description>
</property>
<property>
<name>spring.servlet.multipart.max-request-size</name>
<value>1024</value>
<value-attributes>
<unit>MB</unit>
<type>int</type>
</value-attributes>
<description>
</description>
</property>
<property>
<name>server.jetty.max-http-post-size</name>
<value>5000000</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description>
</description>
</property>
<property>
<name>spring.messages.encoding</name>
<value>UTF-8</value>
<description></description>
</property>
</configuration>
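
The two multipart limits above are both in MB, and a single file can never be larger than the request carrying it, so max-file-size should not exceed max-request-size. A tiny hypothetical consistency check:

def check_multipart_limits(conf):
    file_mb = int(conf["spring.servlet.multipart.max-file-size"])
    request_mb = int(conf["spring.servlet.multipart.max-request-size"])
    if file_mb > request_mb:
        raise ValueError("max-file-size (%d MB) exceeds max-request-size (%d MB)"
                         % (file_mb, request_mb))

check_multipart_limits({"spring.servlet.multipart.max-file-size": "1024",
                        "spring.servlet.multipart.max-request-size": "1024"})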

467
ambari_plugin/common-services/DOLPHIN/1.2.1/configuration/dolphin-application.xml

@@ -0,0 +1,467 @@
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<configuration>
<property>
<name>spring.datasource.initialSize</name>
<value>5</value>
<description>
Init connection number
</description>
<value-attributes>
<type>int</type>
</value-attributes>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.minIdle</name>
<value>5</value>
<description>
Min connection number
</description>
<value-attributes>
<type>int</type>
</value-attributes>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.maxActive</name>
<value>50</value>
<description>
Max connection number
</description>
<value-attributes>
<type>int</type>
</value-attributes>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.maxWait</name>
<value>60000</value>
<description>
Max wait time to get a connection, in milliseconds.
If maxWait is configured, fair locks are enabled by default and concurrency efficiency decreases.
If necessary, unfair locks can be used by setting the useUnfairLock attribute to true.
</description>
<value-attributes>
<type>int</type>
</value-attributes>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.timeBetweenEvictionRunsMillis</name>
<value>60000</value>
<description>
Milliseconds for check to close free connections
</description>
<value-attributes>
<type>int</type>
</value-attributes>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.timeBetweenConnectErrorMillis</name>
<value>60000</value>
<description>
Interval, in milliseconds, at which the Destroy thread checks connections and closes the physical connection
if the connection's idle time is greater than or equal to minEvictableIdleTimeMillis.
</description>
<value-attributes>
<type>int</type>
</value-attributes>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.minEvictableIdleTimeMillis</name>
<value>300000</value>
<description>
The longest time a connection remains idle without being evicted, in milliseconds
</description>
<value-attributes>
<type>int</type>
</value-attributes>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.validationQuery</name>
<value>SELECT 1</value>
<description>
The SQL used to check whether a connection is valid; it must be a query statement.
If validationQuery is null, testOnBorrow, testOnReturn and testWhileIdle will not work.
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.validationQueryTimeout</name>
<value>3</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description>
Check whether the connection is valid for timeout, in seconds
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.testWhileIdle</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description>
When applying for a connection,
if the connection is detected to have been idle longer than timeBetweenEvictionRunsMillis,
validationQuery is executed to check whether the connection is valid.
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.testOnBorrow</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description>
Execute validation to check if the connection is valid when applying for a connection
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.testOnReturn</name>
<value>false</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description>
Execute validation to check if the connection is valid when the connection is returned
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.defaultAutoCommit</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description>
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.keepAlive</name>
<value>false</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description>
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.poolPreparedStatements</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description>
Open PSCache, specify count PSCache for every connection
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.maxPoolPreparedStatementPerConnectionSize</name>
<value>20</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.spring.datasource.filters</name>
<value>stat,wall,log4j</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.connectionProperties</name>
<value>druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.mapper-locations</name>
<value>classpath*:/org.apache.dolphinscheduler.dao.mapper/*.xml</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.typeEnumsPackage</name>
<value>org.apache.dolphinscheduler.*.enums</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.typeAliasesPackage</name>
<value>org.apache.dolphinscheduler.dao.entity</value>
<description>
Entity scan, where multiple packages are separated by a comma or semicolon
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.global-config.db-config.id-type</name>
<value>AUTO</value>
<value-attributes>
<type>value-list</type>
<entries>
<entry>
<value>AUTO</value>
<label>AUTO</label>
</entry>
<entry>
<value>INPUT</value>
<label>INPUT</label>
</entry>
<entry>
<value>ID_WORKER</value>
<label>ID_WORKER</label>
</entry>
<entry>
<value>UUID</value>
<label>UUID</label>
</entry>
</entries>
<selection-cardinality>1</selection-cardinality>
</value-attributes>
<description>
Primary key type. AUTO: database auto-increment ID;
INPUT: user-supplied ID;
ID_WORKER: globally unique numeric ID;
UUID: globally unique UUID.
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.global-config.db-config.field-strategy</name>
<value>NOT_NULL</value>
<value-attributes>
<type>value-list</type>
<entries>
<entry>
<value>IGNORED</value>
<label>IGNORED</label>
</entry>
<entry>
<value>NOT_NULL</value>
<label>NOT_NULL</label>
</entry>
<entry>
<value>NOT_EMPTY</value>
<label>NOT_EMPTY</label>
</entry>
</entries>
<selection-cardinality>1</selection-cardinality>
</value-attributes>
<description>
Field strategy. IGNORED: skip the check;
NOT_NULL: check for non-null;
NOT_EMPTY: check for non-empty.
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.global-config.db-config.column-underline</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.global-config.db-config.logic-delete-value</name>
<value>1</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.global-config.db-config.logic-not-delete-value</name>
<value>0</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.global-config.db-config.banner</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.configuration.map-underscore-to-camel-case</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.configuration.cache-enabled</name>
<value>false</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.configuration.call-setters-on-nulls</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.configuration.jdbc-type-for-null</name>
<value>null</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.exec.threads</name>
<value>100</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.exec.task.num</name>
<value>20</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.heartbeat.interval</name>
<value>10</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.task.commit.retryTimes</name>
<value>5</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.task.commit.interval</name>
<value>1000</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.max.cpuload.avg</name>
<value>100</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.reserved.memory</name>
<value>0.1</value>
<value-attributes>
<type>float</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>worker.exec.threads</name>
<value>100</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>worker.heartbeat.interval</name>
<value>10</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>worker.fetch.task.num</name>
<value>3</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>worker.max.cpuload.avg</name>
<value>100</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>worker.reserved.memory</name>
<value>0.1</value>
<value-attributes>
<type>float</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
</configuration>
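
The validation settings above interact: testOnBorrow validates on every checkout, while testWhileIdle only validates connections that have sat idle for at least timeBetweenEvictionRunsMillis. Illustrative pseudologic of the documented behavior (a sketch, not Druid's source):

import time

def should_validate(conf, last_used_ms):
    idle_ms = time.time() * 1000 - last_used_ms
    if conf["testOnBorrow"]:
        return True   # validate on every borrow
    if conf["testWhileIdle"] and idle_ms >= conf["timeBetweenEvictionRunsMillis"]:
        return True   # validate only connections idle long enough
    return False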

232
ambari_plugin/common-services/DOLPHIN/1.2.1/configuration/dolphin-common.xml

@@ -0,0 +1,232 @@
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<configuration>
<property>
<name>dolphinscheduler.queue.impl</name>
<value>zookeeper</value>
<description>
Task queue implementation, default "zookeeper"
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>zookeeper.dolphinscheduler.root</name>
<value>/dolphinscheduler</value>
<description>
dolphinscheduler root directory
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>zookeeper.session.timeout</name>
<value>300</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description>
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>zookeeper.connection.timeout</name>
<value>300</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description>
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>zookeeper.retry.base.sleep</name>
<value>100</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description>
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>zookeeper.retry.max.sleep</name>
<value>30000</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description>
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>zookeeper.retry.maxtime</name>
<value>5</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description>
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>res.upload.startup.type</name>
<display-name>Choose Resource Upload Startup Type</display-name>
<description>
Resource upload startup type: HDFS, S3 or NONE
</description>
<value>NONE</value>
<value-attributes>
<type>value-list</type>
<entries>
<entry>
<value>HDFS</value>
<label>HDFS</label>
</entry>
<entry>
<value>S3</value>
<label>S3</label>
</entry>
<entry>
<value>NONE</value>
<label>NONE</label>
</entry>
</entries>
<selection-cardinality>1</selection-cardinality>
</value-attributes>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>hdfs.root.user</name>
<value>hdfs</value>
<description>
Users who have permission to create directories under the HDFS root path
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>data.store2hdfs.basepath</name>
<value>/dolphinscheduler</value>
<description>
Base data directory; resource files will be stored under this HDFS path. Configure it yourself,
and make sure the directory exists on HDFS and has read/write permissions.
"/dolphinscheduler" is recommended
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>data.basedir.path</name>
<value>/tmp/dolphinscheduler</value>
<description>
User data directory path. Configure it yourself,
and make sure the directory exists and has read/write permissions
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>hadoop.security.authentication.startup.state</name>
<value>false</value>
<value-attributes>
<type>value-list</type>
<entries>
<entry>
<value>true</value>
<label>Enabled</label>
</entry>
<entry>
<value>false</value>
<label>Disabled</label>
</entry>
</entries>
<selection-cardinality>1</selection-cardinality>
</value-attributes>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>java.security.krb5.conf.path</name>
<value>/opt/krb5.conf</value>
<description>
java.security.krb5.conf path
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>login.user.keytab.username</name>
<value>hdfs-mycluster@ESZ.COM</value>
<description>
LoginUserFromKeytab user
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>login.user.keytab.path</name>
<value>/opt/hdfs.headless.keytab</value>
<description>
LoginUserFromKeytab path
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>resource.view.suffixs</name>
<value>txt,log,sh,conf,cfg,py,java,sql,hql,xml,properties</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>fs.defaultFS</name>
<value>hdfs://mycluster:8020</value>
<description>
HA or single namenode.
For namenode HA, copy core-site.xml and hdfs-site.xml to the conf directory.
S3 is also supported, for example: s3a://dolphinscheduler
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>fs.s3a.endpoint</name>
<value>http://host:9010</value>
<description>
Required for S3: the S3 endpoint
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>fs.s3a.access.key</name>
<value>A3DXS30FO22544RE</value>
<description>
Required for S3: the S3 access key
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>fs.s3a.secret.key</name>
<value>OloCLq3n+8+sdPHUhJ21XrSxTC+JK</value>
<description>
Required for S3: the S3 secret key
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>loggerserver.rpc.port</name>
<value>50051</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description>
</description>
<on-ambari-upgrade add="true"/>
</property>
</configuration>
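
res.upload.startup.type selects the resource-storage backend, and the fs.* properties above only matter for the chosen backend. A hypothetical validation helper showing the implied dependencies (illustrative names, not DolphinScheduler code):

def validate_resource_conf(conf):
    kind = conf.get("res.upload.startup.type", "NONE")
    if kind == "HDFS" and not conf.get("fs.defaultFS", "").startswith(("hdfs://", "s3a://")):
        raise ValueError("HDFS upload enabled but fs.defaultFS is not a filesystem URI")
    if kind == "S3":
        for key in ("fs.s3a.endpoint", "fs.s3a.access.key", "fs.s3a.secret.key"):
            if not conf.get(key):
                raise ValueError("S3 upload enabled but %s is missing" % key)
    return kind  # NONE disables resource upload entirely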

123
ambari_plugin/common-services/DOLPHIN/1.2.1/configuration/dolphin-env.xml

@@ -0,0 +1,123 @@
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<configuration>
<property>
<name>dolphin.database.type</name>
<value>mysql</value>
<description>The database type selected for Dolphin Scheduler</description>
<display-name>Dolphin Database Type</display-name>
<value-attributes>
<type>value-list</type>
<entries>
<entry>
<value>mysql</value>
<label>Mysql</label>
</entry>
<entry>
<value>postgresql</value>
<label>Postgresql</label>
</entry>
</entries>
<selection-cardinality>1</selection-cardinality>
</value-attributes>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>dolphin.database.host</name>
<value></value>
<display-name>Dolphin Database Host</display-name>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>dolphin.database.port</name>
<value></value>
<display-name>Dolphin Database Port</display-name>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>dolphin.database.username</name>
<value></value>
<display-name>Dolphin Database Username</display-name>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>dolphin.database.password</name>
<value></value>
<display-name>Dolphin Database Password</display-name>
<property-type>PASSWORD</property-type>
<value-attributes>
<type>password</type>
</value-attributes>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>dolphin.user</name>
<value></value>
<description>The user that installs and administers Dolphin Scheduler</description>
<display-name>Deploy User</display-name>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>dolphin.group</name>
<value></value>
<description>The group that installs and administers Dolphin Scheduler</description>
<display-name>Deploy Group</display-name>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>dolphinscheduler-env-content</name>
<display-name>Dolphinscheduler Env template</display-name>
<description>This is the jinja template for dolphinscheduler.env.sh file</description>
<value>#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
export HADOOP_HOME=/opt/soft/hadoop
export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop
export SPARK_HOME1=/opt/soft/spark1
export SPARK_HOME2=/opt/soft/spark2
export PYTHON_HOME=/opt/soft/python
export JAVA_HOME=/opt/soft/java
export HIVE_HOME=/opt/soft/hive
export FLINK_HOME=/opt/soft/flink</value>
<value-attributes>
<type>content</type>
<empty-value-valid>false</empty-value-valid>
<show-property-name>false</show-property-name>
</value-attributes>
<on-ambari-upgrade add="true"/>
</property>
</configuration>
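
The dolphinscheduler-env-content property is described as a Jinja template for dolphinscheduler.env.sh. A minimal sketch of rendering such a template with the jinja2 package (Ambari uses its own template machinery; this is only an illustration):

from jinja2 import Template

env_template = "export JAVA_HOME={{ java_home }}\nexport HADOOP_HOME={{ hadoop_home }}"
print(Template(env_template).render(java_home="/opt/soft/java",
                                    hadoop_home="/opt/soft/hadoop"))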

131
ambari_plugin/common-services/DOLPHIN/1.2.1/configuration/dolphin-quartz.xml

@@ -0,0 +1,131 @@
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<configuration>
<property>
<name>org.quartz.scheduler.instanceName</name>
<value>DolphinScheduler</value>
<description></description>
</property>
<property>
<!-- enumerates the allowed values -->
<name>org.quartz.scheduler.instanceId</name>
<value>AUTO</value>
<description></description>
</property>
<property>
<name>org.quartz.scheduler.makeSchedulerThreadDaemon</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description></description>
</property>
<property>
<name>org.quartz.jobStore.useProperties</name>
<value>false</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description></description>
</property>
<property>
<name>org.quartz.threadPool.class</name>
<value>org.quartz.simpl.SimpleThreadPool</value>
<description></description>
</property>
<property>
<name>org.quartz.threadPool.makeThreadsDaemons</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description></description>
</property>
<property>
<name>org.quartz.threadPool.threadCount</name>
<value>25</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
</property>
<property>
<name>org.quartz.threadPool.threadPriority</name>
<value>5</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
</property>
<property>
<name>org.quartz.jobStore.class</name>
<value>org.quartz.impl.jdbcjobstore.JobStoreTX</value>
<description></description>
</property>
<property>
<name>org.quartz.jobStore.tablePrefix</name>
<value>QRTZ_</value>
<description></description>
</property>
<property>
<name>org.quartz.jobStore.isClustered</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description></description>
</property>
<property>
<name>org.quartz.jobStore.misfireThreshold</name>
<value>60000</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
</property>
<property>
<name>org.quartz.jobStore.clusterCheckinInterval</name>
<value>5000</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
</property>
<property>
<name>org.quartz.jobStore.dataSource</name>
<value>myDs</value>
<description></description>
</property>
<property>
<name>org.quartz.dataSource.myDs.connectionProvider.class</name>
<value>org.apache.dolphinscheduler.server.quartz.DruidConnectionProvider</value>
<description></description>
</property>
<property>
<name>org.quartz.dataSource.myDs.maxConnections</name>
<value>10</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
</property>
<property>
<name>org.quartz.dataSource.myDs.validationQuery</name>
<value>select 1</value>
<description></description>
</property>
</configuration>
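
These org.quartz.* settings are ultimately written out as plain key=value pairs (compare the quartz.properties.j2 template in the file list). A minimal illustrative conversion, assuming a simple key=value rendering:

quartz_props = {
    "org.quartz.scheduler.instanceName": "DolphinScheduler",
    "org.quartz.jobStore.isClustered": "true",
    "org.quartz.jobStore.clusterCheckinInterval": "5000",
}
with open("quartz.properties", "w") as f:
    for key, value in sorted(quartz_props.items()):
        f.write("%s=%s\n" % (key, value))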

137
ambari_plugin/common-services/DOLPHIN/1.2.1/metainfo.xml

@@ -0,0 +1,137 @@
<?xml version="1.0"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<metainfo>
<schemaVersion>2.0</schemaVersion>
<services>
<service>
<name>DOLPHIN</name>
<displayName>Dolphin Scheduler</displayName>
<comment>A distributed, easy-to-extend visual DAG workflow task scheduling system</comment>
<version>1.2.1</version>
<components>
<component>
<name>DOLPHIN_MASTER</name>
<displayName>DS Master</displayName>
<category>MASTER</category>
<cardinality>1+</cardinality>
<commandScript>
<script>scripts/dolphin_master_service.py</script>
<scriptType>PYTHON</scriptType>
<timeout>600</timeout>
</commandScript>
</component>
<component>
<name>DOLPHIN_LOGGER</name>
<displayName>DS Logger</displayName>
<category>SLAVE</category>
<cardinality>1+</cardinality>
<commandScript>
<script>scripts/dolphin_logger_service.py</script>
<scriptType>PYTHON</scriptType>
<timeout>600</timeout>
</commandScript>
</component>
<component>
<name>DOLPHIN_WORKER</name>
<displayName>DS Worker</displayName>
<category>SLAVE</category>
<cardinality>1+</cardinality>
<dependencies>
<dependency>
<name>DOLPHIN/DOLPHIN_LOGGER</name>
<scope>host</scope>
<auto-deploy>
<enabled>true</enabled>
</auto-deploy>
</dependency>
</dependencies>
<commandScript>
<script>scripts/dolphin_worker_service.py</script>
<scriptType>PYTHON</scriptType>
<timeout>600</timeout>
</commandScript>
</component>
<component>
<name>DOLPHIN_ALERT</name>
<displayName>DS Alert</displayName>
<category>SLAVE</category>
<cardinality>1</cardinality>
<commandScript>
<script>scripts/dolphin_alert_service.py</script>
<scriptType>PYTHON</scriptType>
<timeout>600</timeout>
</commandScript>
</component>
<component>
<name>DOLPHIN_API</name>
<displayName>DS_Api</displayName>
<category>SLAVE</category>
<cardinality>1</cardinality>
<commandScript>
<script>scripts/dolphin_api_service.py</script>
<scriptType>PYTHON</scriptType>
<timeout>600</timeout>
</commandScript>
</component>
</components>
<requiredServices>
<service>ZOOKEEPER</service>
</requiredServices>
<osSpecifics>
<osSpecific>
<osFamily>any</osFamily>
<packages>
<package>
<name>apache-dolphinscheduler-incubating-1.2.1*</name>
</package>
</packages>
</osSpecific>
</osSpecifics>
<configuration-dependencies>
<config-type>dolphin-alert</config-type>
<config-type>dolphin-app-api</config-type>
<config-type>dolphin-app-dao</config-type>
<config-type>dolphin-common</config-type>
<config-type>dolphin-env</config-type>
<config-type>dolphin-quartz</config-type>
</configuration-dependencies>
<themes>
<theme>
<fileName>theme.json</fileName>
<default>true</default>
</theme>
</themes>
<quickLinksConfigurations-dir>quicklinks</quickLinksConfigurations-dir>
<quickLinksConfigurations>
<quickLinksConfiguration>
<fileName>quicklinks.json</fileName>
<default>true</default>
</quickLinksConfiguration>
</quickLinksConfigurations>
</service>
</services>
</metainfo>

124
ambari_plugin/common-services/DOLPHIN/1.2.1/package/alerts/alert_dolphin_scheduler_status.py

@@ -0,0 +1,124 @@
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import socket
import urllib2
import os
import logging
import ambari_simplejson as json
from resource_management.libraries.script.script import Script

import sys
reload(sys)
sys.setdefaultencoding('utf-8')

logger = logging.getLogger('ambari_alerts')

config = Script.get_config()


def get_tokens():
    """
    Returns a tuple of tokens in the format {{site/property}} that will be used
    to build the dictionary passed into execute
    :rtype tuple
    """


def get_info(url, connection_timeout):
    response = None
    try:
        response = urllib2.urlopen(url, timeout=connection_timeout)
        json_data = response.read()
        return json_data
    finally:
        if response is not None:
            try:
                response.close()
            except:
                pass


def execute(configurations={}, parameters={}, host_name=None):
    """
    Returns a tuple containing the result code and a pre-formatted result label

    Keyword arguments:
    configurations : a mapping of configuration key to value
    parameters : a mapping of script parameter key to value
    host_name : the name of this host where the alert is running

    :type configurations dict
    :type parameters dict
    :type host_name str
    """

    alert_name = parameters['alertName']

    dolphin_pidfile_dir = "/opt/soft/run/dolphinscheduler"

    pid = "0"

    from resource_management.core import sudo

    is_running = True
    pid_file_path = ""
    if alert_name == 'DOLPHIN_MASTER':
        pid_file_path = dolphin_pidfile_dir + "/master-server.pid"
    elif alert_name == 'DOLPHIN_WORKER':
        pid_file_path = dolphin_pidfile_dir + "/worker-server.pid"
    elif alert_name == 'DOLPHIN_ALERT':
        pid_file_path = dolphin_pidfile_dir + "/alert-server.pid"
    elif alert_name == 'DOLPHIN_LOGGER':
        pid_file_path = dolphin_pidfile_dir + "/logger-server.pid"
    elif alert_name == 'DOLPHIN_API':
        pid_file_path = dolphin_pidfile_dir + "/api-server.pid"

    if not pid_file_path or not os.path.isfile(pid_file_path):
        is_running = False

    try:
        pid = int(sudo.read_file(pid_file_path))
    except:
        is_running = False

    try:
        # Kill with signal 0 will not actually kill the process.
        # From the doc:
        # If sig is 0, then no signal is sent, but error checking is still
        # performed; this can be used to check for the existence of a
        # process ID or process group ID.
        sudo.kill(pid, 0)
    except OSError:
        is_running = False

    if host_name is None:
        host_name = socket.getfqdn()

    if not is_running:
        result_code = "CRITICAL"
    else:
        result_code = "OK"

    label = "The component {0} of DOLPHIN_SCHEDULER on {1} is {2}".format(alert_name, host_name, result_code)

    return ((result_code, [label]))


if __name__ == "__main__":
    pass
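
The script's liveness test relies on kill with signal 0, which performs error checking without sending a signal. A standalone illustration using os.kill in place of Ambari's sudo.kill:

import os

def pid_is_running(pid):
    try:
        os.kill(pid, 0)  # signal 0: error checking only, nothing is delivered
        return True
    except OSError:
        return False

print(pid_is_running(os.getpid()))  # True: the current process exists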

61
ambari_plugin/common-services/DOLPHIN/1.2.1/package/scripts/dolphin_alert_service.py

@@ -0,0 +1,61 @@
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
from resource_management import *
from dolphin_env import dolphin_env
class DolphinAlertService(Script):
def install(self, env):
import params
env.set_params(params)
self.install_packages(env)
Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True)
def configure(self, env):
import params
params.pika_slave = True
env.set_params(params)
dolphin_env()
def start(self, env):
import params
env.set_params(params)
self.configure(env)
no_op_test = format("ls {dolphin_pidfile_dir}/alert-server.pid >/dev/null 2>&1 && ps `cat {dolphin_pidfile_dir}/alert-server.pid` | grep `cat {dolphin_pidfile_dir}/alert-server.pid` >/dev/null 2>&1")
start_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh start alert-server")
Execute(start_cmd, user=params.dolphin_user, not_if=no_op_test)
def stop(self, env):
import params
env.set_params(params)
stop_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh stop alert-server")
Execute(stop_cmd, user=params.dolphin_user)
time.sleep(5)
def status(self, env):
import status_params
env.set_params(status_params)
check_process_status(status_params.dolphin_run_dir + "alert-server.pid")
if __name__ == "__main__":
DolphinAlertService().execute()
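# The no_op_test guard above makes start() idempotent: the daemon is only
# launched when no live process matches the pid file. A minimal sketch of
# the same check-then-start pattern without the resource_management DSL
# (hypothetical helper, assuming the same pid-file layout):
import os
import subprocess

def start_if_not_running(pid_file, start_cmd):
    """Run start_cmd unless pid_file points at a live process."""
    try:
        os.kill(int(open(pid_file).read().strip()), 0)
    except (IOError, ValueError, OSError):
        subprocess.check_call(start_cmd, shell=True)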

70
ambari_plugin/common-services/DOLPHIN/1.2.1/package/scripts/dolphin_api_service.py

@ -0,0 +1,70 @@
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
from resource_management import *
from dolphin_env import dolphin_env
class DolphinApiService(Script):
def install(self, env):
import params
env.set_params(params)
self.install_packages(env)
Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True)
def configure(self, env):
import params
params.pika_slave = True
env.set_params(params)
dolphin_env()
def start(self, env):
import params
env.set_params(params)
self.configure(env)
#init
init_cmd=format("sh " + params.dolphin_home + "/script/create-dolphinscheduler.sh")
Execute(init_cmd, user=params.dolphin_user)
#upgrade
upgrade_cmd=format("sh " + params.dolphin_home + "/script/upgrade-dolphinscheduler.sh")
Execute(upgrade_cmd, user=params.dolphin_user)
no_op_test = format("ls {dolphin_pidfile_dir}/api-server.pid >/dev/null 2>&1 && ps `cat {dolphin_pidfile_dir}/api-server.pid` | grep `cat {dolphin_pidfile_dir}/api-server.pid` >/dev/null 2>&1")
start_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh start api-server")
Execute(start_cmd, user=params.dolphin_user, not_if=no_op_test)
def stop(self, env):
import params
env.set_params(params)
stop_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh stop api-server")
Execute(stop_cmd, user=params.dolphin_user)
time.sleep(5)
def status(self, env):
import status_params
env.set_params(status_params)
check_process_status(status_params.dolphin_run_dir + "api-server.pid")
if __name__ == "__main__":
DolphinApiService().execute()

121
ambari_plugin/common-services/DOLPHIN/1.2.1/package/scripts/dolphin_env.py

@ -0,0 +1,121 @@
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
def dolphin_env():
import params
Directory(params.dolphin_pidfile_dir,
mode=0777,
owner=params.dolphin_user,
group=params.dolphin_group,
create_parents=True
)
Directory(params.dolphin_log_dir,
mode=0777,
owner=params.dolphin_user,
group=params.dolphin_group,
create_parents=True
)
Directory(params.dolphin_conf_dir,
mode=0777,
owner=params.dolphin_user,
group=params.dolphin_group,
create_parents=True
)
Directory(params.dolphin_alert_map['xls.file.path'],
mode=0777,
owner=params.dolphin_user,
group=params.dolphin_group,
create_parents=True
)
Directory(params.dolphin_common_map['data.basedir.path'],
mode=0777,
owner=params.dolphin_user,
group=params.dolphin_group,
create_parents=True
)
Directory(params.dolphin_common_map['data.download.basedir.path'],
mode=0777,
owner=params.dolphin_user,
group=params.dolphin_group,
create_parents=True
)
Directory(params.dolphin_common_map['process.exec.basepath'],
mode=0777,
owner=params.dolphin_user,
group=params.dolphin_group,
create_parents=True
)
File(format(params.dolphin_env_path),
mode=0777,
content=InlineTemplate(params.dolphin_env_content),
owner=params.dolphin_user,
group=params.dolphin_group
)
File(format(params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh"),
mode=0755,
content=Template("dolphin-daemon.j2"),
owner=params.dolphin_user,
group=params.dolphin_group
)
File(format(params.dolphin_conf_dir + "/alert.properties"),
mode=0755,
content=Template("alert.properties.j2"),
owner=params.dolphin_user,
group=params.dolphin_group
)
File(format(params.dolphin_conf_dir + "/application.properties"),
mode=0755,
content=Template("application.properties.j2"),
owner=params.dolphin_user,
group=params.dolphin_group
)
File(format(params.dolphin_conf_dir + "/application-api.properties"),
mode=0755,
content=Template("application-api.properties.j2"),
owner=params.dolphin_user,
group=params.dolphin_group
)
File(format(params.dolphin_conf_dir + "/common.properties"),
mode=0755,
content=Template("common.properties.j2"),
owner=params.dolphin_user,
group=params.dolphin_group
)
File(format(params.dolphin_conf_dir + "/quartz.properties"),
mode=0755,
content=Template("quartz.properties.j2"),
owner=params.dolphin_user,
group=params.dolphin_group
)
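# The five property files above follow a single pattern; an equivalent loop
# (a sketch only; behavior identical to the individual File resources):
#
# for prop_file in ['alert.properties', 'application.properties',
#                   'application-api.properties', 'common.properties',
#                   'quartz.properties']:
#     File(format(params.dolphin_conf_dir + "/" + prop_file),
#          mode=0755,
#          content=Template(prop_file + ".j2"),
#          owner=params.dolphin_user,
#          group=params.dolphin_group
#          )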

61
ambari_plugin/common-services/DOLPHIN/1.2.1/package/scripts/dolphin_logger_service.py

@ -0,0 +1,61 @@
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
from resource_management import *
from dolphin_env import dolphin_env
class DolphinLoggerService(Script):
def install(self, env):
import params
env.set_params(params)
self.install_packages(env)
Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True)
def configure(self, env):
import params
params.pika_slave = True
env.set_params(params)
dolphin_env()
def start(self, env):
import params
env.set_params(params)
self.configure(env)
no_op_test = format("ls {dolphin_pidfile_dir}/logger-server.pid >/dev/null 2>&1 && ps `cat {dolphin_pidfile_dir}/logger-server.pid` | grep `cat {dolphin_pidfile_dir}/logger-server.pid` >/dev/null 2>&1")
start_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh start logger-server")
Execute(start_cmd, user=params.dolphin_user, not_if=no_op_test)
def stop(self, env):
import params
env.set_params(params)
stop_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh stop logger-server")
Execute(stop_cmd, user=params.dolphin_user)
time.sleep(5)
def status(self, env):
import status_params
env.set_params(status_params)
check_process_status(status_params.dolphin_run_dir + "logger-server.pid")
if __name__ == "__main__":
DolphinLoggerService().execute()

61
ambari_plugin/common-services/DOLPHIN/1.2.1/package/scripts/dolphin_master_service.py

@ -0,0 +1,61 @@
# -*- coding: utf-8 -*-
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
from resource_management import *
from dolphin_env import dolphin_env
class DolphinMasterService(Script):
def install(self, env):
import params
env.set_params(params)
self.install_packages(env)
Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True)
def configure(self, env):
import params
params.pika_slave = True
env.set_params(params)
dolphin_env()
def start(self, env):
import params
env.set_params(params)
self.configure(env)
no_op_test = format("ls {dolphin_pidfile_dir}/master-server.pid >/dev/null 2>&1 && ps `cat {dolphin_pidfile_dir}/master-server.pid` | grep `cat {dolphin_pidfile_dir}/master-server.pid` >/dev/null 2>&1")
start_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh start master-server")
Execute(start_cmd, user=params.dolphin_user, not_if=no_op_test)
def stop(self, env):
import params
env.set_params(params)
stop_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh stop master-server")
Execute(stop_cmd, user=params.dolphin_user)
time.sleep(5)
def status(self, env):
import status_params
env.set_params(status_params)
check_process_status(status_params.dolphin_run_dir + "master-server.pid")
if __name__ == "__main__":
DolphinMasterService().execute()

60
ambari_plugin/common-services/DOLPHIN/1.2.1/package/scripts/dolphin_worker_service.py

@ -0,0 +1,60 @@
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
from resource_management import *
from dolphin_env import dolphin_env
class DolphinWorkerService(Script):
def install(self, env):
import params
env.set_params(params)
self.install_packages(env)
Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True)
def configure(self, env):
import params
params.pika_slave = True
env.set_params(params)
dolphin_env()
def start(self, env):
import params
env.set_params(params)
self.configure(env)
no_op_test = format("ls {dolphin_pidfile_dir}/worker-server.pid >/dev/null 2>&1 && ps `cat {dolphin_pidfile_dir}/worker-server.pid` | grep `cat {dolphin_pidfile_dir}/worker-server.pid` >/dev/null 2>&1")
start_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh start worker-server")
Execute(start_cmd, user=params.dolphin_user, not_if=no_op_test)
def stop(self, env):
import params
env.set_params(params)
stop_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh stop worker-server")
Execute(stop_cmd, user=params.dolphin_user)
time.sleep(5)
def status(self, env):
import status_params
env.set_params(status_params)
check_process_status(status_params.dolphin_run_dir + "worker-server.pid")
if __name__ == "__main__":
DolphinWorkerService().execute()

150
ambari_plugin/common-services/DOLPHIN/1.2.1/package/scripts/params.py

@ -0,0 +1,150 @@
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from resource_management import *
from resource_management.core.logger import Logger
from resource_management.libraries.functions import default
Logger.initialize_logger()
reload(sys)
sys.setdefaultencoding('utf-8')
# server configurations
config = Script.get_config()
# conf_dir = "/etc/"
dolphin_home = "/opt/soft/apache-dolphinscheduler-incubating-1.2.1"
dolphin_conf_dir = dolphin_home + "/conf"
dolphin_log_dir = dolphin_home + "/logs"
dolphin_bin_dir = dolphin_home + "/bin"
dolphin_lib_jars = dolphin_home + "/lib/*"
dolphin_pidfile_dir = "/opt/soft/run/dolphinscheduler"
rmHosts = default("/clusterHostInfo/rm_host", [])
# dolphin-env
dolphin_env_map = {}
dolphin_env_map.update(config['configurations']['dolphin-env'])
# which user to install and admin dolphin scheduler
dolphin_user = dolphin_env_map['dolphin.user']
dolphin_group = dolphin_env_map['dolphin.group']
# .dolphinscheduler_env.sh
dolphin_env_path = dolphin_conf_dir + '/env/dolphinscheduler_env.sh'
dolphin_env_content = dolphin_env_map['dolphinscheduler-env-content']
# database config
dolphin_database_config = {}
dolphin_database_config['dolphin_database_type'] = dolphin_env_map['dolphin.database.type']
dolphin_database_config['dolphin_database_host'] = dolphin_env_map['dolphin.database.host']
dolphin_database_config['dolphin_database_port'] = dolphin_env_map['dolphin.database.port']
dolphin_database_config['dolphin_database_username'] = dolphin_env_map['dolphin.database.username']
dolphin_database_config['dolphin_database_password'] = dolphin_env_map['dolphin.database.password']
if 'mysql' == dolphin_database_config['dolphin_database_type']:
dolphin_database_config['dolphin_database_driver'] = 'com.mysql.jdbc.Driver'
dolphin_database_config['driverDelegateClass'] = 'org.quartz.impl.jdbcjobstore.StdJDBCDelegate'
dolphin_database_config['dolphin_database_url'] = 'jdbc:mysql://' + dolphin_env_map['dolphin.database.host'] \
+ ':' + dolphin_env_map['dolphin.database.port'] \
+ '/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8'
else:
dolphin_database_config['dolphin_database_driver'] = 'org.postgresql.Driver'
dolphin_database_config['driverDelegateClass'] = 'org.quartz.impl.jdbcjobstore.PostgreSQLDelegate'
dolphin_database_config['dolphin_database_url'] = 'jdbc:postgresql://' + dolphin_env_map['dolphin.database.host'] \
+ ':' + dolphin_env_map['dolphin.database.port'] \
+ '/dolphinscheduler'
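# For example, host=192.168.x.x with port=5432 yields
#   jdbc:postgresql://192.168.x.x:5432/dolphinscheduler
# while the mysql branch with port=3306 yields
#   jdbc:mysql://192.168.x.x:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8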
# application-alert.properties
dolphin_alert_map = {}
wechat_push_url = 'https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token'
wechat_token_url = 'https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret'
wechat_team_send_msg = '{\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}'
wechat_user_send_msg = '{\"touser\":\"$toUser\",\"agentid\":\"$agentId\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"$msg\"}}'
dolphin_alert_map['enterprise.wechat.push.ur'] = wechat_push_url
dolphin_alert_map['enterprise.wechat.token.url'] = wechat_token_url
dolphin_alert_map['enterprise.wechat.team.send.msg'] = wechat_team_send_msg
dolphin_alert_map['enterprise.wechat.user.send.msg'] = wechat_user_send_msg
dolphin_alert_map.update(config['configurations']['dolphin-alert'])
# application-api.properties
dolphin_app_api_map = {}
dolphin_app_api_map['logging.config'] = 'classpath:apiserver_logback.xml'
dolphin_app_api_map['spring.messages.basename'] = 'i18n/messages'
dolphin_app_api_map['server.servlet.context-path'] = '/dolphinscheduler/'
dolphin_app_api_map.update(config['configurations']['dolphin-application-api'])
# application-dao.properties
dolphin_application_map = {}
dolphin_application_map['spring.datasource.type'] = 'com.alibaba.druid.pool.DruidDataSource'
dolphin_application_map['spring.datasource.driver-class-name'] = dolphin_database_config['dolphin_database_driver']
dolphin_application_map['spring.datasource.url'] = dolphin_database_config['dolphin_database_url']
dolphin_application_map['spring.datasource.username'] = dolphin_database_config['dolphin_database_username']
dolphin_application_map['spring.datasource.password'] = dolphin_database_config['dolphin_database_password']
dolphin_application_map.update(config['configurations']['dolphin-application'])
# common.properties
dolphin_common_map = {}
if 'yarn-site' in config['configurations'] and \
'yarn.resourcemanager.webapp.address' in config['configurations']['yarn-site']:
yarn_resourcemanager_webapp_address = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address']
yarn_application_status_address = 'http://' + yarn_resourcemanager_webapp_address + '/ws/v1/cluster/apps/%s'
dolphin_common_map['yarn.application.status.address'] = yarn_application_status_address
rmHosts = default("/clusterHostInfo/rm_host", [])
if len(rmHosts) > 1:
dolphin_common_map['yarn.resourcemanager.ha.rm.ids'] = ','.join(rmHosts)
else:
dolphin_common_map['yarn.resourcemanager.ha.rm.ids'] = ''
dolphin_common_map_tmp = config['configurations']['dolphin-common']
data_basedir_path = dolphin_common_map_tmp['data.basedir.path']
process_exec_basepath = data_basedir_path + '/exec'
data_download_basedir_path = data_basedir_path + '/download'
dolphin_common_map['process.exec.basepath'] = process_exec_basepath
dolphin_common_map['data.download.basedir.path'] = data_download_basedir_path
dolphin_common_map['dolphinscheduler.env.path'] = dolphin_env_path
zookeeperHosts = default("/clusterHostInfo/zookeeper_hosts", [])
if len(zookeeperHosts) > 0 and "clientPort" in config['configurations']['zoo.cfg']:
clientPort = config['configurations']['zoo.cfg']['clientPort']
zookeeperPort = ":" + clientPort + ","
dolphin_common_map['zookeeper.quorum'] = zookeeperPort.join(zookeeperHosts) + ":" + clientPort
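# For example, zookeeperHosts = ['zk1', 'zk2', 'zk3'] with clientPort = '2181'
# produce zookeeper.quorum = zk1:2181,zk2:2181,zk3:2181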
dolphin_common_map.update(config['configurations']['dolphin-common'])
# quartz.properties
dolphin_quartz_map = {}
dolphin_quartz_map['org.quartz.jobStore.driverDelegateClass'] = dolphin_database_config['driverDelegateClass']
dolphin_quartz_map['org.quartz.dataSource.myDs.driver'] = dolphin_database_config['dolphin_database_driver']
dolphin_quartz_map['org.quartz.dataSource.myDs.URL'] = dolphin_database_config['dolphin_database_url']
dolphin_quartz_map['org.quartz.dataSource.myDs.user'] = dolphin_database_config['dolphin_database_username']
dolphin_quartz_map['org.quartz.dataSource.myDs.password'] = dolphin_database_config['dolphin_database_password']
dolphin_quartz_map.update(config['configurations']['dolphin-quartz'])
# if 'ganglia_server_host' in config['clusterHostInfo'] and \
# len(config['clusterHostInfo']['ganglia_server_host'])>0:
# ganglia_installed = True
# ganglia_server = config['clusterHostInfo']['ganglia_server_host'][0]
# ganglia_report_interval = 60
# else:
# ganglia_installed = False

31
ambari_plugin/common-services/DOLPHIN/1.2.1/package/scripts/service_check.py

@ -0,0 +1,31 @@
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
from resource_management.libraries.functions import get_unique_id_and_date
class ServiceCheck(Script):
def service_check(self, env):
import params
#env.set_params(params)
# Execute(format("which pika_server"))
if __name__ == "__main__":
ServiceCheck().execute()

23
ambari_plugin/common-services/DOLPHIN/1.2.1/package/scripts/status_params.py

@ -0,0 +1,23 @@
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
config = Script.get_config()
dolphin_run_dir = "/opt/soft/run/dolphinscheduler/"

7
dockerfile/conf/dolphinscheduler/conf/config/run_config.conf → ambari_plugin/common-services/DOLPHIN/1.2.1/package/templates/alert.properties.j2

@ -15,7 +15,6 @@
# limitations under the License.
#
masters=ark0,ark1
workers=ark2,ark3,ark4
alertServer=ark3
apiServers=ark1
{% for key, value in dolphin_alert_map.iteritems() -%}
{{key}}={{value}}
{% endfor %}

6
dockerfile/conf/dolphinscheduler/conf/config/install_config.conf → ambari_plugin/common-services/DOLPHIN/1.2.1/package/templates/application-api.properties.j2

@ -15,6 +15,6 @@
# limitations under the License.
#
installPath=/data1_1T/dolphinscheduler
deployUser=dolphinscheduler
ips=ark0,ark1,ark2,ark3,ark4
{% for key, value in dolphin_app_api_map.iteritems() -%}
{{key}}={{value}}
{% endfor %}

6
dockerfile/conf/dolphinscheduler/conf/env/.escheduler_env.sh → ambari_plugin/common-services/DOLPHIN/1.2.1/package/templates/application.properties.j2

@ -15,6 +15,6 @@
# limitations under the License.
#
export PYTHON_HOME=/usr/bin/python
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
export PATH=$PYTHON_HOME:$JAVA_HOME/bin:$PATH
{% for key, value in dolphin_application_map.iteritems() -%}
{{key}}={{value}}
{% endfor %}

7
dockerfile/conf/dolphinscheduler/conf/env/.dolphinscheduler_env.sh → ambari_plugin/common-services/DOLPHIN/1.2.1/package/templates/common.properties.j2

@ -15,7 +15,6 @@
# limitations under the License.
#
export PYTHON_HOME=/usr/bin/python
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
export PATH=$PYTHON_HOME:$JAVA_HOME/bin:$PATH
export DATAX_HOME=/opt/datax/bin/datax.py
{% for key, value in dolphin_common_map.iteritems() -%}
{{key}}={{value}}
{% endfor %}

119
ambari_plugin/common-services/DOLPHIN/1.2.1/package/templates/dolphin-daemon.j2

@ -0,0 +1,119 @@
#!/bin/sh
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
usage="Usage: dolphinscheduler-daemon.sh (start|stop) <command> "
# if no args specified, show usage
if [ $# -le 1 ]; then
echo $usage
exit 1
fi
startStop=$1
shift
command=$1
shift
echo "Begin $startStop $command......"
BIN_DIR=`dirname $0`
BIN_DIR=`cd "$BIN_DIR"; pwd`
DOLPHINSCHEDULER_HOME=$BIN_DIR/..
export HOSTNAME=`hostname`
DOLPHINSCHEDULER_LIB_JARS={{dolphin_lib_jars}}
DOLPHINSCHEDULER_OPTS="-server -Xmx16g -Xms4g -Xss512k -XX:+DisableExplicitGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:LargePageSizeInBytes=128m -XX:+UseFastAccessorMethods -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70"
STOP_TIMEOUT=5
log={{dolphin_log_dir}}/dolphinscheduler-$command-$HOSTNAME.out
pid={{dolphin_pidfile_dir}}/$command.pid
cd $DOLPHINSCHEDULER_HOME
if [ "$command" = "api-server" ]; then
LOG_FILE="-Dlogging.config={{dolphin_conf_dir}}/apiserver_logback.xml -Dspring.profiles.active=api"
CLASS=org.apache.dolphinscheduler.api.ApiApplicationServer
elif [ "$command" = "master-server" ]; then
LOG_FILE="-Dlogging.config={{dolphin_conf_dir}}/master_logback.xml -Ddruid.mysql.usePingMethod=false"
CLASS=org.apache.dolphinscheduler.server.master.MasterServer
elif [ "$command" = "worker-server" ]; then
LOG_FILE="-Dlogging.config={{dolphin_conf_dir}}/worker_logback.xml -Ddruid.mysql.usePingMethod=false"
CLASS=org.apache.dolphinscheduler.server.worker.WorkerServer
elif [ "$command" = "alert-server" ]; then
LOG_FILE="-Dlogback.configurationFile={{dolphin_conf_dir}}/alert_logback.xml"
CLASS=org.apache.dolphinscheduler.alert.AlertServer
elif [ "$command" = "logger-server" ]; then
CLASS=org.apache.dolphinscheduler.server.rpc.LoggerServer
elif [ "$command" = "combined-server" ]; then
LOG_FILE="-Dlogging.config={{dolphin_conf_dir}}/combined_logback.xml -Dspring.profiles.active=api -Dserver.is-combined-server=true"
CLASS=org.apache.dolphinscheduler.api.CombinedApplicationServer
else
echo "Error: No command named \`$command' was found."
exit 1
fi
case $startStop in
(start)
if [ -f $pid ]; then
if kill -0 `cat $pid` > /dev/null 2>&1; then
echo $command running as process `cat $pid`. Stop it first.
exit 1
fi
fi
echo starting $command, logging to $log
exec_command="$LOG_FILE $DOLPHINSCHEDULER_OPTS -classpath {{dolphin_conf_dir}}:{{dolphin_lib_jars}} $CLASS"
echo "nohup java $exec_command > $log 2>&1 < /dev/null &"
nohup java $exec_command > $log 2>&1 < /dev/null &
echo $! > $pid
;;
(stop)
if [ -f $pid ]; then
TARGET_PID=`cat $pid`
if kill -0 $TARGET_PID > /dev/null 2>&1; then
echo stopping $command
kill $TARGET_PID
sleep $STOP_TIMEOUT
if kill -0 $TARGET_PID > /dev/null 2>&1; then
echo "$command did not stop gracefully after $STOP_TIMEOUT seconds: killing with kill -9"
kill -9 $TARGET_PID
fi
else
echo no $command to stop
fi
rm -f $pid
else
echo no $command to stop
fi
;;
(*)
echo $usage
exit 1
;;
esac
echo "End $startStop $command."

20
ambari_plugin/common-services/DOLPHIN/1.2.1/package/templates/quartz.properties.j2

@ -0,0 +1,20 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{% for key, value in dolphin_quartz_map.iteritems() -%}
{{key}}={{value}}
{% endfor %}

26
ambari_plugin/common-services/DOLPHIN/1.2.1/quicklinks/quicklinks.json

@ -0,0 +1,26 @@
{
"name": "default",
"description": "default quick links configuration",
"configuration": {
"protocol":
{
"type":"http"
},
"links": [
{
"name": "dolphin-application-ui",
"label": "DolphinApplication UI",
"requires_user_name": "false",
"component_name": "DOLPHIN_API",
"url": "%@://%@:%@/dolphinscheduler/ui/view/login/index.html",
"port":{
"http_property": "server.port",
"http_default_port": "12345",
"regex": "^(\\d+)$",
"site": "dolphin-application-api"
}
}
]
}
}

605
ambari_plugin/common-services/DOLPHIN/1.2.1/themes/theme.json

@ -0,0 +1,605 @@
{
"name": "default",
"description": "Default theme for Dolphin Scheduler service",
"configuration": {
"layouts": [
{
"name": "default",
"tabs": [
{
"name": "settings",
"display-name": "Settings",
"layout": {
"tab-rows": "3",
"tab-columns": "3",
"sections": [
{
"name": "dolphin-env-config",
"display-name": "Dolphin Env Config",
"row-index": "0",
"column-index": "0",
"row-span": "1",
"column-span": "2",
"section-rows": "1",
"section-columns": "2",
"subsections": [
{
"name": "env-row1-col1",
"display-name": "Deploy User Info",
"row-index": "0",
"column-index": "0",
"row-span": "1",
"column-span": "1"
},
{
"name": "env-row1-col2",
"display-name": "System Env Optimization",
"row-index": "0",
"column-index": "1",
"row-span": "1",
"column-span": "1"
}
]
},
{
"name": "dolphin-database-config",
"display-name": "Database Config",
"row-index": "1",
"column-index": "0",
"row-span": "1",
"column-span": "2",
"section-rows": "1",
"section-columns": "3",
"subsections": [
{
"name": "database-row1-col1",
"row-index": "0",
"column-index": "0",
"row-span": "1",
"column-span": "1"
},
{
"name": "database-row1-col2",
"row-index": "0",
"column-index": "1",
"row-span": "1",
"column-span": "1"
},
{
"name": "database-row1-col3",
"row-index": "0",
"column-index": "2",
"row-span": "1",
"column-span": "1"
}
]
},
{
"name": "dynamic-config",
"row-index": "2",
"column-index": "0",
"row-span": "1",
"column-span": "2",
"section-rows": "1",
"section-columns": "3",
"subsections": [
{
"name": "dynamic-row1-col1",
"display-name": "Resource FS Config",
"row-index": "0",
"column-index": "0",
"row-span": "1",
"column-span": "1"
},
{
"name": "dynamic-row1-col2",
"display-name": "Kerberos Info",
"row-index": "0",
"column-index": "1",
"row-span": "1",
"column-span": "1"
},
{
"name": "dynamic-row1-col3",
"display-name": "Wechat Info",
"row-index": "0",
"column-index": "1",
"row-span": "1",
"column-span": "1"
}
]
}
]
}
}
]
}
],
"placement": {
"configuration-layout": "default",
"configs": [
{
"config": "dolphin-env/dolphin.database.type",
"subsection-name": "database-row1-col1"
},
{
"config": "dolphin-env/dolphin.database.host",
"subsection-name": "database-row1-col2"
},
{
"config": "dolphin-env/dolphin.database.port",
"subsection-name": "database-row1-col2"
},
{
"config": "dolphin-env/dolphin.database.username",
"subsection-name": "database-row1-col3"
},
{
"config": "dolphin-env/dolphin.database.password",
"subsection-name": "database-row1-col3"
},
{
"config": "dolphin-env/dolphin.user",
"subsection-name": "env-row1-col1"
},
{
"config": "dolphin-env/dolphin.group",
"subsection-name": "env-row1-col1"
},
{
"config": "dolphin-env/dolphinscheduler-env-content",
"subsection-name": "env-row1-col2"
},
{
"config": "dolphin-common/res.upload.startup.type",
"subsection-name": "dynamic-row1-col1"
},
{
"config": "dolphin-common/hdfs.root.user",
"subsection-name": "dynamic-row1-col1",
"depends-on": [
{
"configs":[
"dolphin-common/res.upload.startup.type"
],
"if": "${dolphin-common/res.upload.startup.type} === HDFS",
"then": {
"property_value_attributes": {
"visible": true
}
},
"else": {
"property_value_attributes": {
"visible": false
}
}
}
]
},
{
"config": "dolphin-common/data.store2hdfs.basepath",
"subsection-name": "dynamic-row1-col1",
"depends-on": [
{
"configs":[
"dolphin-common/res.upload.startup.type"
],
"if": "${dolphin-common/res.upload.startup.type} === HDFS",
"then": {
"property_value_attributes": {
"visible": true
}
},
"else": {
"property_value_attributes": {
"visible": false
}
}
}
]
},
{
"config": "dolphin-common/fs.defaultFS",
"subsection-name": "dynamic-row1-col1",
"depends-on": [
{
"configs":[
"dolphin-common/res.upload.startup.type"
],
"if": "${dolphin-common/res.upload.startup.type} === HDFS",
"then": {
"property_value_attributes": {
"visible": true
}
},
"else": {
"property_value_attributes": {
"visible": false
}
}
}
]
},
{
"config": "dolphin-common/fs.s3a.endpoint",
"subsection-name": "dynamic-row1-col1",
"depends-on": [
{
"configs":[
"dolphin-common/res.upload.startup.type"
],
"if": "${dolphin-common/res.upload.startup.type} === S3",
"then": {
"property_value_attributes": {
"visible": true
}
},
"else": {
"property_value_attributes": {
"visible": false
}
}
}
]
},
{
"config": "dolphin-common/fs.s3a.access.key",
"subsection-name": "dynamic-row1-col1",
"depends-on": [
{
"configs":[
"dolphin-common/res.upload.startup.type"
],
"if": "${dolphin-common/res.upload.startup.type} === S3",
"then": {
"property_value_attributes": {
"visible": true
}
},
"else": {
"property_value_attributes": {
"visible": false
}
}
}
]
},
{
"config": "dolphin-common/fs.s3a.secret.key",
"subsection-name": "dynamic-row1-col1",
"depends-on": [
{
"configs":[
"dolphin-common/res.upload.startup.type"
],
"if": "${dolphin-common/res.upload.startup.type} === S3",
"then": {
"property_value_attributes": {
"visible": true
}
},
"else": {
"property_value_attributes": {
"visible": false
}
}
}
]
},
{
"config": "dolphin-common/hadoop.security.authentication.startup.state",
"subsection-name": "dynamic-row1-col2"
},
{
"config": "dolphin-common/java.security.krb5.conf.path",
"subsection-name": "dynamic-row1-col2",
"depends-on": [
{
"configs":[
"dolphin-common/hadoop.security.authentication.startup.state"
],
"if": "${dolphin-common/hadoop.security.authentication.startup.state}",
"then": {
"property_value_attributes": {
"visible": true
}
},
"else": {
"property_value_attributes": {
"visible": false
}
}
}
]
},
{
"config": "dolphin-common/login.user.keytab.username",
"subsection-name": "dynamic-row1-col2",
"depends-on": [
{
"configs":[
"dolphin-common/hadoop.security.authentication.startup.state"
],
"if": "${dolphin-common/hadoop.security.authentication.startup.state}",
"then": {
"property_value_attributes": {
"visible": true
}
},
"else": {
"property_value_attributes": {
"visible": false
}
}
}
]
},
{
"config": "dolphin-common/login.user.keytab.path",
"subsection-name": "dynamic-row1-col2",
"depends-on": [
{
"configs":[
"dolphin-common/hadoop.security.authentication.startup.state"
],
"if": "${dolphin-common/hadoop.security.authentication.startup.state}",
"then": {
"property_value_attributes": {
"visible": true
}
},
"else": {
"property_value_attributes": {
"visible": false
}
}
}
]
},
{
"config": "dolphin-alert/enterprise.wechat.enable",
"subsection-name": "dynamic-row1-col3"
},
{
"config": "dolphin-alert/enterprise.wechat.corp.id",
"subsection-name": "dynamic-row1-col3",
"depends-on": [
{
"configs":[
"dolphin-alert/enterprise.wechat.enable"
],
"if": "${dolphin-alert/enterprise.wechat.enable}",
"then": {
"property_value_attributes": {
"visible": true
}
},
"else": {
"property_value_attributes": {
"visible": false
}
}
}
]
},
{
"config": "dolphin-alert/enterprise.wechat.secret",
"subsection-name": "dynamic-row1-col3",
"depends-on": [
{
"configs":[
"dolphin-alert/enterprise.wechat.enable"
],
"if": "${dolphin-alert/enterprise.wechat.enable}",
"then": {
"property_value_attributes": {
"visible": true
}
},
"else": {
"property_value_attributes": {
"visible": false
}
}
}
]
},
{
"config": "dolphin-alert/enterprise.wechat.agent.id",
"subsection-name": "dynamic-row1-col3",
"depends-on": [
{
"configs":[
"dolphin-alert/enterprise.wechat.enable"
],
"if": "${dolphin-alert/enterprise.wechat.enable}",
"then": {
"property_value_attributes": {
"visible": true
}
},
"else": {
"property_value_attributes": {
"visible": false
}
}
}
]
},
{
"config": "dolphin-alert/enterprise.wechat.users",
"subsection-name": "dynamic-row1-col3",
"depends-on": [
{
"configs":[
"dolphin-alert/enterprise.wechat.enable"
],
"if": "${dolphin-alert/enterprise.wechat.enable}",
"then": {
"property_value_attributes": {
"visible": true
}
},
"else": {
"property_value_attributes": {
"visible": false
}
}
}
]
}
]
},
"widgets": [
{
"config": "dolphin-env/dolphin.database.type",
"widget": {
"type": "combo"
}
},
{
"config": "dolphin-env/dolphin.database.host",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-env/dolphin.database.port",
"widget": {
"type": "text-field",
"units": [
{
"unit-name": "int"
}
]
}
},
{
"config": "dolphin-env/dolphin.database.username",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-env/dolphin.database.password",
"widget": {
"type": "password"
}
},
{
"config": "dolphin-env/dolphin.user",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-env/dolphin.group",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-env/dolphinscheduler-env-content",
"widget": {
"type": "text-area"
}
},
{
"config": "dolphin-common/res.upload.startup.type",
"widget": {
"type": "combo"
}
},
{
"config": "dolphin-common/hdfs.root.user",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-common/data.store2hdfs.basepath",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-common/fs.defaultFS",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-common/fs.s3a.endpoint",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-common/fs.s3a.access.key",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-common/fs.s3a.secret.key",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-common/hadoop.security.authentication.startup.state",
"widget": {
"type": "toggle"
}
},
{
"config": "dolphin-common/java.security.krb5.conf.path",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-common/login.user.keytab.username",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-common/login.user.keytab.path",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-alert/enterprise.wechat.enable",
"widget": {
"type": "toggle"
}
},
{
"config": "dolphin-alert/enterprise.wechat.corp.id",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-alert/enterprise.wechat.secret",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-alert/enterprise.wechat.agent.id",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-alert/enterprise.wechat.users",
"widget": {
"type": "text-field"
}
}
]
}
}

BIN
ambari_plugin/readme.pdf

Binary file not shown.

26
ambari_plugin/statcks/DOLPHIN/metainfo.xml

@ -0,0 +1,26 @@
<?xml version="1.0"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<metainfo>
<schemaVersion>2.0</schemaVersion>
<services>
<service>
<name>DOLPHIN</name>
<extends>common-services/DOLPHIN/1.2.1</extends>
</service>
</services>
</metainfo>

180
dockerfile/Dockerfile

@ -15,122 +15,78 @@
# limitations under the License.
#
FROM ubuntu:18.04
ENV LANG=C.UTF-8
ENV DEBIAN_FRONTEND=noninteractive
ARG version
ARG tar_version
#1,install jdk
RUN apt-get update \
&& apt-get -y install openjdk-8-jdk \
&& rm -rf /var/lib/apt/lists/*
ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64
FROM nginx:alpine
ARG VERSION
ENV TZ Asia/Shanghai
ENV LANG C.UTF-8
ENV DEBIAN_FRONTEND noninteractive
#1. install dos2unix shadow bash openrc python sudo vim wget iputils net-tools ssh pip kazoo.
#If installation is slow, you can replace the Alpine mirror with the Aliyun mirror. Example:
#RUN sed -i "s/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g" /etc/apk/repositories
RUN apk update && \
apk add dos2unix shadow bash openrc python sudo vim wget iputils net-tools openssh-server py2-pip && \
apk add --update procps && \
openrc boot && \
pip install kazoo
#2. install jdk
RUN apk add openjdk8
ENV JAVA_HOME /usr/lib/jvm/java-1.8-openjdk
ENV PATH $JAVA_HOME/bin:$PATH
#install wget
RUN apt-get update && \
apt-get -y install wget
#2,install ZK
#3. install zk
RUN cd /opt && \
wget https://www-us.apache.org/dist/zookeeper/zookeeper-3.4.14/zookeeper-3.4.14.tar.gz && \
tar -zxvf zookeeper-3.4.14.tar.gz && \
mv zookeeper-3.4.14 zookeeper && \
rm -rf ./zookeeper-*tar.gz && \
wget https://downloads.apache.org/zookeeper/zookeeper-3.5.7/apache-zookeeper-3.5.7-bin.tar.gz && \
tar -zxvf apache-zookeeper-3.5.7-bin.tar.gz && \
mv apache-zookeeper-3.5.7-bin zookeeper && \
mkdir -p /tmp/zookeeper && \
rm -rf ./zookeeper-*tar.gz && \
rm -rf /opt/zookeeper/conf/zoo_sample.cfg
ADD ./dockerfile/conf/zookeeper/zoo.cfg /opt/zookeeper/conf
ENV ZK_HOME=/opt/zookeeper
ENV PATH $PATH:$ZK_HOME/bin
#3,install maven
RUN cd /opt && \
wget http://apache-mirror.rbc.ru/pub/apache/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz && \
tar -zxvf apache-maven-3.3.9-bin.tar.gz && \
mv apache-maven-3.3.9 maven && \
rm -rf ./apache-maven-*tar.gz && \
rm -rf /opt/maven/conf/settings.xml
ADD ./dockerfile/conf/maven/settings.xml /opt/maven/conf
ENV MAVEN_HOME=/opt/maven
ENV PATH $PATH:$MAVEN_HOME/bin
#4,install node
RUN cd /opt && \
wget https://nodejs.org/download/release/v8.9.4/node-v8.9.4-linux-x64.tar.gz && \
tar -zxvf node-v8.9.4-linux-x64.tar.gz && \
mv node-v8.9.4-linux-x64 node && \
rm -rf ./node-v8.9.4-*tar.gz
ENV NODE_HOME=/opt/node
ENV PATH $PATH:$NODE_HOME/bin
#5,install postgresql
RUN apt-get update && \
apt-get install -y postgresql postgresql-contrib sudo && \
sed -i 's/localhost/*/g' /etc/postgresql/10/main/postgresql.conf
#6,install nginx
RUN apt-get update && \
apt-get install -y nginx && \
rm -rf /var/lib/apt/lists/* && \
echo "\ndaemon off;" >> /etc/nginx/nginx.conf && \
chown -R www-data:www-data /var/lib/nginx
#7,install sudo,python,vim,ping and ssh command
RUN apt-get update && \
apt-get -y install sudo && \
apt-get -y install python && \
apt-get -y install vim && \
apt-get -y install iputils-ping && \
apt-get -y install net-tools && \
apt-get -y install openssh-server && \
apt-get -y install python-pip && \
pip install kazoo
#8,add dolphinscheduler source code to /opt/dolphinscheduler_source
ADD . /opt/dolphinscheduler_source
#9,backend compilation
RUN cd /opt/dolphinscheduler_source && \
mvn clean package -Prelease -Dmaven.test.skip=true
#10,frontend compilation
RUN chmod -R 777 /opt/dolphinscheduler_source/dolphinscheduler-ui && \
cd /opt/dolphinscheduler_source/dolphinscheduler-ui && \
rm -rf /opt/dolphinscheduler_source/dolphinscheduler-ui/node_modules && \
npm install node-sass --unsafe-perm && \
npm install && \
npm run build
#11,modify dolphinscheduler configuration file
#backend configuration
RUN tar -zxvf /opt/dolphinscheduler_source/dolphinscheduler-dist/dolphinscheduler-backend/target/apache-dolphinscheduler-incubating-${tar_version}-dolphinscheduler-backend-bin.tar.gz -C /opt && \
mv /opt/apache-dolphinscheduler-incubating-${tar_version}-dolphinscheduler-backend-bin /opt/dolphinscheduler && \
rm -rf /opt/dolphinscheduler/conf
ADD ./dockerfile/conf/dolphinscheduler/conf /opt/dolphinscheduler/conf
#frontend nginx configuration
ADD ./dockerfile/conf/nginx/dolphinscheduler.conf /etc/nginx/conf.d
#12,open port
EXPOSE 2181 2888 3888 3306 80 12345 8888
COPY ./dockerfile/startup.sh /root/startup.sh
#13,modify permissions and set soft links
RUN chmod +x /root/startup.sh && \
chmod +x /opt/dolphinscheduler/script/create-dolphinscheduler.sh && \
chmod +x /opt/zookeeper/bin/zkServer.sh && \
chmod +x /opt/dolphinscheduler/bin/dolphinscheduler-daemon.sh && \
rm -rf /bin/sh && \
ln -s /bin/bash /bin/sh && \
mkdir -p /tmp/xls
ADD ./conf/zookeeper/zoo.cfg /opt/zookeeper/conf
ENV ZK_HOME /opt/zookeeper
ENV PATH $ZK_HOME/bin:$PATH
#4. install pg
RUN apk add postgresql postgresql-contrib
#5. add dolphinscheduler
ADD ./apache-dolphinscheduler-incubating-${VERSION}-SNAPSHOT-dolphinscheduler-bin.tar.gz /opt/
RUN mv /opt/apache-dolphinscheduler-incubating-${VERSION}-SNAPSHOT-dolphinscheduler-bin/ /opt/dolphinscheduler/
ENV DOLPHINSCHEDULER_HOME /opt/dolphinscheduler
#6. modify nginx
RUN echo "daemon off;" >> /etc/nginx/nginx.conf && \
rm -rf /etc/nginx/conf.d/*
ADD ./conf/nginx/dolphinscheduler.conf /etc/nginx/conf.d
#7. add configuration and modify permissions and set soft links
ADD ./startup-init-conf.sh /root/startup-init-conf.sh
ADD ./startup.sh /root/startup.sh
ADD ./conf/dolphinscheduler/*.tpl /opt/dolphinscheduler/conf/
ADD ./conf/dolphinscheduler/env/dolphinscheduler_env /opt/dolphinscheduler/conf/env/
RUN chmod +x /root/startup-init-conf.sh && \
chmod +x /root/startup.sh && \
chmod +x /opt/dolphinscheduler/conf/env/dolphinscheduler_env && \
chmod +x /opt/dolphinscheduler/script/*.sh && \
chmod +x /opt/dolphinscheduler/bin/*.sh && \
chmod +x /opt/zookeeper/bin/*.sh && \
dos2unix /root/startup-init-conf.sh && \
dos2unix /root/startup.sh && \
dos2unix /opt/dolphinscheduler/conf/env/dolphinscheduler_env && \
dos2unix /opt/dolphinscheduler/script/*.sh && \
dos2unix /opt/dolphinscheduler/bin/*.sh && \
dos2unix /opt/zookeeper/bin/*.sh && \
rm -rf /bin/sh && \
ln -s /bin/bash /bin/sh && \
mkdir -p /tmp/xls
#8. remove apk index cache
RUN rm -rf /var/cache/apk/*
#9. expose port
EXPOSE 2181 2888 3888 5432 12345 8888
ENTRYPOINT ["/root/startup.sh"]

311
dockerfile/README.md

@ -1,11 +1,306 @@
## Build Image
## What is Dolphin Scheduler?
Dolphin Scheduler is a distributed, easy-to-extend visual DAG workflow scheduling system, dedicated to untangling the complex dependencies in data processing so that scheduling works out of the box.
Github URL: https://github.com/apache/incubator-dolphinscheduler
Official Website: https://dolphinscheduler.apache.org
![Dolphin Scheduler](https://dolphinscheduler.apache.org/img/hlogo_colorful.svg)
[![EN doc](https://img.shields.io/badge/document-English-blue.svg)](README.md)
[![CN doc](https://img.shields.io/badge/文档-中文版-blue.svg)](README_zh_CN.md)
## How to use this docker image
#### You can start a dolphinscheduler instance
```
$ docker run -dit --name dolphinscheduler \
-e POSTGRESQL_USERNAME=test -e POSTGRESQL_PASSWORD=test \
-p 8888:8888 \
dolphinscheduler all
```
The default postgres user `root`, password `root`, and database `dolphinscheduler` are created by `startup.sh`.
A default zookeeper instance is also started by `startup.sh`.
#### Or via Environment Variables **`POSTGRESQL_HOST`** **`POSTGRESQL_PORT`** **`ZOOKEEPER_QUORUM`**
You can specify an **existing postgres service**. For example:
```
$ docker run -dit --name dolphinscheduler \
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" \
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
-p 8888:8888 \
dolphinscheduler all
```
You can specify an **existing zookeeper service**. For example:
```
$ docker run -dit --name dolphinscheduler \
-e ZOOKEEPER_QUORUM="l92.168.x.x:2181"
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
-p 8888:8888 \
dolphinscheduler all
```
#### Or start a standalone dolphinscheduler server
You can start a standalone dolphinscheduler server.
* Start a **master server**. For example:
```
$ docker run -dit --name dolphinscheduler \
-e ZOOKEEPER_QUORUM="l92.168.x.x:2181"
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" \
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
dolphinscheduler master-server
```
* Start a **worker server**. For example:
```
$ docker run -dit --name dolphinscheduler \
-e ZOOKEEPER_QUORUM="l92.168.x.x:2181"
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" \
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
dolphinscheduler worker-server
```
* Start an **api server**. For example:
```
$ docker run -dit --name dolphinscheduler \
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" \
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
-p 12345:12345 \
dolphinscheduler api-server
```
* Start an **alert server**. For example:
```
$ docker run -dit --name dolphinscheduler \
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" \
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
dolphinscheduler alert-server
```
* Start a **frontend**. For example:
```
$ docker run -dit --name dolphinscheduler \
-e FRONTEND_API_SERVER_HOST="192.168.x.x" -e FRONTEND_API_SERVER_PORT="12345" \
-p 8888:8888 \
dolphinscheduler frontend
```
**Note**: You must specify `POSTGRESQL_HOST`, `POSTGRESQL_PORT`, and `ZOOKEEPER_QUORUM` when starting a standalone dolphinscheduler server.
## How to build a docker image
You can build the docker image on a Unix-like operating system or on Windows.
On a Unix-like system, for example:
```bash
$ cd path/incubator-dolphinscheduler
$ sh ./dockerfile/hooks/build
```
On Windows, for example:
```bat
c:\incubator-dolphinscheduler>.\dockerfile\hooks\build.bat
```
Please read the `./dockerfile/hooks/build` and `./dockerfile/hooks/build.bat` scripts if anything is unclear.
## Environment Variables
The Dolphin Scheduler image uses several environment variables which are easy to miss. While none of the variables are required, they may significantly aid you in using the image.
**`POSTGRESQL_HOST`**
This environment variable sets the host for PostgreSQL. The default value is `127.0.0.1`.
**Note**: You must specify it when starting a standalone dolphinscheduler server such as `master-server`, `worker-server`, `api-server`, or `alert-server`.
**`POSTGRESQL_PORT`**
This environment variable sets the port for PostgreSQL. The default value is `5432`.
**Note**: You must specify it when starting a standalone dolphinscheduler server such as `master-server`, `worker-server`, `api-server`, or `alert-server`.
**`POSTGRESQL_USERNAME`**
This environment variable sets the username for PostgreSQL. The default value is `root`.
**`POSTGRESQL_PASSWORD`**
This environment variable sets the password for PostgreSQL. The default value is `root`.
**`DOLPHINSCHEDULER_ENV_PATH`**
This environment variable sets the runtime environment for tasks. The default value is `/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh`.
**`TASK_QUEUE`**
This environment variable sets the task queue for `master-server` and `worker-server`. The default value is `zookeeper`.
**`ZOOKEEPER_QUORUM`**
This environment variable sets the zookeeper quorum for `master-server` and `worker-server`. The default value is `127.0.0.1:2181`.
**Note**: You must specify it when starting a standalone `master-server` or `worker-server`.
**`MASTER_EXEC_THREADS`**
This environment variable sets exec thread num for `master-server`. The default value is `100`.
**`MASTER_EXEC_TASK_NUM`**
This environment variable sets exec task num for `master-server`. The default value is `20`.
**`MASTER_HEARTBEAT_INTERVAL`**
This environment variable sets heartbeat interval for `master-server`. The default value is `10`.
**`MASTER_TASK_COMMIT_RETRYTIMES`**
This environment variable sets task commit retry times for `master-server`. The default value is `5`.
**`MASTER_TASK_COMMIT_INTERVAL`**
This environment variable sets task commit interval for `master-server`. The default value is `1000`.
**`MASTER_MAX_CPULOAD_AVG`**
This environment variable sets max cpu load avg for `master-server`. The default value is `100`.
**`MASTER_RESERVED_MEMORY`**
This environment variable sets reserved memory for `master-server`. The default value is `0.1`.
**`WORKER_EXEC_THREADS`**
This environment variable sets exec thread num for `worker-server`. The default value is `100`.
**`WORKER_HEARTBEAT_INTERVAL`**
This environment variable sets heartbeat interval for `worker-server`. The default value is `10`.
**`WORKER_FETCH_TASK_NUM`**
This environment variable sets fetch task num for `worker-server`. The default value is `3`.
**`WORKER_MAX_CPULOAD_AVG`**
This environment variable sets max cpu load avg for `worker-server`. The default value is `100`.
**`WORKER_RESERVED_MEMORY`**
This environment variable sets reserved memory for `worker-server`. The default value is `0.1`.
**`XLS_FILE_PATH`**
This environment variable sets xls file path for `alert-server`. The default value is `/tmp/xls`.
**`MAIL_SERVER_HOST`**
This environment variable sets mail server host for `alert-server`. The default value is empty.
**`MAIL_SERVER_PORT`**
This environment variable sets mail server port for `alert-server`. The default value is empty.
**`MAIL_SENDER`**
This environment variable sets mail sender for `alert-server`. The default value is empty.
**`MAIL_USER`**
This environment variable sets mail user for `alert-server`. The default value is empty.
**`MAIL_PASSWD`**
This environment variable sets mail password for `alert-server`. The default value is empty.
**`MAIL_SMTP_STARTTLS_ENABLE`**
This environment variable sets SMTP tls for `alert-server`. The default value is `true`.
**`MAIL_SMTP_SSL_ENABLE`**
This environment variable sets SMTP ssl for `alert-server`. The default value is `false`.
**`MAIL_SMTP_SSL_TRUST`**
This environment variable sets SMTP SSL trust for `alert-server`. The default value is empty.
**`ENTERPRISE_WECHAT_ENABLE`**
This environment variable sets enterprise wechat enable for `alert-server`. The default value is `false`.
**`ENTERPRISE_WECHAT_CORP_ID`**
This environment variable sets enterprise wechat corp id for `alert-server`. The default value is empty.
**`ENTERPRISE_WECHAT_SECRET`**
This environment variable sets enterprise wechat secret for `alert-server`. The default value is empty.
**`ENTERPRISE_WECHAT_AGENT_ID`**
This environment variable sets enterprise wechat agent id for `alert-server`. The default value is empty.
**`ENTERPRISE_WECHAT_USERS`**
This environment variable sets enterprise wechat users for `alert-server`. The default value is empty.
**`FRONTEND_API_SERVER_HOST`**
This environment variable sets api server host for `frontend`. The default value is `127.0.0.1`.
**Note**: You must specify it when starting a standalone `frontend` server so that it can reach your `api-server`.
**`FRONTEND_API_SERVER_PORT`**
This environment variable sets the api server port for `frontend`. The default value is `12345`.
**Note**: You must specify it when starting a standalone `frontend` server.
## Initialization scripts
If you would like to do additional initialization in an image derived from this one, add one or more environment variables under `/root/startup-init-conf.sh`, and modify the template files in `/opt/dolphinscheduler/conf/*.tpl`.
For example, to add an environment variable `API_SERVER_PORT` in `/root/startup-init-conf.sh`:
```
export API_SERVER_PORT=5555
```
and modify the template file `/opt/dolphinscheduler/conf/application-api.properties.tpl`, adding the server port:
```
server.port=${API_SERVER_PORT}
```
`/root/start-init-conf.sh` will dynamically generate the config files:
```sh
echo "generate app config"
ls ${DOLPHINSCHEDULER_HOME}/conf/ | grep ".tpl" | while read line; do
eval "cat << EOF
$(cat ${DOLPHINSCHEDULER_HOME}/conf/${line})
EOF
" > ${DOLPHINSCHEDULER_HOME}/conf/${line%.*}
done
echo "generate nginx config"
sed -i "s/FRONTEND_API_SERVER_HOST/${FRONTEND_API_SERVER_HOST}/g" /etc/nginx/conf.d/dolphinscheduler.conf
sed -i "s/FRONTEND_API_SERVER_PORT/${FRONTEND_API_SERVER_PORT}/g" /etc/nginx/conf.d/dolphinscheduler.conf
```
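For reference, the `eval "cat << EOF ... EOF"` trick above re-reads each `*.tpl` file as a shell here-document, so every `${VAR}` placeholder is expanded from the current environment and the result is written to a file of the same name without the `.tpl` suffix. A minimal sketch of the same mechanism (the file name is illustrative):
```sh
export API_SERVER_PORT=5555
# a template containing a literal, unexpanded placeholder
echo 'server.port=${API_SERVER_PORT}' > application-api.properties.tpl
# re-read the template as a here-document so the shell expands the placeholder
eval "cat << EOF
$(cat application-api.properties.tpl)
EOF
" > application-api.properties
cat application-api.properties   # prints: server.port=5555
```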

306
dockerfile/README_zh_CN.md

@@ -0,0 +1,306 @@
## What is Dolphin Scheduler?
A distributed and easy-to-extend visual DAG workflow task scheduling system, dedicated to solving the tangled dependencies in data processing and making the scheduling system `out of the box` for data processing pipelines.
Github URL: https://github.com/apache/incubator-dolphinscheduler
Official Website: https://dolphinscheduler.apache.org
![Dolphin Scheduler](https://dolphinscheduler.apache.org/img/hlogo_colorful.svg)
[![EN doc](https://img.shields.io/badge/document-English-blue.svg)](README.md)
[![CN doc](https://img.shields.io/badge/文档-中文版-blue.svg)](README_zh_CN.md)
## How to use this docker image
#### You can run a dolphinscheduler instance
```
$ docker run -dit --name dolphinscheduler \
-e POSTGRESQL_USERNAME=test -e POSTGRESQL_PASSWORD=test \
-p 8888:8888 \
dolphinscheduler all
```
In the `startup.sh` script, a default `Postgres` user, password, and database are created, with the default values `root`, `root`, and `dolphinscheduler` respectively.
A default `Zookeeper` is also created in the `startup.sh` script.
#### Or use existing services via the environment variables **`POSTGRESQL_HOST`** **`POSTGRESQL_PORT`** **`ZOOKEEPER_QUORUM`**
You can specify an existing **`Postgres`** service as follows:
```
$ docker run -dit --name dolphinscheduler \
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" \
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
-p 8888:8888 \
dolphinscheduler all
```
You can also specify an existing **Zookeeper** service as follows:
```
$ docker run -dit --name dolphinscheduler \
-e ZOOKEEPER_QUORUM="l92.168.x.x:2181"
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
-p 8888:8888 \
dolphinscheduler all
```
#### Or run some of the services in dolphinscheduler
You can run some of the services in dolphinscheduler.
* Start a **master server**, as follows:
```
$ docker run -dit --name dolphinscheduler \
-e ZOOKEEPER_QUORUM="l92.168.x.x:2181"
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" \
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
dolphinscheduler master-server
```
* Start a **worker server**, as follows:
```
$ docker run -dit --name dolphinscheduler \
-e ZOOKEEPER_QUORUM="l92.168.x.x:2181"
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" \
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
dolphinscheduler worker-server
```
* Start an **api server**, as follows:
```
$ docker run -dit --name dolphinscheduler \
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" \
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
-p 12345:12345 \
dolphinscheduler api-server
```
* Start an **alert server**, as follows:
```
$ docker run -dit --name dolphinscheduler \
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" \
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
dolphinscheduler alert-server
```
* Start a **frontend**, as follows:
```
$ docker run -dit --name dolphinscheduler \
-e FRONTEND_API_SERVER_HOST="192.168.x.x" -e FRONTEND_API_SERVER_PORT="12345" \
-p 8888:8888 \
dolphinscheduler frontend
```
**Note**: You must specify the environment variables `POSTGRESQL_HOST` `POSTGRESQL_PORT` `ZOOKEEPER_QUORUM` when you run only some of the services in dolphinscheduler.
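For instance, a common split is to run the `api-server` and the `frontend` as two separate containers and point the latter at the former (the container names and addresses below are illustrative):
```
$ docker run -dit --name dolphinscheduler-api \
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" \
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
-p 12345:12345 \
dolphinscheduler api-server
$ docker run -dit --name dolphinscheduler-frontend \
-e FRONTEND_API_SERVER_HOST="192.168.x.x" -e FRONTEND_API_SERVER_PORT="12345" \
-p 8888:8888 \
dolphinscheduler frontend
```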
## How to build a docker image
You can build a docker image on Unix-like systems and on Windows.
On Unix-like systems, as follows:
```bash
$ cd path/incubator-dolphinscheduler
$ sh ./dockerfile/hooks/build
```
On Windows, as follows:
```bat
c:\incubator-dolphinscheduler>.\dockerfile\hooks\build.bat
```
If you don't understand the scripts `./dockerfile/hooks/build` `./dockerfile/hooks/build.bat`, please read their contents.
## Environment Variables
The Dolphin Scheduler image uses several environment variables that are easy to miss. While none of them is required, they help you configure the image more easily and define the corresponding service configuration for your needs.
**`POSTGRESQL_HOST`**
This environment variable sets the host for `PostgreSQL`. The default value is `127.0.0.1`.
**Note**: You must specify this environment variable when running the `master-server`, `worker-server`, `api-server`, or `alert-server` services of `dolphinscheduler`, so that you can better build a distributed service.
**`POSTGRESQL_PORT`**
This environment variable sets the port for `PostgreSQL`. The default value is `5432`.
**Note**: You must specify this environment variable when running the `master-server`, `worker-server`, `api-server`, or `alert-server` services of `dolphinscheduler`, so that you can better build a distributed service.
**`POSTGRESQL_USERNAME`**
This environment variable sets the username for `PostgreSQL`. The default value is `root`.
**`POSTGRESQL_PASSWORD`**
This environment variable sets the password for `PostgreSQL`. The default value is `root`.
**`DOLPHINSCHEDULER_ENV_PATH`**
This environment variable sets the environment variable config file for task execution. The default value is `/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh`.
**`TASK_QUEUE`**
This environment variable sets the `Zookeeper` task queue name for `master-server` and `worker-server`. The default value is `zookeeper`.
**`ZOOKEEPER_QUORUM`**
This environment variable sets the `Zookeeper` address for `master-server` and `worker-server`. The default value is `127.0.0.1:2181`.
**Note**: You must specify this environment variable when running the `master-server` or `worker-server` services of `dolphinscheduler`, so that you can better build a distributed service.
**`MASTER_EXEC_THREADS`**
This environment variable sets the exec thread num for `master-server`. The default value is `100`.
**`MASTER_EXEC_TASK_NUM`**
This environment variable sets the exec task num for `master-server`. The default value is `20`.
**`MASTER_HEARTBEAT_INTERVAL`**
This environment variable sets the heartbeat interval for `master-server`. The default value is `10`.
**`MASTER_TASK_COMMIT_RETRYTIMES`**
This environment variable sets the task commit retry times for `master-server`. The default value is `5`.
**`MASTER_TASK_COMMIT_INTERVAL`**
This environment variable sets the task commit interval for `master-server`. The default value is `1000`.
**`MASTER_MAX_CPULOAD_AVG`**
This environment variable sets the max CPU `load average` for `master-server`. The default value is `100`.
**`MASTER_RESERVED_MEMORY`**
This environment variable sets the reserved memory for `master-server`. The default value is `0.1`.
**`WORKER_EXEC_THREADS`**
This environment variable sets the exec thread num for `worker-server`. The default value is `100`.
**`WORKER_HEARTBEAT_INTERVAL`**
This environment variable sets the heartbeat interval for `worker-server`. The default value is `10`.
**`WORKER_FETCH_TASK_NUM`**
This environment variable sets the fetch task num for `worker-server`. The default value is `3`.
**`WORKER_MAX_CPULOAD_AVG`**
This environment variable sets the max CPU `load average` for `worker-server`. The default value is `100`.
**`WORKER_RESERVED_MEMORY`**
This environment variable sets the reserved memory for `worker-server`. The default value is `0.1`.
**`XLS_FILE_PATH`**
This environment variable sets the `XLS` file storage path for `alert-server`. The default value is `/tmp/xls`.
**`MAIL_SERVER_HOST`**
This environment variable sets the mail server host for `alert-server`. The default value is empty.
**`MAIL_SERVER_PORT`**
This environment variable sets the mail server port for `alert-server`. The default value is empty.
**`MAIL_SENDER`**
This environment variable sets the mail sender for `alert-server`. The default value is empty.
**`MAIL_USER`**
This environment variable sets the mail user for `alert-server`. The default value is empty.
**`MAIL_PASSWD`**
This environment variable sets the mail password for `alert-server`. The default value is empty.
**`MAIL_SMTP_STARTTLS_ENABLE`**
This environment variable sets whether SMTP STARTTLS is enabled for `alert-server`. The default value is `true`.
**`MAIL_SMTP_SSL_ENABLE`**
This environment variable sets whether SMTP SSL is enabled for `alert-server`. The default value is `false`.
**`MAIL_SMTP_SSL_TRUST`**
This environment variable sets the SMTP SSL trust host for `alert-server`. The default value is empty.
**`ENTERPRISE_WECHAT_ENABLE`**
This environment variable sets whether enterprise wechat is enabled for `alert-server`. The default value is `false`.
**`ENTERPRISE_WECHAT_CORP_ID`**
This environment variable sets the enterprise wechat corp `ID` for `alert-server`. The default value is empty.
**`ENTERPRISE_WECHAT_SECRET`**
This environment variable sets the enterprise wechat `SECRET` for `alert-server`. The default value is empty.
**`ENTERPRISE_WECHAT_AGENT_ID`**
This environment variable sets the enterprise wechat `AGENT_ID` for `alert-server`. The default value is empty.
**`ENTERPRISE_WECHAT_USERS`**
This environment variable sets the enterprise wechat `USERS` for `alert-server`. The default value is empty.
**`FRONTEND_API_SERVER_HOST`**
This environment variable sets the `api-server` host that `frontend` connects to. The default value is `127.0.0.1`.
**Note**: You should specify this value when running the `api-server` standalone.
**`FRONTEND_API_SERVER_PORT`**
This environment variable sets the `api-server` port that `frontend` connects to. The default value is `12345`.
**Note**: You should specify this value when running the `api-server` standalone.
## Initialization scripts
If you would like to attach additional operations and add environment variables at build time or runtime, you can modify the `/root/start-init-conf.sh` file; if configuration files are involved, please modify the corresponding template files in `/opt/dolphinscheduler/conf/*.tpl`.
For example, to add an environment variable `API_SERVER_PORT` in `/root/start-init-conf.sh`:
```
export API_SERVER_PORT=5555
```
After adding the environment variable above, you should add its configuration to the corresponding template file `/opt/dolphinscheduler/conf/application-api.properties.tpl`:
```
server.port=${API_SERVER_PORT}
```
`/root/start-init-conf.sh` will dynamically generate the config files from the template files:
```sh
echo "generate app config"
ls ${DOLPHINSCHEDULER_HOME}/conf/ | grep ".tpl" | while read line; do
eval "cat << EOF
$(cat ${DOLPHINSCHEDULER_HOME}/conf/${line})
EOF
" > ${DOLPHINSCHEDULER_HOME}/conf/${line%.*}
done
echo "generate nginx config"
sed -i "s/FRONTEND_API_SERVER_HOST/${FRONTEND_API_SERVER_HOST}/g" /etc/nginx/conf.d/dolphinscheduler.conf
sed -i "s/FRONTEND_API_SERVER_PORT/${FRONTEND_API_SERVER_PORT}/g" /etc/nginx/conf.d/dolphinscheduler.conf
```

32
dockerfile/conf/dolphinscheduler/conf/alert.properties → dockerfile/conf/dolphinscheduler/alert.properties.tpl

@@ -14,33 +14,33 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
#alert type is EMAIL/SMS
alert.type=EMAIL
# alter msg template, default is html template
#alert.template=html
# mail server configuration
mail.protocol=SMTP
mail.server.host=smtp.126.com
mail.server.port=
mail.sender=dolphinscheduler@126.com
mail.user=dolphinscheduler@126.com
mail.passwd=escheduler123
mail.server.host=${MAIL_SERVER_HOST}
mail.server.port=${MAIL_SERVER_PORT}
mail.sender=${MAIL_SENDER}
mail.user=${MAIL_USER}
mail.passwd=${MAIL_PASSWD}
# TLS
mail.smtp.starttls.enable=false
mail.smtp.starttls.enable=${MAIL_SMTP_STARTTLS_ENABLE}
# SSL
mail.smtp.ssl.enable=true
mail.smtp.ssl.trust=smtp.126.com
mail.smtp.ssl.enable=${MAIL_SMTP_SSL_ENABLE}
mail.smtp.ssl.trust=${MAIL_SMTP_SSL_TRUST}
#xls file path,need create if not exist
xls.file.path=/tmp/xls
xls.file.path=${XLS_FILE_PATH}
# Enterprise WeChat configuration
enterprise.wechat.enable=false
enterprise.wechat.corp.id=xxxxxxx
enterprise.wechat.secret=xxxxxxx
enterprise.wechat.agent.id=xxxxxxx
enterprise.wechat.users=xxxxxxx
enterprise.wechat.enable=${ENTERPRISE_WECHAT_ENABLE}
enterprise.wechat.corp.id=${ENTERPRISE_WECHAT_CORP_ID}
enterprise.wechat.secret=${ENTERPRISE_WECHAT_SECRET}
enterprise.wechat.agent.id=${ENTERPRISE_WECHAT_AGENT_ID}
enterprise.wechat.users=${ENTERPRISE_WECHAT_USERS}
enterprise.wechat.token.url=https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret
enterprise.wechat.push.url=https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token
enterprise.wechat.team.send.msg={\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}

17
dockerfile/conf/dolphinscheduler/conf/application-api.properties → dockerfile/conf/dolphinscheduler/application-api.properties.tpl

@@ -14,27 +14,24 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
logging.config=classpath:apiserver_logback.xml
# server port
server.port=12345
# session config
server.servlet.session.timeout=7200
# servlet config
server.servlet.context-path=/dolphinscheduler/
# file size limit for upload
spring.servlet.multipart.max-file-size=1024MB
spring.servlet.multipart.max-request-size=1024MB
#post content
# post content
server.jetty.max-http-post-size=5000000
# i18n
spring.messages.encoding=UTF-8
#i18n classpath folder , file prefix messages, if have many files, use "," seperator
spring.messages.basename=i18n/messages
# Authentication types (supported types: PASSWORD)
security.authentication.type=PASSWORD

60
dockerfile/conf/dolphinscheduler/conf/application-dao.properties → dockerfile/conf/dolphinscheduler/application.properties.tpl

@@ -17,70 +17,57 @@
# base spring data source configuration
spring.datasource.type=com.alibaba.druid.pool.DruidDataSource
# postgresql
# postgre
spring.datasource.driver-class-name=org.postgresql.Driver
spring.datasource.url=jdbc:postgresql://127.0.0.1:5432/dolphinscheduler
spring.datasource.username=root
spring.datasource.password=root@123
spring.datasource.url=jdbc:postgresql://${POSTGRESQL_HOST}:${POSTGRESQL_PORT}/dolphinscheduler?characterEncoding=utf8
# mysql
#spring.datasource.driver-class-name=com.mysql.jdbc.Driver
#spring.datasource.url=jdbc:mysql://192.168.xx.xx:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8
spring.datasource.username=${POSTGRESQL_USERNAME}
spring.datasource.password=${POSTGRESQL_PASSWORD}
# connection configuration
spring.datasource.initialSize=5
# min connection number
spring.datasource.minIdle=5
# max connection number
spring.datasource.maxActive=50
# max wait time for get a connection in milliseconds. if configuring maxWait, fair locks are enabled by default and concurrency efficiency decreases.
# If necessary, unfair locks can be used by configuring the useUnfairLock attribute to true.
spring.datasource.maxWait=60000
# milliseconds for check to close free connections
spring.datasource.timeBetweenEvictionRunsMillis=60000
# the Destroy thread detects the connection interval and closes the physical connection in milliseconds if the connection idle time is greater than or equal to minEvictableIdleTimeMillis.
spring.datasource.timeBetweenConnectErrorMillis=60000
# the longest time a connection remains idle without being evicted, in milliseconds
spring.datasource.minEvictableIdleTimeMillis=300000
#the SQL used to check whether the connection is valid requires a query statement. If validation Query is null, testOnBorrow, testOnReturn, and testWhileIdle will not work.
spring.datasource.validationQuery=SELECT 1
#check whether the connection is valid for timeout, in seconds
spring.datasource.validationQueryTimeout=3
# when applying for a connection, if it is detected that the connection is idle longer than time Between Eviction Runs Millis,
# validation Query is performed to check whether the connection is valid
spring.datasource.testWhileIdle=true
#execute validation to check if the connection is valid when applying for a connection
spring.datasource.testOnBorrow=true
#execute validation to check if the connection is valid when the connection is returned
spring.datasource.testOnReturn=false
spring.datasource.defaultAutoCommit=true
spring.datasource.keepAlive=true
# open PSCache, specify count PSCache for every connection
spring.datasource.poolPreparedStatements=true
spring.datasource.maxPoolPreparedStatementPerConnectionSize=20
spring.datasource.spring.datasource.filters=stat,wall,log4j
spring.datasource.connectionProperties=druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000
#mybatis
mybatis-plus.mapper-locations=classpath*:/org.apache.dolphinscheduler.dao.mapper/*.xml
mybatis-plus.typeEnumsPackage=org.apache.dolphinscheduler.*.enums
#Entity scan, where multiple packages are separated by a comma or semicolon
mybatis-plus.typeAliasesPackage=org.apache.dolphinscheduler.dao.entity
#Primary key type AUTO:" database ID AUTO ", INPUT:" user INPUT ID", ID_WORKER:" global unique ID (numeric type unique ID)", UUID:" global unique ID UUID";
mybatis-plus.global-config.db-config.id-type=AUTO
#Field policy IGNORED:" ignore judgment ",NOT_NULL:" not NULL judgment "),NOT_EMPTY:" not NULL judgment"
mybatis-plus.global-config.db-config.field-strategy=NOT_NULL
#The hump underline is converted
mybatis-plus.global-config.db-config.column-underline=true
mybatis-plus.global-config.db-config.logic-delete-value=-1
@@ -92,12 +79,37 @@ mybatis-plus.configuration.cache-enabled=false
mybatis-plus.configuration.call-setters-on-nulls=true
mybatis-plus.configuration.jdbc-type-for-null=null
# master settings
# master execute thread num
master.exec.threads=${MASTER_EXEC_THREADS}
# master execute task number in parallel
master.exec.task.num=${MASTER_EXEC_TASK_NUM}
# master heartbeat interval
master.heartbeat.interval=${MASTER_HEARTBEAT_INTERVAL}
# master commit task retry times
master.task.commit.retryTimes=${MASTER_TASK_COMMIT_RETRYTIMES}
# master commit task interval
master.task.commit.interval=${MASTER_TASK_COMMIT_INTERVAL}
# only less than cpu avg load, master server can work. default value : the number of cpu cores * 2
master.max.cpuload.avg=${MASTER_MAX_CPULOAD_AVG}
# only larger than reserved memory, master server can work. default value : physical memory * 1/10, unit is G.
master.reserved.memory=${MASTER_RESERVED_MEMORY}
# worker settings
# worker execute thread num
worker.exec.threads=${WORKER_EXEC_THREADS}
# worker heartbeat interval
worker.heartbeat.interval=${WORKER_HEARTBEAT_INTERVAL}
# submit the number of tasks at a time
worker.fetch.task.num=${WORKER_FETCH_TASK_NUM}
# only less than cpu avg load, worker server can work. default value : the number of cpu cores * 2
worker.max.cpuload.avg=${WORKER_MAX_CPULOAD_AVG}
# only larger than reserved memory, worker server can work. default value : physical memory * 1/6, unit is G.
worker.reserved.memory=${WORKER_RESERVED_MEMORY}
# data quality analysis is not currently in use. please ignore the following configuration
# task record flag
# task record
task.record.flag=false
task.record.datasource.url=jdbc:mysql://192.168.xx.xx:3306/etl?characterEncoding=UTF-8
task.record.datasource.username=xx
task.record.datasource.password=xx
# Logger Config
#logging.level.org.apache.dolphinscheduler.dao=debug

69
dockerfile/conf/dolphinscheduler/conf/common/common.properties → dockerfile/conf/dolphinscheduler/common.properties.tpl

@@ -16,44 +16,69 @@
#
#task queue implementation, default "zookeeper"
dolphinscheduler.queue.impl=zookeeper
dolphinscheduler.queue.impl=${TASK_QUEUE}
# user data directory path, self configuration, please make sure the directory exists and have read write permissions
data.basedir.path=/tmp/dolphinscheduler
#zookeeper cluster. multiple are separated by commas. eg. 192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181
zookeeper.quorum=${ZOOKEEPER_QUORUM}
#dolphinscheduler root directory
zookeeper.dolphinscheduler.root=/dolphinscheduler
#dolphinscheduler failover directory
zookeeper.session.timeout=300
zookeeper.connection.timeout=300
zookeeper.retry.base.sleep=100
zookeeper.retry.max.sleep=30000
zookeeper.retry.maxtime=5
# directory path for user data download. self configuration, please make sure the directory exists and have read write permissions
data.download.basedir.path=/tmp/dolphinscheduler/download
#============================================================================
# System
#============================================================================
# system env path. self configuration, please make sure the directory and file exists and have read write execute permissions
dolphinscheduler.env.path=${DOLPHINSCHEDULER_ENV_PATH}
#resource.view.suffixs
resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml,properties
# is development state? default "false"
development.state=true
# process execute directory. self configuration, please make sure the directory exists and have read write permissions
process.exec.basepath=/tmp/dolphinscheduler/exec
# resource upload startup type : HDFS,S3,NONE
res.upload.startup.type=NONE
#============================================================================
# HDFS
#============================================================================
# Users who have permission to create directories under the HDFS root path
hdfs.root.user=hdfs
# data base dir, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions。"/dolphinscheduler" is recommended
data.store2hdfs.basepath=/dolphinscheduler
# resource upload startup type : HDFS,S3,NONE
res.upload.startup.type=NONE
# user data directory path, self configuration, please make sure the directory exists and have read write permissions
data.basedir.path=/tmp/dolphinscheduler
# directory path for user data download. self configuration, please make sure the directory exists and have read write permissions
data.download.basedir.path=/tmp/dolphinscheduler/download
# process execute directory. self configuration, please make sure the directory exists and have read write permissions
process.exec.basepath=/tmp/dolphinscheduler/exec
# whether kerberos starts
hadoop.security.authentication.startup.state=false
# java.security.krb5.conf path
java.security.krb5.conf.path=/opt/krb5.conf
# loginUserFromKeytab user
login.user.keytab.username=hdfs-mycluster@ESZ.COM
# loginUserFromKeytab path
login.user.keytab.path=/opt/hdfs.headless.keytab
# system env path. self configuration, please make sure the directory and file exists and have read write execute permissions
dolphinscheduler.env.path=/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh
#============================================================================
# S3
#============================================================================
# ha or single namenode,If namenode ha needs to copy core-site.xml and hdfs-site.xml
# to the conf directory,support s3,for example : s3a://dolphinscheduler
fs.defaultFS=hdfs://mycluster:8020
# s3 need,s3 endpoint
fs.s3a.endpoint=http://192.168.199.91:9010
# s3 need,s3 access key
fs.s3a.access.key=A3DXS30FO22544RE
# s3 need,s3 secret key
fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK
#resourcemanager ha note this need ips , this empty if single
yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx
# If it is a single resourcemanager, you only need to configure one host name. If it is resourcemanager HA, the default configuration is fine
yarn.application.status.address=http://ark1:8088/ws/v1/cluster/apps/%s
#resource.view.suffixs
resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml
# is development state? default "false"
development.state=true

49
dockerfile/conf/dolphinscheduler/conf/alert_logback.xml

@@ -1,49 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
<property name="log.base" value="logs" />
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="ALERTLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.base}/dolphinscheduler-alert.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/dolphinscheduler-alert.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>20</maxHistory>
<maxFileSize>64MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="ALERTLOGFILE"/>
</root>
</configuration>

60
dockerfile/conf/dolphinscheduler/conf/apiserver_logback.xml

@@ -1,60 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds">
<logger name="org.apache.zookeeper" level="WARN"/>
<logger name="org.apache.hbase" level="WARN"/>
<logger name="org.apache.hadoop" level="WARN"/>
<property name="log.base" value="logs" />
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="APISERVERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- Log level filter -->
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<file>${log.base}/dolphinscheduler-api-server.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/dolphinscheduler-api-server.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>168</maxHistory>
<maxFileSize>64MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="APISERVERLOGFILE" />
</root>
</configuration>

80
dockerfile/conf/dolphinscheduler/conf/combined_logback.xml

@@ -1,80 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds">
<property name="log.base" value="logs"/>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
%highlight([%level]) %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{10}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="TASKLOGFILE" class="ch.qos.logback.classic.sift.SiftingAppender">
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<filter class="org.apache.dolphinscheduler.common.log.TaskLogFilter"></filter>
<Discriminator class="org.apache.dolphinscheduler.common.log.TaskLogDiscriminator">
<key>taskAppId</key>
<logBase>${log.base}</logBase>
</Discriminator>
<sift>
<appender name="FILE-${taskAppId}" class="ch.qos.logback.core.FileAppender">
<file>${log.base}/${taskAppId}.log</file>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
<append>true</append>
</appender>
</sift>
</appender>
<appender name="COMBINEDLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.base}/dolphinscheduler-combined.log</file>
<filter class="org.apache.dolphinscheduler.common.log.WorkerLogFilter">
<level>INFO</level>
</filter>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/dolphinscheduler-combined.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>168</maxHistory>
<maxFileSize>200MB</maxFileSize>
</rollingPolicy>
     
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
  
</appender>
<root level="INFO">
<appender-ref ref="STDOUT"/>
<appender-ref ref="TASKLOGFILE"/>
<appender-ref ref="COMBINEDLOGFILE"/>
</root>
</configuration>

35
dockerfile/conf/dolphinscheduler/conf/common/hadoop/hadoop.properties

@@ -1,35 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ha or single namenode,If namenode ha needs to copy core-site.xml and hdfs-site.xml
# to the conf directory,support s3,for example : s3a://dolphinscheduler
fs.defaultFS=hdfs://mycluster:8020
# s3 need,s3 endpoint
fs.s3a.endpoint=http://192.168.199.91:9010
# s3 need,s3 access key
fs.s3a.access.key=A3DXS30FO22544RE
# s3 need,s3 secret key
fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK
#resourcemanager ha note this need ips , this empty if single
yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx
# If it is a single resourcemanager, you only need to configure one host name. If it is resourcemanager HA, the default configuration is fine
yarn.application.status.address=http://ark1:8088/ws/v1/cluster/apps/%s

252
dockerfile/conf/dolphinscheduler/conf/i18n/messages.properties

@@ -1,252 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
QUERY_SCHEDULE_LIST_NOTES=query schedule list
EXECUTE_PROCESS_TAG=execute process related operation
PROCESS_INSTANCE_EXECUTOR_TAG=process instance executor related operation
RUN_PROCESS_INSTANCE_NOTES=run process instance
START_NODE_LIST=start node list(node name)
TASK_DEPEND_TYPE=task depend type
COMMAND_TYPE=command type
RUN_MODE=run mode
TIMEOUT=timeout
EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=execute action to process instance
EXECUTE_TYPE=execute type
START_CHECK_PROCESS_DEFINITION_NOTES=start check process definition
GET_RECEIVER_CC_NOTES=query receiver cc
DESC=description
GROUP_NAME=group name
GROUP_TYPE=group type
QUERY_ALERT_GROUP_LIST_NOTES=query alert group list
UPDATE_ALERT_GROUP_NOTES=update alert group
DELETE_ALERT_GROUP_BY_ID_NOTES=delete alert group by id
VERIFY_ALERT_GROUP_NAME_NOTES=verify alert group name, check alert group exist or not
GRANT_ALERT_GROUP_NOTES=grant alert group
USER_IDS=user id list
ALERT_GROUP_TAG=alert group related operation
CREATE_ALERT_GROUP_NOTES=create alert group
WORKER_GROUP_TAG=worker group related operation
SAVE_WORKER_GROUP_NOTES=create worker group
WORKER_GROUP_NAME=worker group name
WORKER_IP_LIST=worker ip list, eg. 192.168.1.1,192.168.1.2
QUERY_WORKER_GROUP_PAGING_NOTES=query worker group paging
QUERY_WORKER_GROUP_LIST_NOTES=query worker group list
DELETE_WORKER_GROUP_BY_ID_NOTES=delete worker group by id
DATA_ANALYSIS_TAG=analysis related operation of task state
COUNT_TASK_STATE_NOTES=count task state
COUNT_PROCESS_INSTANCE_NOTES=count process instance state
COUNT_PROCESS_DEFINITION_BY_USER_NOTES=count process definition by user
COUNT_COMMAND_STATE_NOTES=count command state
COUNT_QUEUE_STATE_NOTES=count the running status of the task in the queue\
ACCESS_TOKEN_TAG=access token related operation
MONITOR_TAG=monitor related operation
MASTER_LIST_NOTES=master server list
WORKER_LIST_NOTES=worker server list
QUERY_DATABASE_STATE_NOTES=query database state
QUERY_ZOOKEEPER_STATE_NOTES=QUERY ZOOKEEPER STATE
TASK_STATE=task instance state
SOURCE_TABLE=SOURCE TABLE
DEST_TABLE=dest table
TASK_DATE=task date
QUERY_HISTORY_TASK_RECORD_LIST_PAGING_NOTES=query history task record list paging
DATA_SOURCE_TAG=data source related operation
CREATE_DATA_SOURCE_NOTES=create data source
DATA_SOURCE_NAME=data source name
DATA_SOURCE_NOTE=data source desc
DB_TYPE=database type
DATA_SOURCE_HOST=DATA SOURCE HOST
DATA_SOURCE_PORT=data source port
DATABASE_NAME=database name
QUEUE_TAG=queue related operation
QUERY_QUEUE_LIST_NOTES=query queue list
QUERY_QUEUE_LIST_PAGING_NOTES=query queue list paging
CREATE_QUEUE_NOTES=create queue
YARN_QUEUE_NAME=yarn(hadoop) queue name
QUEUE_ID=queue id
TENANT_DESC=tenant desc
QUERY_TENANT_LIST_PAGING_NOTES=query tenant list paging
QUERY_TENANT_LIST_NOTES=query tenant list
UPDATE_TENANT_NOTES=update tenant
DELETE_TENANT_NOTES=delete tenant
RESOURCES_TAG=resource center related operation
CREATE_RESOURCE_NOTES=create resource
RESOURCE_TYPE=resource file type
RESOURCE_NAME=resource name
RESOURCE_DESC=resource file desc
RESOURCE_FILE=resource file
RESOURCE_ID=resource id
QUERY_RESOURCE_LIST_NOTES=query resource list
DELETE_RESOURCE_BY_ID_NOTES=delete resource by id
VIEW_RESOURCE_BY_ID_NOTES=view resource by id
ONLINE_CREATE_RESOURCE_NOTES=online create resource
SUFFIX=resource file suffix
CONTENT=resource file content
UPDATE_RESOURCE_NOTES=edit resource file online
DOWNLOAD_RESOURCE_NOTES=download resource file
CREATE_UDF_FUNCTION_NOTES=create udf function
UDF_TYPE=UDF type
FUNC_NAME=function name
CLASS_NAME=package and class name
ARG_TYPES=arguments
UDF_DESC=udf desc
VIEW_UDF_FUNCTION_NOTES=view udf function
UPDATE_UDF_FUNCTION_NOTES=update udf function
QUERY_UDF_FUNCTION_LIST_PAGING_NOTES=query udf function list paging
VERIFY_UDF_FUNCTION_NAME_NOTES=verify udf function name
DELETE_UDF_FUNCTION_NOTES=delete udf function
AUTHORIZED_FILE_NOTES=authorized file
UNAUTHORIZED_FILE_NOTES=unauthorized file
AUTHORIZED_UDF_FUNC_NOTES=authorized udf func
UNAUTHORIZED_UDF_FUNC_NOTES=unauthorized udf func
VERIFY_QUEUE_NOTES=verify queue
TENANT_TAG=tenant related operation
CREATE_TENANT_NOTES=create tenant
TENANT_CODE=tenant code
TENANT_NAME=tenant name
QUEUE_NAME=queue name
PASSWORD=password
DATA_SOURCE_OTHER=jdbc connection params, format:{"key1":"value1",...}
PROJECT_TAG=project related operation
CREATE_PROJECT_NOTES=create project
PROJECT_DESC=project description
UPDATE_PROJECT_NOTES=update project
PROJECT_ID=project id
QUERY_PROJECT_BY_ID_NOTES=query project info by project id
QUERY_PROJECT_LIST_PAGING_NOTES=QUERY PROJECT LIST PAGING
DELETE_PROJECT_BY_ID_NOTES=delete project by id
QUERY_UNAUTHORIZED_PROJECT_NOTES=query unauthorized project
QUERY_ALL_PROJECT_LIST_NOTES=query all project list
QUERY_AUTHORIZED_PROJECT_NOTES=query authorized project
TASK_RECORD_TAG=task record related operation
QUERY_TASK_RECORD_LIST_PAGING_NOTES=query task record list paging
CREATE_TOKEN_NOTES=create token ,note: please login first
QUERY_ACCESS_TOKEN_LIST_NOTES=query access token list paging
SCHEDULE=schedule
WARNING_TYPE=warning type(sending strategy)
WARNING_GROUP_ID=warning group id
FAILURE_STRATEGY=failure strategy
RECEIVERS=receivers
RECEIVERS_CC=receivers cc
WORKER_GROUP_ID=worker server group id
PROCESS_INSTANCE_PRIORITY=process instance priority
UPDATE_SCHEDULE_NOTES=update schedule
SCHEDULE_ID=schedule id
ONLINE_SCHEDULE_NOTES=online schedule
OFFLINE_SCHEDULE_NOTES=offline schedule
QUERY_SCHEDULE_NOTES=query schedule
QUERY_SCHEDULE_LIST_PAGING_NOTES=query schedule list paging
LOGIN_TAG=User login related operations
USER_NAME=user name
PROJECT_NAME=project name
CREATE_PROCESS_DEFINITION_NOTES=create process definition
PROCESS_DEFINITION_NAME=process definition name
PROCESS_DEFINITION_JSON=process definition detail info (json format)
PROCESS_DEFINITION_LOCATIONS=process definition node locations info (json format)
PROCESS_INSTANCE_LOCATIONS=process instance node locations info (json format)
PROCESS_DEFINITION_CONNECTS=process definition node connects info (json format)
PROCESS_INSTANCE_CONNECTS=process instance node connects info (json format)
PROCESS_DEFINITION_DESC=process definition desc
PROCESS_DEFINITION_TAG=process definition related opertation
SIGNOUT_NOTES=logout
USER_PASSWORD=user password
UPDATE_PROCESS_INSTANCE_NOTES=update process instance
QUERY_PROCESS_INSTANCE_LIST_NOTES=query process instance list
VERIFY_PROCCESS_DEFINITION_NAME_NOTES=verify proccess definition name
LOGIN_NOTES=user login
UPDATE_PROCCESS_DEFINITION_NOTES=update proccess definition
PROCESS_DEFINITION_ID=process definition id
PROCESS_DEFINITION_IDS=process definition ids
RELEASE_PROCCESS_DEFINITION_NOTES=release proccess definition
QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=query proccess definition by id
QUERY_PROCCESS_DEFINITION_LIST_NOTES=query proccess definition list
QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=query proccess definition list paging
QUERY_ALL_DEFINITION_LIST_NOTES=query all definition list
PAGE_NO=page no
PROCESS_INSTANCE_ID=process instance id
PROCESS_INSTANCE_JSON=process instance info(json format)
SCHEDULE_TIME=schedule time
SYNC_DEFINE=update the information of the process instance to the process definition\
RECOVERY_PROCESS_INSTANCE_FLAG=whether to recovery process instance
SEARCH_VAL=search val
USER_ID=user id
PAGE_SIZE=page size
LIMIT=limit
VIEW_TREE_NOTES=view tree
GET_NODE_LIST_BY_DEFINITION_ID_NOTES=get task node list by process definition id
PROCESS_DEFINITION_ID_LIST=process definition id list
QUERY_PROCCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=query proccess definition all by project id
DELETE_PROCESS_DEFINITION_BY_ID_NOTES=delete process definition by process definition id
BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=batch delete process definition by process definition ids
QUERY_PROCESS_INSTANCE_BY_ID_NOTES=query process instance by process instance id
DELETE_PROCESS_INSTANCE_BY_ID_NOTES=delete process instance by process instance id
TASK_ID=task instance id
SKIP_LINE_NUM=skip line num
QUERY_TASK_INSTANCE_LOG_NOTES=query task instance log
DOWNLOAD_TASK_INSTANCE_LOG_NOTES=download task instance log
USERS_TAG=users related operation
SCHEDULER_TAG=scheduler related operation
CREATE_SCHEDULE_NOTES=create schedule
CREATE_USER_NOTES=create user
TENANT_ID=tenant id
QUEUE=queue
EMAIL=email
PHONE=phone
QUERY_USER_LIST_NOTES=query user list
UPDATE_USER_NOTES=update user
DELETE_USER_BY_ID_NOTES=delete user by id
GRANT_PROJECT_NOTES=GRANT PROJECT
PROJECT_IDS=project ids(string format, multiple projects separated by ",")
GRANT_RESOURCE_NOTES=grant resource file
RESOURCE_IDS=resource ids(string format, multiple resources separated by ",")
GET_USER_INFO_NOTES=get user info
LIST_USER_NOTES=list user
VERIFY_USER_NAME_NOTES=verify user name
UNAUTHORIZED_USER_NOTES=cancel authorization
ALERT_GROUP_ID=alert group id
AUTHORIZED_USER_NOTES=authorized user
GRANT_UDF_FUNC_NOTES=grant udf function
UDF_IDS=udf ids(string format, multiple udf functions separated by ",")
GRANT_DATASOURCE_NOTES=grant datasource
DATASOURCE_IDS=datasource ids(string format, multiple datasources separated by ",")
QUERY_SUBPROCESS_INSTANCE_BY_TASK_ID_NOTES=query subprocess instance by task instance id
QUERY_PARENT_PROCESS_INSTANCE_BY_SUB_PROCESS_INSTANCE_ID_NOTES=query parent process instance info by sub process instance id
QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES=query process instance global variables and local variables
VIEW_GANTT_NOTES=view gantt
SUB_PROCESS_INSTANCE_ID=sub process instance id
TASK_NAME=task instance name
TASK_INSTANCE_TAG=task instance related operation
LOGGER_TAG=log related operation
PROCESS_INSTANCE_TAG=process instance related operation
EXECUTION_STATUS=runing status for workflow and task nodes
HOST=ip address of running task
START_DATE=start date
END_DATE=end date
QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES=query task list by process instance id
UPDATE_DATA_SOURCE_NOTES=update data source
DATA_SOURCE_ID=DATA SOURCE ID
QUERY_DATA_SOURCE_NOTES=query data source by id
QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES=query data source list by database type
QUERY_DATA_SOURCE_LIST_PAGING_NOTES=query data source list paging
CONNECT_DATA_SOURCE_NOTES=CONNECT DATA SOURCE
CONNECT_DATA_SOURCE_TEST_NOTES=connect data source test
DELETE_DATA_SOURCE_NOTES=delete data source
VERIFY_DATA_SOURCE_NOTES=verify data source
UNAUTHORIZED_DATA_SOURCE_NOTES=unauthorized data source
AUTHORIZED_DATA_SOURCE_NOTES=authorized data source
DELETE_SCHEDULER_BY_ID_NOTES=delete scheduler by id

252
dockerfile/conf/dolphinscheduler/conf/i18n/messages_en_US.properties

@@ -1,252 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
QUERY_SCHEDULE_LIST_NOTES=query schedule list
EXECUTE_PROCESS_TAG=execute process related operation
PROCESS_INSTANCE_EXECUTOR_TAG=process instance executor related operation
RUN_PROCESS_INSTANCE_NOTES=run process instance
START_NODE_LIST=start node list(node name)
TASK_DEPEND_TYPE=task depend type
COMMAND_TYPE=command type
RUN_MODE=run mode
TIMEOUT=timeout
EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=execute action to process instance
EXECUTE_TYPE=execute type
START_CHECK_PROCESS_DEFINITION_NOTES=start check process definition
GET_RECEIVER_CC_NOTES=query receiver cc
DESC=description
GROUP_NAME=group name
GROUP_TYPE=group type
QUERY_ALERT_GROUP_LIST_NOTES=query alert group list
UPDATE_ALERT_GROUP_NOTES=update alert group
DELETE_ALERT_GROUP_BY_ID_NOTES=delete alert group by id
VERIFY_ALERT_GROUP_NAME_NOTES=verify alert group name, check alert group exist or not
GRANT_ALERT_GROUP_NOTES=grant alert group
USER_IDS=user id list
ALERT_GROUP_TAG=alert group related operation
CREATE_ALERT_GROUP_NOTES=create alert group
WORKER_GROUP_TAG=worker group related operation
SAVE_WORKER_GROUP_NOTES=create worker group
WORKER_GROUP_NAME=worker group name
WORKER_IP_LIST=worker ip list, eg. 192.168.1.1,192.168.1.2
QUERY_WORKER_GROUP_PAGING_NOTES=query worker group paging
QUERY_WORKER_GROUP_LIST_NOTES=query worker group list
DELETE_WORKER_GROUP_BY_ID_NOTES=delete worker group by id
DATA_ANALYSIS_TAG=analysis related operation of task state
COUNT_TASK_STATE_NOTES=count task state
COUNT_PROCESS_INSTANCE_NOTES=count process instance state
COUNT_PROCESS_DEFINITION_BY_USER_NOTES=count process definition by user
COUNT_COMMAND_STATE_NOTES=count command state
COUNT_QUEUE_STATE_NOTES=count the running status of the task in the queue\
ACCESS_TOKEN_TAG=access token related operation
MONITOR_TAG=monitor related operation
MASTER_LIST_NOTES=master server list
WORKER_LIST_NOTES=worker server list
QUERY_DATABASE_STATE_NOTES=query database state
QUERY_ZOOKEEPER_STATE_NOTES=QUERY ZOOKEEPER STATE
TASK_STATE=task instance state
SOURCE_TABLE=SOURCE TABLE
DEST_TABLE=dest table
TASK_DATE=task date
QUERY_HISTORY_TASK_RECORD_LIST_PAGING_NOTES=query history task record list paging
DATA_SOURCE_TAG=data source related operation
CREATE_DATA_SOURCE_NOTES=create data source
DATA_SOURCE_NAME=data source name
DATA_SOURCE_NOTE=data source desc
DB_TYPE=database type
DATA_SOURCE_HOST=DATA SOURCE HOST
DATA_SOURCE_PORT=data source port
DATABASE_NAME=database name
QUEUE_TAG=queue related operation
QUERY_QUEUE_LIST_NOTES=query queue list
QUERY_QUEUE_LIST_PAGING_NOTES=query queue list paging
CREATE_QUEUE_NOTES=create queue
YARN_QUEUE_NAME=yarn(hadoop) queue name
QUEUE_ID=queue id
TENANT_DESC=tenant desc
QUERY_TENANT_LIST_PAGING_NOTES=query tenant list paging
QUERY_TENANT_LIST_NOTES=query tenant list
UPDATE_TENANT_NOTES=update tenant
DELETE_TENANT_NOTES=delete tenant
RESOURCES_TAG=resource center related operation
CREATE_RESOURCE_NOTES=create resource
RESOURCE_TYPE=resource file type
RESOURCE_NAME=resource name
RESOURCE_DESC=resource file desc
RESOURCE_FILE=resource file
RESOURCE_ID=resource id
QUERY_RESOURCE_LIST_NOTES=query resource list
DELETE_RESOURCE_BY_ID_NOTES=delete resource by id
VIEW_RESOURCE_BY_ID_NOTES=view resource by id
ONLINE_CREATE_RESOURCE_NOTES=online create resource
SUFFIX=resource file suffix
CONTENT=resource file content
UPDATE_RESOURCE_NOTES=edit resource file online
DOWNLOAD_RESOURCE_NOTES=download resource file
CREATE_UDF_FUNCTION_NOTES=create udf function
UDF_TYPE=UDF type
FUNC_NAME=function name
CLASS_NAME=package and class name
ARG_TYPES=arguments
UDF_DESC=udf desc
VIEW_UDF_FUNCTION_NOTES=view udf function
UPDATE_UDF_FUNCTION_NOTES=update udf function
QUERY_UDF_FUNCTION_LIST_PAGING_NOTES=query udf function list paging
VERIFY_UDF_FUNCTION_NAME_NOTES=verify udf function name
DELETE_UDF_FUNCTION_NOTES=delete udf function
AUTHORIZED_FILE_NOTES=authorized file
UNAUTHORIZED_FILE_NOTES=unauthorized file
AUTHORIZED_UDF_FUNC_NOTES=authorized udf func
UNAUTHORIZED_UDF_FUNC_NOTES=unauthorized udf func
VERIFY_QUEUE_NOTES=verify queue
TENANT_TAG=tenant related operation
CREATE_TENANT_NOTES=create tenant
TENANT_CODE=tenant code
TENANT_NAME=tenant name
QUEUE_NAME=queue name
PASSWORD=password
DATA_SOURCE_OTHER=jdbc connection params, format:{"key1":"value1",...}
PROJECT_TAG=project related operation
CREATE_PROJECT_NOTES=create project
PROJECT_DESC=project description
UPDATE_PROJECT_NOTES=update project
PROJECT_ID=project id
QUERY_PROJECT_BY_ID_NOTES=query project info by project id
QUERY_PROJECT_LIST_PAGING_NOTES=QUERY PROJECT LIST PAGING
QUERY_ALL_PROJECT_LIST_NOTES=query all project list
DELETE_PROJECT_BY_ID_NOTES=delete project by id
QUERY_UNAUTHORIZED_PROJECT_NOTES=query unauthorized project
QUERY_AUTHORIZED_PROJECT_NOTES=query authorized project
TASK_RECORD_TAG=task record related operation
QUERY_TASK_RECORD_LIST_PAGING_NOTES=query task record list paging
CREATE_TOKEN_NOTES=create token ,note: please login first
QUERY_ACCESS_TOKEN_LIST_NOTES=query access token list paging
SCHEDULE=schedule
WARNING_TYPE=warning type(sending strategy)
WARNING_GROUP_ID=warning group id
FAILURE_STRATEGY=failure strategy
RECEIVERS=receivers
RECEIVERS_CC=receivers cc
WORKER_GROUP_ID=worker server group id
PROCESS_INSTANCE_PRIORITY=process instance priority
UPDATE_SCHEDULE_NOTES=update schedule
SCHEDULE_ID=schedule id
ONLINE_SCHEDULE_NOTES=online schedule
OFFLINE_SCHEDULE_NOTES=offline schedule
QUERY_SCHEDULE_NOTES=query schedule
QUERY_SCHEDULE_LIST_PAGING_NOTES=query schedule list paging
LOGIN_TAG=User login related operations
USER_NAME=user name
PROJECT_NAME=project name
CREATE_PROCESS_DEFINITION_NOTES=create process definition
PROCESS_DEFINITION_NAME=process definition name
PROCESS_DEFINITION_JSON=process definition detail info (json format)
PROCESS_DEFINITION_LOCATIONS=process definition node locations info (json format)
PROCESS_INSTANCE_LOCATIONS=process instance node locations info (json format)
PROCESS_DEFINITION_CONNECTS=process definition node connects info (json format)
PROCESS_INSTANCE_CONNECTS=process instance node connects info (json format)
PROCESS_DEFINITION_DESC=process definition desc
PROCESS_DEFINITION_TAG=process definition related opertation
SIGNOUT_NOTES=logout
USER_PASSWORD=user password
UPDATE_PROCESS_INSTANCE_NOTES=update process instance
QUERY_PROCESS_INSTANCE_LIST_NOTES=query process instance list
VERIFY_PROCCESS_DEFINITION_NAME_NOTES=verify proccess definition name
LOGIN_NOTES=user login
UPDATE_PROCCESS_DEFINITION_NOTES=update proccess definition
PROCESS_DEFINITION_ID=process definition id
PROCESS_DEFINITION_IDS=process definition ids
RELEASE_PROCCESS_DEFINITION_NOTES=release proccess definition
QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=query proccess definition by id
QUERY_PROCCESS_DEFINITION_LIST_NOTES=query proccess definition list
QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=query proccess definition list paging
QUERY_ALL_DEFINITION_LIST_NOTES=query all definition list
PAGE_NO=page no
PROCESS_INSTANCE_ID=process instance id
PROCESS_INSTANCE_JSON=process instance info(json format)
SCHEDULE_TIME=schedule time
SYNC_DEFINE=update the information of the process instance to the process definition\
RECOVERY_PROCESS_INSTANCE_FLAG=whether to recovery process instance
SEARCH_VAL=search val
USER_ID=user id
PAGE_SIZE=page size
LIMIT=limit
VIEW_TREE_NOTES=view tree
GET_NODE_LIST_BY_DEFINITION_ID_NOTES=get task node list by process definition id
PROCESS_DEFINITION_ID_LIST=process definition id list
QUERY_PROCCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=query proccess definition all by project id
DELETE_PROCESS_DEFINITION_BY_ID_NOTES=delete process definition by process definition id
BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=batch delete process definition by process definition ids
QUERY_PROCESS_INSTANCE_BY_ID_NOTES=query process instance by process instance id
DELETE_PROCESS_INSTANCE_BY_ID_NOTES=delete process instance by process instance id
TASK_ID=task instance id
SKIP_LINE_NUM=skip line num
QUERY_TASK_INSTANCE_LOG_NOTES=query task instance log
DOWNLOAD_TASK_INSTANCE_LOG_NOTES=download task instance log
USERS_TAG=users related operation
SCHEDULER_TAG=scheduler related operation
CREATE_SCHEDULE_NOTES=create schedule
CREATE_USER_NOTES=create user
TENANT_ID=tenant id
QUEUE=queue
EMAIL=email
PHONE=phone
QUERY_USER_LIST_NOTES=query user list
UPDATE_USER_NOTES=update user
DELETE_USER_BY_ID_NOTES=delete user by id
GRANT_PROJECT_NOTES=GRANT PROJECT
PROJECT_IDS=project ids(string format, multiple projects separated by ",")
GRANT_RESOURCE_NOTES=grant resource file
RESOURCE_IDS=resource ids(string format, multiple resources separated by ",")
GET_USER_INFO_NOTES=get user info
LIST_USER_NOTES=list user
VERIFY_USER_NAME_NOTES=verify user name
UNAUTHORIZED_USER_NOTES=cancel authorization
ALERT_GROUP_ID=alert group id
AUTHORIZED_USER_NOTES=authorized user
GRANT_UDF_FUNC_NOTES=grant udf function
UDF_IDS=udf ids(string format, multiple udf functions separated by ",")
GRANT_DATASOURCE_NOTES=grant datasource
DATASOURCE_IDS=datasource ids(string format, multiple datasources separated by ",")
QUERY_SUBPROCESS_INSTANCE_BY_TASK_ID_NOTES=query subprocess instance by task instance id
QUERY_PARENT_PROCESS_INSTANCE_BY_SUB_PROCESS_INSTANCE_ID_NOTES=query parent process instance info by sub process instance id
QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES=query process instance global variables and local variables
VIEW_GANTT_NOTES=view gantt
SUB_PROCESS_INSTANCE_ID=sub process instance id
TASK_NAME=task instance name
TASK_INSTANCE_TAG=task instance related operation
LOGGER_TAG=log related operation
PROCESS_INSTANCE_TAG=process instance related operation
EXECUTION_STATUS=runing status for workflow and task nodes
HOST=ip address of running task
START_DATE=start date
END_DATE=end date
QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES=query task list by process instance id
UPDATE_DATA_SOURCE_NOTES=update data source
DATA_SOURCE_ID=DATA SOURCE ID
QUERY_DATA_SOURCE_NOTES=query data source by id
QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES=query data source list by database type
QUERY_DATA_SOURCE_LIST_PAGING_NOTES=query data source list paging
CONNECT_DATA_SOURCE_NOTES=CONNECT DATA SOURCE
CONNECT_DATA_SOURCE_TEST_NOTES=connect data source test
DELETE_DATA_SOURCE_NOTES=delete data source
VERIFY_DATA_SOURCE_NOTES=verify data source
UNAUTHORIZED_DATA_SOURCE_NOTES=unauthorized data source
AUTHORIZED_DATA_SOURCE_NOTES=authorized data source
DELETE_SCHEDULER_BY_ID_NOTES=delete scheduler by id

250
dockerfile/conf/dolphinscheduler/conf/i18n/messages_zh_CN.properties

@@ -1,250 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
QUERY_SCHEDULE_LIST_NOTES=查询定时列表
PROCESS_INSTANCE_EXECUTOR_TAG=流程实例执行相关操作
RUN_PROCESS_INSTANCE_NOTES=运行流程实例
START_NODE_LIST=开始节点列表(节点name)
TASK_DEPEND_TYPE=任务依赖类型
COMMAND_TYPE=指令类型
RUN_MODE=运行模式
TIMEOUT=超时时间
EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=执行流程实例的各种操作(暂停、停止、重跑、恢复等)
EXECUTE_TYPE=执行类型
START_CHECK_PROCESS_DEFINITION_NOTES=check the process definition
DESC=remark (description)
GROUP_NAME=group name
GROUP_TYPE=group type
QUERY_ALERT_GROUP_LIST_NOTES=query alert group list
UPDATE_ALERT_GROUP_NOTES=edit (update) alert group
DELETE_ALERT_GROUP_BY_ID_NOTES=delete alert group by ID
VERIFY_ALERT_GROUP_NAME_NOTES=check whether the alert group name exists
GRANT_ALERT_GROUP_NOTES=grant alert group
USER_IDS=user ID list
ALERT_GROUP_TAG=alert group related operations
WORKER_GROUP_TAG=worker group management
SAVE_WORKER_GROUP_NOTES=create worker group
WORKER_GROUP_NAME=worker group name
WORKER_IP_LIST=worker IP list; note: separate multiple IP addresses with commas
QUERY_WORKER_GROUP_PAGING_NOTES=query worker group list paging
QUERY_WORKER_GROUP_LIST_NOTES=query worker group list
DELETE_WORKER_GROUP_BY_ID_NOTES=delete worker group by ID
DATA_ANALYSIS_TAG=task state analysis related operations
COUNT_TASK_STATE_NOTES=count task states
COUNT_PROCESS_INSTANCE_NOTES=count process instance states
COUNT_PROCESS_DEFINITION_BY_USER_NOTES=count process definitions created by user
COUNT_COMMAND_STATE_NOTES=count command states
COUNT_QUEUE_STATE_NOTES=count task states in the queue
ACCESS_TOKEN_TAG=access token related operations, login is required first
MONITOR_TAG=monitoring related operations
MASTER_LIST_NOTES=master server list
WORKER_LIST_NOTES=worker server list
QUERY_DATABASE_STATE_NOTES=query database state
QUERY_ZOOKEEPER_STATE_NOTES=query ZooKeeper state
TASK_STATE=task instance state
SOURCE_TABLE=source table
DEST_TABLE=destination table
TASK_DATE=task date
QUERY_HISTORY_TASK_RECORD_LIST_PAGING_NOTES=query history task record list paging
DATA_SOURCE_TAG=data source related operations
CREATE_DATA_SOURCE_NOTES=create data source
DATA_SOURCE_NAME=data source name
DATA_SOURCE_NOTE=data source description
DB_TYPE=data source type
DATA_SOURCE_HOST=IP or hostname
DATA_SOURCE_PORT=data source port
DATABASE_NAME=database name
QUEUE_TAG=queue related operations
QUERY_QUEUE_LIST_NOTES=query queue list
QUERY_QUEUE_LIST_PAGING_NOTES=query queue list paging
CREATE_QUEUE_NOTES=create queue
YARN_QUEUE_NAME=hadoop yarn queue name
QUEUE_ID=queue ID
TENANT_DESC=tenant description
QUERY_TENANT_LIST_PAGING_NOTES=query tenant list paging
QUERY_TENANT_LIST_NOTES=query tenant list
UPDATE_TENANT_NOTES=update tenant
DELETE_TENANT_NOTES=delete tenant
RESOURCES_TAG=resource center related operations
CREATE_RESOURCE_NOTES=create resource
RESOURCE_TYPE=resource file type
RESOURCE_NAME=resource file name
RESOURCE_DESC=resource file description
RESOURCE_FILE=resource file
RESOURCE_ID=resource ID
QUERY_RESOURCE_LIST_NOTES=query resource list
DELETE_RESOURCE_BY_ID_NOTES=delete resource by ID
VIEW_RESOURCE_BY_ID_NOTES=view resource by ID
ONLINE_CREATE_RESOURCE_NOTES=create resource online
SUFFIX=resource file suffix
CONTENT=resource file content
UPDATE_RESOURCE_NOTES=update resource file online
DOWNLOAD_RESOURCE_NOTES=download resource file
CREATE_UDF_FUNCTION_NOTES=create UDF function
UDF_TYPE=UDF type
FUNC_NAME=function name
CLASS_NAME=package and class name
ARG_TYPES=arguments
UDF_DESC=UDF description and usage notes
VIEW_UDF_FUNCTION_NOTES=view UDF function
UPDATE_UDF_FUNCTION_NOTES=update UDF function
QUERY_UDF_FUNCTION_LIST_PAGING_NOTES=query UDF function list paging
VERIFY_UDF_FUNCTION_NAME_NOTES=verify UDF function name
DELETE_UDF_FUNCTION_NOTES=delete UDF function
AUTHORIZED_FILE_NOTES=authorize file
UNAUTHORIZED_FILE_NOTES=revoke file authorization
AUTHORIZED_UDF_FUNC_NOTES=authorize UDF function
UNAUTHORIZED_UDF_FUNC_NOTES=revoke UDF function authorization
VERIFY_QUEUE_NOTES=verify queue
TENANT_TAG=tenant related operations
CREATE_TENANT_NOTES=create tenant
TENANT_CODE=tenant code
TENANT_NAME=tenant name
QUEUE_NAME=queue name
PASSWORD=password
DATA_SOURCE_OTHER=jdbc connection parameters, in the format {"key1":"value1",...}
PROJECT_TAG=project related operations
CREATE_PROJECT_NOTES=create project
PROJECT_DESC=project description
UPDATE_PROJECT_NOTES=update project
PROJECT_ID=project ID
QUERY_PROJECT_BY_ID_NOTES=query project info by project ID
QUERY_PROJECT_LIST_PAGING_NOTES=query project list paging
QUERY_ALL_PROJECT_LIST_NOTES=query all projects
DELETE_PROJECT_BY_ID_NOTES=delete project by ID
QUERY_UNAUTHORIZED_PROJECT_NOTES=query unauthorized projects
QUERY_AUTHORIZED_PROJECT_NOTES=query authorized projects
TASK_RECORD_TAG=task record related operations
QUERY_TASK_RECORD_LIST_PAGING_NOTES=query task record list paging
CREATE_TOKEN_NOTES=create token, note that login is required first
QUERY_ACCESS_TOKEN_LIST_NOTES=query access token list paging
SCHEDULE=schedule
WARNING_TYPE=send strategy
WARNING_GROUP_ID=send group ID
FAILURE_STRATEGY=failure strategy
RECEIVERS=receivers
RECEIVERS_CC=receivers (CC)
WORKER_GROUP_ID=worker server group ID
PROCESS_INSTANCE_PRIORITY=process instance priority
UPDATE_SCHEDULE_NOTES=update schedule
SCHEDULE_ID=schedule ID
ONLINE_SCHEDULE_NOTES=bring schedule online
OFFLINE_SCHEDULE_NOTES=take schedule offline
QUERY_SCHEDULE_NOTES=query schedule
QUERY_SCHEDULE_LIST_PAGING_NOTES=query schedule list paging
LOGIN_TAG=user login related operations
USER_NAME=user name
PROJECT_NAME=project name
CREATE_PROCESS_DEFINITION_NOTES=create process definition
PROCESS_DEFINITION_NAME=process definition name
PROCESS_DEFINITION_JSON=process definition details (JSON format)
PROCESS_DEFINITION_LOCATIONS=process definition node location info (JSON format)
PROCESS_INSTANCE_LOCATIONS=process instance node location info (JSON format)
PROCESS_DEFINITION_CONNECTS=process definition node connection info (JSON format)
PROCESS_INSTANCE_CONNECTS=process instance node connection info (JSON format)
PROCESS_DEFINITION_DESC=process definition description
PROCESS_DEFINITION_TAG=process definition related operations
SIGNOUT_NOTES=sign out
USER_PASSWORD=user password
UPDATE_PROCESS_INSTANCE_NOTES=update process instance
QUERY_PROCESS_INSTANCE_LIST_NOTES=query process instance list
VERIFY_PROCCESS_DEFINITION_NAME_NOTES=verify process definition name
LOGIN_NOTES=user login
UPDATE_PROCCESS_DEFINITION_NOTES=update process definition
PROCESS_DEFINITION_ID=process definition ID
RELEASE_PROCCESS_DEFINITION_NOTES=release process definition
QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=query process definition by process definition ID
QUERY_PROCCESS_DEFINITION_LIST_NOTES=query process definition list
QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=query process definition list paging
QUERY_ALL_DEFINITION_LIST_NOTES=query all process definitions
PAGE_NO=page number
PROCESS_INSTANCE_ID=process instance ID
PROCESS_INSTANCE_IDS=process instance ID set
PROCESS_INSTANCE_JSON=process instance info (JSON format)
SCHEDULE_TIME=schedule time
SYNC_DEFINE=whether updated process instance info is synced to the process definition
RECOVERY_PROCESS_INSTANCE_FLAG=whether to recover the process instance
SEARCH_VAL=search value
USER_ID=user ID
PAGE_SIZE=page size
LIMIT=number of records to show
VIEW_TREE_NOTES=tree view
GET_NODE_LIST_BY_DEFINITION_ID_NOTES=get task node list by process definition ID
PROCESS_DEFINITION_ID_LIST=process definition ID list
QUERY_PROCCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=query process definitions by project ID
BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=batch delete process definitions by process definition ID set
DELETE_PROCESS_DEFINITION_BY_ID_NOTES=delete process definition by process definition ID
QUERY_PROCESS_INSTANCE_BY_ID_NOTES=query process instance by process instance ID
DELETE_PROCESS_INSTANCE_BY_ID_NOTES=delete process instance by process instance ID
TASK_ID=task instance ID
SKIP_LINE_NUM=number of lines to skip
QUERY_TASK_INSTANCE_LOG_NOTES=query task instance log
DOWNLOAD_TASK_INSTANCE_LOG_NOTES=download task instance log
USERS_TAG=user related operations
SCHEDULER_TAG=schedule related operations
CREATE_SCHEDULE_NOTES=create schedule
CREATE_USER_NOTES=create user
TENANT_ID=tenant ID
QUEUE=queue to use
EMAIL=email
PHONE=phone number
QUERY_USER_LIST_NOTES=query user list
UPDATE_USER_NOTES=update user
DELETE_USER_BY_ID_NOTES=delete user by ID
GRANT_PROJECT_NOTES=grant project
PROJECT_IDS=project IDs (string format, separate multiple projects with ",")
GRANT_RESOURCE_NOTES=grant resource files
RESOURCE_IDS=resource ID list (string format, separate multiple resource IDs with ",")
GET_USER_INFO_NOTES=get user info
LIST_USER_NOTES=user list
VERIFY_USER_NAME_NOTES=verify user name
UNAUTHORIZED_USER_NOTES=revoke authorization
ALERT_GROUP_ID=alert group ID
AUTHORIZED_USER_NOTES=authorized users
GRANT_UDF_FUNC_NOTES=grant UDF functions
UDF_IDS=UDF function ID list (string format, separate multiple UDF function IDs with ",")
GRANT_DATASOURCE_NOTES=grant data sources
DATASOURCE_IDS=data source ID list (string format, separate multiple data source IDs with ",")
QUERY_SUBPROCESS_INSTANCE_BY_TASK_ID_NOTES=query sub-process instance by task instance ID
QUERY_PARENT_PROCESS_INSTANCE_BY_SUB_PROCESS_INSTANCE_ID_NOTES=query parent process instance info by sub-process instance ID
QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES=query global and local variables of a process instance
VIEW_GANTT_NOTES=view Gantt chart
SUB_PROCESS_INSTANCE_ID=sub-process instance ID
TASK_NAME=task instance name
TASK_INSTANCE_TAG=task instance related operations
LOGGER_TAG=log related operations
PROCESS_INSTANCE_TAG=process instance related operations
EXECUTION_STATUS=run state of workflows and task nodes
HOST=IP address of the host running the task
START_DATE=start time
END_DATE=end time
QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES=query task list by process instance ID
UPDATE_DATA_SOURCE_NOTES=update data source
DATA_SOURCE_ID=data source ID
QUERY_DATA_SOURCE_NOTES=query data source by ID
QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES=query data source list by data source type
QUERY_DATA_SOURCE_LIST_PAGING_NOTES=query data source list paging
CONNECT_DATA_SOURCE_NOTES=connect to data source
CONNECT_DATA_SOURCE_TEST_NOTES=test data source connection
DELETE_DATA_SOURCE_NOTES=delete data source
VERIFY_DATA_SOURCE_NOTES=verify data source
UNAUTHORIZED_DATA_SOURCE_NOTES=unauthorized data sources
AUTHORIZED_DATA_SOURCE_NOTES=authorized data sources
DELETE_SCHEDULER_BY_ID_NOTES=delete schedule by schedule ID
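The keys above are message identifiers resolved against a properties bundle at runtime. As a minimal sketch of that lookup — assuming a hypothetical bundle base name "messages" and plain java.util.ResourceBundle semantics, not the project's actual wiring — the values could be read like this:

import java.util.Locale;
import java.util.ResourceBundle;

public class NotesLookup {
    public static void main(String[] args) {
        // Assumption: the key/value pairs above live in a classpath bundle
        // named "messages" (e.g. messages.properties / messages_zh_CN.properties).
        ResourceBundle bundle = ResourceBundle.getBundle("messages", Locale.getDefault());
        // The key is the identifier from the file; the value is the localized note.
        System.out.println(bundle.getString("CREATE_DATA_SOURCE_NOTES"));
    }
}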

17
dockerfile/conf/dolphinscheduler/conf/mail_templates/alert_mail_template.ftl

@@ -1,17 +0,0 @@
<#--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE HTML PUBLIC '-//W3C//DTD HTML 4.01 Transitional//EN' 'http://www.w3.org/TR/html4/loose.dtd'><html><head><title> dolphinscheduler</title><meta name='Keywords' content=''><meta name='Description' content=''><style type="text/css">table { margin-top:0px; padding-top:0px; border:1px solid; font-size: 14px; color: #333333; border-width: 1px; border-color: #666666; border-collapse: collapse; } table th { border-width: 1px; padding: 8px; border-style: solid; border-color: #666666; background-color: #dedede; } table td { border-width: 1px; padding: 8px; border-style: solid; border-color: #666666; background-color: #ffffff; }</style></head><body style="margin:0;padding:0"><table border="1px" cellpadding="5px" cellspacing="-10px"><thead><#if title??> ${title}</#if></thead><#if content??> ${content}</#if></table></body></html>
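The template binds two optional model entries, title (the table header row) and content (the body rows); the <#if ...??> guards keep a missing entry from failing the render. A minimal rendering sketch, assuming the FreeMarker library on the classpath and a hypothetical local template directory:

import freemarker.template.Configuration;
import freemarker.template.Template;
import java.io.File;
import java.io.StringWriter;
import java.util.HashMap;
import java.util.Map;

public class MailTemplateDemo {
    public static void main(String[] args) throws Exception {
        // Assumption: the .ftl file above sits in ./mail_templates (hypothetical path).
        Configuration cfg = new Configuration(Configuration.VERSION_2_3_28);
        cfg.setDirectoryForTemplateLoading(new File("mail_templates"));
        Template tpl = cfg.getTemplate("alert_mail_template.ftl");

        Map<String, Object> model = new HashMap<>();
        model.put("title", "<tr><th>host</th><th>status</th></tr>");
        model.put("content", "<tr><td>worker-1</td><td>OK</td></tr>");

        StringWriter out = new StringWriter();
        tpl.process(model, out); // fills ${title} and ${content} inside the table
        System.out.println(out);
    }
}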

52
dockerfile/conf/dolphinscheduler/conf/master_logback.xml

@@ -1,52 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
<property name="log.base" value="logs" />
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="MASTERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.base}/dolphinscheduler-master.log</file>
<filter class="org.apache.dolphinscheduler.server.master.log.MasterLogFilter">
<level>INFO</level>
</filter>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/dolphinscheduler-master.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>168</maxHistory>
<maxFileSize>200MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="MASTERLOGFILE"/>
</root>
</configuration>
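Apart from the project-specific MasterLogFilter, the configuration is standard logback: the root logger is INFO, and MASTERLOGFILE writes logs/dolphinscheduler-master.log, rolling hourly or at 200MB and keeping 168 rolled files. A minimal sketch of a caller, assuming slf4j-api and logback-classic on the classpath:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class MasterLoggingDemo {
    private static final Logger logger = LoggerFactory.getLogger(MasterLoggingDemo.class);

    public static void main(String[] args) {
        // INFO meets both the root level and the MASTERLOGFILE filter threshold.
        logger.info("master started");
        // DEBUG is below the INFO root level and is discarded.
        logger.debug("not written under this configuration");
    }
}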

33
dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/AccessTokenMapper.xml

@@ -1,33 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.AccessTokenMapper">
<select id="selectAccessTokenPage" resultType="org.apache.dolphinscheduler.dao.entity.AccessToken">
select * from t_ds_access_token t
left join t_ds_user u on t.user_id = u.id
where 1 = 1
<if test="userName != null and userName != ''">
and u.user_name like concat ('%', #{userName}, '%')
</if>
<if test="userId != 0">
and t.user_id = #{userId}
</if>
order by t.update_time desc
</select>
</mapper>
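Each <select> is bound by its id to a method on a mapper interface in the matching namespace. A hedged sketch of the Java side for selectAccessTokenPage — the parameter names are inferred from the #{...} placeholders, and the real interface may also take a MyBatis-Plus pagination argument:

import java.util.List;
import org.apache.dolphinscheduler.dao.entity.AccessToken;
import org.apache.ibatis.annotations.Param;

public interface AccessTokenMapperSketch {
    // The @Param names feed #{userName} and #{userId} in the XML above;
    // passing userId == 0 disables the user filter via the <if> test.
    List<AccessToken> selectAccessTokenPage(@Param("userName") String userName,
                                            @Param("userId") int userId);
}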

47
dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/AlertGroupMapper.xml

@@ -1,47 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.AlertGroupMapper">
<select id="queryAlertGroupPage" resultType="org.apache.dolphinscheduler.dao.entity.AlertGroup">
select * from t_ds_alertgroup
where 1 = 1
<if test="groupName != null and groupName != ''">
and group_name like concat('%', #{groupName}, '%')
</if>
order by update_time desc
</select>
<select id="queryByGroupName" resultType="org.apache.dolphinscheduler.dao.entity.AlertGroup">
select * from t_ds_alertgroup
where group_name=#{groupName}
</select>
<select id="queryByUserId" resultType="org.apache.dolphinscheduler.dao.entity.AlertGroup">
select * from t_ds_alertgroup t
left join t_ds_relation_user_alertgroup r on t.id=r.alertgroup_id
where r.user_id=#{userId}
</select>
<select id="queryByAlertType" resultType="org.apache.dolphinscheduler.dao.entity.AlertGroup">
select * from t_ds_alertgroup
where group_type=#{alertType}
</select>
<select id="queryAllGroupList" resultType="org.apache.dolphinscheduler.dao.entity.AlertGroup">
select *
from t_ds_alertgroup
order by update_time desc
</select>
</mapper>

26
dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/AlertMapper.xml

@@ -1,26 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.AlertMapper">
<select id="listAlertByStatus" resultType="org.apache.dolphinscheduler.dao.entity.Alert">
select *
from t_ds_alert
where alert_status = #{alertStatus}
</select>
</mapper>

43
dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/CommandMapper.xml

@@ -1,43 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.CommandMapper">
<select id="getOneToRun" resultType="org.apache.dolphinscheduler.dao.entity.Command">
select command.* from t_ds_command command
join t_ds_process_definition definition on command.process_definition_id = definition.id
where definition.release_state = 1 AND definition.flag = 1
order by command.update_time asc
limit 1
</select>
<select id="countCommandState" resultType="org.apache.dolphinscheduler.dao.entity.CommandCount">
select cmd.command_type as command_type, count(1) as count
from t_ds_command cmd, t_ds_process_definition process
where cmd.process_definition_id = process.id
<if test="projectIdArray != null and projectIdArray.length != 0">
and process.project_id in
<foreach collection="projectIdArray" index="index" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
</if>
<if test="startTime != null and endTime != null">
and cmd.start_time <![CDATA[ >= ]]> #{startTime} and cmd.update_time <![CDATA[ <= ]]> #{endTime}
</if>
group by cmd.command_type
</select>
</mapper>
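The <foreach> element expands projectIdArray into a parenthesized IN list, binding each element as its own JDBC parameter. A self-contained illustration of what that expansion looks like for a hypothetical projectIdArray = {1, 2, 3}:

import java.util.StringJoiner;

public class ForeachExpansionDemo {
    public static void main(String[] args) {
        int[] projectIdArray = {1, 2, 3};
        // Mirrors open="(", close=")", separator="," from the XML above;
        // MyBatis emits a '?' placeholder per #{i} rather than literal values.
        StringJoiner in = new StringJoiner(",", "(", ")");
        for (int n : projectIdArray) {
            in.add("?");
        }
        System.out.println("and process.project_id in " + in); // -> in (?,?,?)
    }
}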

79
dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/DataSourceMapper.xml

@@ -1,79 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.DataSourceMapper">
<select id="queryDataSourceByType" resultType="org.apache.dolphinscheduler.dao.entity.DataSource">
select *
from t_ds_datasource
where type=#{type}
<if test="userId != 0">
and id in
(select datasource_id
from t_ds_relation_datasource_user
where user_id=#{userId}
union select id as datasource_id
from t_ds_datasource
where user_id=#{userId}
)
</if>
</select>
<select id="selectPaging" resultType="org.apache.dolphinscheduler.dao.entity.DataSource">
select *
from t_ds_datasource
where 1 =1
<if test="userId != 0">
and id in
(select datasource_id
from t_ds_relation_datasource_user
where user_id=#{userId}
union select id as datasource_id
from t_ds_datasource
where user_id=#{userId}
)
</if>
<if test="name != null and name != ''">
and name like concat ('%', #{name}, '%')
</if>
order by update_time desc
</select>
<select id="queryDataSourceByName" resultType="org.apache.dolphinscheduler.dao.entity.DataSource">
select *
from t_ds_datasource
where name=#{name}
</select>
<select id="queryAuthedDatasource" resultType="org.apache.dolphinscheduler.dao.entity.DataSource">
select datasource.*
from t_ds_datasource datasource, t_ds_relation_datasource_user rel
where datasource.id = rel.datasource_id AND rel.user_id = #{userId}
</select>
<select id="queryDatasourceExceptUserId" resultType="org.apache.dolphinscheduler.dao.entity.DataSource">
select *
from t_ds_datasource
where user_id <![CDATA[ <> ]]> #{userId}
</select>
<select id="listAllDataSourceByType" resultType="org.apache.dolphinscheduler.dao.entity.DataSource">
select *
from t_ds_datasource
where type = #{type}
</select>
</mapper>

30
dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/DataSourceUserMapper.xml

@@ -1,30 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.DataSourceUserMapper">
<delete id="deleteByUserId">
delete from t_ds_relation_datasource_user
where user_id = #{userId}
</delete>
<delete id="deleteByDatasourceId">
delete from t_ds_relation_datasource_user
where datasource_id = #{datasourceId}
</delete>
</mapper>

36
dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/ErrorCommandMapper.xml

@@ -1,36 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ErrorCommandMapper">
<select id="countCommandState" resultType="org.apache.dolphinscheduler.dao.entity.CommandCount">
select cmd.command_type as command_type, count(1) as count
from t_ds_error_command cmd, t_ds_process_definition process
where cmd.process_definition_id = process.id
<if test="projectIdArray != null and projectIdArray.length != 0">
and process.project_id in
<foreach collection="projectIdArray" index="index" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
</if>
<if test="startTime != null and endTime != null">
and cmd.start_time <![CDATA[ >= ]]> #{startTime} and cmd.update_time <![CDATA[ <= ]]> #{endTime}
</if>
group by cmd.command_type
</select>
</mapper>

96
dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/ProcessDefinitionMapper.xml

@@ -1,96 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper">
<select id="queryByDefineName" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition">
select pd.*,u.user_name,p.name as project_name,t.tenant_code,t.tenant_name,q.queue,q.queue_name
from t_ds_process_definition pd
JOIN t_ds_user u ON pd.user_id = u.id
JOIN t_ds_project p ON pd.project_id = p.id
JOIN t_ds_tenant t ON t.id = u.tenant_id
JOIN t_ds_queue q ON t.queue_id = q.id
WHERE p.id = #{projectId}
and pd.name = #{processDefinitionName}
</select>
<select id="queryDefineListPaging" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition">
SELECT td.*,sc.schedule_release_state,tu.user_name
FROM t_ds_process_definition td
left join (select process_definition_id,release_state as schedule_release_state from t_ds_schedules group by process_definition_id,release_state) sc on sc.process_definition_id = td.id
left join t_ds_user tu on td.user_id = tu.id
where td.project_id = #{projectId}
<if test=" isAdmin == false ">
and tu.user_type=1
</if>
<if test=" searchVal != null and searchVal != ''">
and td.name like concat('%', #{searchVal}, '%')
</if>
<if test=" userId != 0">
and td.user_id = #{userId}
</if>
order by sc.schedule_release_state desc,td.update_time desc
</select>
<select id="queryAllDefinitionList" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition">
select *
from t_ds_process_definition
where project_id = #{projectId}
order by create_time desc
</select>
<select id="queryDefinitionListByTenant" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition">
select *
from t_ds_process_definition
where tenant_id = #{tenantId}
</select>
<select id="queryDefinitionListByIdList" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition">
select *
from t_ds_process_definition
where id in
<foreach collection="ids" index="index" item="i" open="(" separator="," close=")">
#{i}
</foreach>
</select>
<select id="countDefinitionGroupByUser" resultType="org.apache.dolphinscheduler.dao.entity.DefinitionGroupByUser">
SELECT td.user_id as user_id, tu.user_name as user_name, count(0) as count
FROM t_ds_process_definition td
JOIN t_ds_user tu on tu.id=td.user_id
where 1 = 1
<if test=" isAdmin == false ">
and tu.user_type=1
</if>
<if test="projectIds != null and projectIds.length != 0">
and td.project_id in
<foreach collection="projectIds" index="index" item="i" open="(" separator="," close=")">
#{i}
</foreach>
</if>
group by td.user_id,tu.user_name
</select>
<select id="queryByDefineId" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition">
SELECT
pd.*, u.user_name,
p.name AS project_name
FROM
t_ds_process_definition pd,
t_ds_user u,
t_ds_project p
WHERE
pd.user_id = u.id AND pd.project_id = p.id
AND pd.id = #{processDefineId}
</select>
</mapper>

43
dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/ProcessInstanceMapMapper.xml

@@ -1,43 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapMapper">
<delete id="deleteByParentProcessId">
delete
from t_ds_relation_process_instance
where parent_process_instance_id=#{parentProcessId}
</delete>
<select id="queryByParentId" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstanceMap">
select *
from t_ds_relation_process_instance
where parent_process_instance_id = #{parentProcessId}
and parent_task_instance_id = #{parentTaskId}
</select>
<select id="queryBySubProcessId" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstanceMap">
select *
from t_ds_relation_process_instance
where process_instance_id = #{subProcessId}
</select>
<select id="querySubIdListByParentId" resultType="java.lang.Integer">
select process_instance_id
from t_ds_relation_process_instance
where parent_process_instance_id = #{parentInstanceId}
</select>
</mapper>

182
dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/ProcessInstanceMapper.xml

@@ -1,182 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper">
<select id="queryDetailById" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
select inst.*
from t_ds_process_instance inst
where inst.id = #{processId}
</select>
<select id="queryByHostAndStatus" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
select *
from t_ds_process_instance
where 1=1
<if test="host != null and host != ''">
and host=#{host}
</if>
and state in
<foreach collection="states" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
order by id asc
</select>
<select id="queryByTenantIdAndStatus" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
select *
from t_ds_process_instance
where 1=1
<if test="tenantId != -1">
and tenant_id =#{tenantId}
</if>
and state in
<foreach collection="states" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
order by id asc
</select>
<select id="queryByWorkerGroupIdAndStatus" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
select *
from t_ds_process_instance
where 1=1
<if test="workerGroupId != -1">
and worker_group_id =#{workerGroupId}
</if>
and state in
<foreach collection="states" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
order by id asc
</select>
<select id="queryProcessInstanceListPaging" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
select instance.*
from t_ds_process_instance instance
join t_ds_process_definition define ON instance.process_definition_id = define.id
where 1=1
and instance.is_sub_process=0
and define.project_id = #{projectId}
<if test="processDefinitionId != 0">
and instance.process_definition_id = #{processDefinitionId}
</if>
<if test="searchVal != null and searchVal != ''">
and instance.name like concat('%', #{searchVal}, '%')
</if>
<if test="startTime != null ">
and instance.start_time > #{startTime} and instance.start_time <![CDATA[ <=]]> #{endTime}
</if>
<if test="states != null and states != ''">
and instance.state in
<foreach collection="states" index="index" item="i" open="(" separator="," close=")">
#{i}
</foreach>
</if>
<if test="host != null and host != ''">
and instance.host like concat('%', #{host}, '%')
</if>
order by instance.start_time desc
</select>
<update id="setFailoverByHostAndStateArray">
update t_ds_process_instance
set host=null
where host =#{host} and state in
<foreach collection="states" index="index" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
</update>
<update id="updateProcessInstanceByState">
update t_ds_process_instance
set state = #{destState}
where state = #{originState}
</update>
<update id="updateProcessInstanceByTenantId">
update t_ds_process_instance
set tenant_id = #{destTenantId}
where tenant_id = #{originTenantId}
</update>
<update id="updateProcessInstanceByWorkerGroupId">
update t_ds_process_instance
set worker_group_id = #{destWorkerGroupId}
where worker_group_id = #{originWorkerGroupId}
</update>
<select id="countInstanceStateByUser" resultType="org.apache.dolphinscheduler.dao.entity.ExecuteStatusCount">
select t.state, count(0) as count
from t_ds_process_instance t
join t_ds_process_definition d on d.id=t.process_definition_id
join t_ds_project p on p.id=d.project_id
where 1 = 1
and t.is_sub_process = 0
<if test="startTime != null and endTime != null">
and t.start_time >= #{startTime} and t.start_time <![CDATA[ <= ]]> #{endTime}
</if>
<if test="projectIds != null and projectIds.length != 0">
and p.id in
<foreach collection="projectIds" index="index" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
</if>
group by t.state
</select>
<select id="queryByProcessDefineId" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
select *
from t_ds_process_instance
where process_definition_id=#{processDefinitionId}
order by start_time desc limit #{size}
</select>
<select id="queryLastSchedulerProcess" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
select *
from t_ds_process_instance
where process_definition_id=#{processDefinitionId}
<if test="startTime!=null and endTime != null ">
and schedule_time between #{startTime} and #{endTime}
</if>
order by end_time desc limit 1
</select>
<select id="queryLastRunningProcess" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
select *
from t_ds_process_instance
where 1=1
<if test="states !=null and states.length != 0">
and state in
<foreach collection="states" item="i" index="index" open="(" separator="," close=")">
#{i}
</foreach>
</if>
<if test="startTime!=null and endTime != null ">
and process_definition_id=#{processDefinitionId}
and (schedule_time between #{startTime} and #{endTime} or start_time between #{startTime} and #{endTime})
</if>
order by start_time desc limit 1
</select>
<select id="queryLastManualProcess" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
select *
from t_ds_process_instance
where process_definition_id=#{processDefinitionId}
and schedule_time is null
<if test="startTime!=null and endTime != null ">
and start_time between #{startTime} and #{endTime}
</if>
order by end_time desc limit 1
</select>
</mapper>

68
dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/ProjectMapper.xml

@@ -1,68 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ProjectMapper">
<select id="queryDetailById" resultType="org.apache.dolphinscheduler.dao.entity.Project">
select p.*,u.user_name as user_name
from t_ds_project p
join t_ds_user u on p.user_id = u.id
where p.id = #{projectId}
</select>
<select id="queryByName" resultType="org.apache.dolphinscheduler.dao.entity.Project">
select p.*,u.user_name as user_name
from t_ds_project p
join t_ds_user u on p.user_id = u.id
where p.name = #{projectName}
limit 1
</select>
<select id="queryProjectListPaging" resultType="org.apache.dolphinscheduler.dao.entity.Project">
select p.*,u.user_name as user_name,
(SELECT COUNT(*) FROM t_ds_process_definition AS def WHERE def.project_id = p.id) AS def_count,
(SELECT COUNT(*) FROM t_ds_process_definition def, t_ds_process_instance inst WHERE def.id = inst.process_definition_id AND def.project_id = p.id AND inst.state=1 ) as inst_running_count
from t_ds_project p
join t_ds_user u on u.id=p.user_id
where 1=1
<if test="userId != 0">
and p.id in
(select project_id from t_ds_relation_project_user where user_id=#{userId}
union select id as project_id from t_ds_project where user_id=#{userId}
)
</if>
<if test="searchName!=null and searchName != ''">
and p.name like concat('%', #{searchName}, '%')
</if>
order by p.create_time desc
</select>
<select id="queryAuthedProjectListByUserId" resultType="org.apache.dolphinscheduler.dao.entity.Project">
select p.*
from t_ds_project p,t_ds_relation_project_user rel
where p.id = rel.project_id and rel.user_id= #{userId}
</select>
<select id="queryProjectExceptUserId" resultType="org.apache.dolphinscheduler.dao.entity.Project">
select *
from t_ds_project
where user_id <![CDATA[ <> ]]> #{userId}
</select>
<select id="queryProjectCreatedByUser" resultType="org.apache.dolphinscheduler.dao.entity.Project">
select *
from t_ds_project
where user_id = #{userId}
</select>
</mapper>

36
dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/ProjectUserMapper.xml

@@ -1,36 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ProjectUserMapper">
<delete id="deleteProjectRelation">
delete from t_ds_relation_project_user
where 1=1
and user_id = #{userId}
<if test="projectId != 0 ">
and project_id = #{projectId}
</if>
</delete>
<select id="queryProjectRelation" resultType="org.apache.dolphinscheduler.dao.entity.ProjectUser">
select *
from t_ds_relation_project_user
where project_id = #{projectId}
and user_id = #{userId}
limit 1
</select>
</mapper>

42
dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/QueueMapper.xml

@@ -1,42 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.QueueMapper">
<select id="queryQueuePaging" resultType="org.apache.dolphinscheduler.dao.entity.Queue">
select *
from t_ds_queue
where 1= 1
<if test="searchVal != null and searchVal != ''">
and queue_name like concat('%', #{searchVal}, '%')
</if>
order by update_time desc
</select>
<select id="queryAllQueueList" resultType="org.apache.dolphinscheduler.dao.entity.Queue">
select *
from t_ds_queue
where 1=1
<if test="queue != null and queue != ''">
and queue = #{queue}
</if>
<if test="queueName != null and queueName != ''">
and queue_name =#{queueName}
</if>
</select>
</mapper>

74
dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/ResourceMapper.xml

@@ -1,74 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ResourceMapper">
<select id="queryResourceList" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
select *
from t_ds_resources
where 1= 1
<if test="alias != null and alias != ''">
and alias = #{alias}
</if>
<if test="type != -1">
and type = #{type}
</if>
<if test="userId != 0">
and user_id = #{userId}
</if>
</select>
<select id="queryResourceListAuthored" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
select *
from t_ds_resources
where 1 = 1
<if test="type != -1">
and type=#{type}
</if>
and id in (select resources_id from t_ds_relation_resources_user where user_id=#{userId}
union select id as resources_id from t_ds_resources where user_id=#{userId})
</select>
<select id="queryResourcePaging" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
select *
from t_ds_resources
where type=#{type}
<if test="userId != 0">
and id in (select resources_id from t_ds_relation_resources_user where user_id=#{userId}
union select id as resources_id from t_ds_resources where user_id=#{userId})
</if>
<if test="searchVal != null and searchVal != ''">
and alias like concat('%', #{searchVal}, '%')
</if>
order by update_time desc
</select>
<select id="queryAuthorizedResourceList" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
select r.*
from t_ds_resources r,t_ds_relation_resources_user rel
where r.id = rel.resources_id AND rel.user_id = #{userId}
</select>
<select id="queryResourceExceptUserId" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
select *
from t_ds_resources
where user_id <![CDATA[ <> ]]> #{userId}
</select>
<select id="queryTenantCodeByResourceName" resultType="java.lang.String">
select tenant_code
from t_ds_tenant t, t_ds_user u, t_ds_resources res
where t.id = u.tenant_id and u.id = res.user_id and res.type=0
and res.alias= #{resName}
</select>
</mapper>

32
dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/ResourceUserMapper.xml

@@ -1,32 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ResourceUserMapper">
<delete id="deleteResourceUser">
delete
from t_ds_relation_resources_user
where 1 = 1
<if test="userId != 0">
and user_id = #{userId}
</if>
<if test="resourceId != 0">
and resources_id = #{resourceId}
</if>
</delete>
</mapper>

58
dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/ScheduleMapper.xml

@@ -1,58 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ScheduleMapper">
<select id="queryByProcessDefineIdPaging" resultType="org.apache.dolphinscheduler.dao.entity.Schedule">
select p_f.name as process_definition_name, p.name as project_name,u.user_name,s.*
from t_ds_schedules s
join t_ds_process_definition p_f on s.process_definition_id = p_f.id
join t_ds_project as p on p_f.project_id = p.id
join t_ds_user as u on s.user_id = u.id
where 1=1
<if test="processDefinitionId!= 0">
and s.process_definition_id = #{processDefinitionId}
</if>
order by s.update_time desc
</select>
<select id="querySchedulerListByProjectName" resultType="org.apache.dolphinscheduler.dao.entity.Schedule">
select p_f.name as process_definition_name, p_f.description as definition_description, p.name as project_name,u.user_name,s.*
from t_ds_schedules s
join t_ds_process_definition p_f on s.process_definition_id = p_f.id
join t_ds_project as p on p_f.project_id = p.id
join t_ds_user as u on s.user_id = u.id
where p.name = #{projectName}
</select>
<select id="selectAllByProcessDefineArray" resultType="org.apache.dolphinscheduler.dao.entity.Schedule">
select *
from t_ds_schedules
where 1= 1
<if test="processDefineIds != null and processDefineIds.length != 0 ">
and process_definition_id in
<foreach collection="processDefineIds" index="index" item="i" open="(" separator="," close=")">
#{i}
</foreach>
</if>
and release_state = 1
</select>
<select id="queryByProcessDefinitionId" resultType="org.apache.dolphinscheduler.dao.entity.Schedule">
select *
from t_ds_schedules
where process_definition_id =#{processDefinitionId}
</select>
</mapper>

32
dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/SessionMapper.xml

@@ -1,32 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.SessionMapper">
<select id="queryByUserId" resultType="org.apache.dolphinscheduler.dao.entity.Session">
select *
from t_ds_session
where user_id = #{userId}
</select>
<select id="queryByUserIdAndIp" resultType="org.apache.dolphinscheduler.dao.entity.Session">
select *
from t_ds_session
where user_id = #{userId} AND ip = #{ip}
</select>
</mapper>

129
dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/TaskInstanceMapper.xml

@@ -1,129 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper">
<update id="setFailoverByHostAndStateArray">
update t_ds_task_instance
set state = #{destStatus}
where host = #{host}
and state in
<foreach collection="states" index="index" item="i" open="(" separator="," close=")">
#{i}
</foreach>
</update>
<select id="queryTaskByProcessIdAndState" resultType="java.lang.Integer">
select id
from t_ds_task_instance
WHERE process_instance_id = #{processInstanceId}
and state = #{state}
and flag = 1
</select>
<select id="findValidTaskListByProcessId" resultType="org.apache.dolphinscheduler.dao.entity.TaskInstance">
select *
from t_ds_task_instance
WHERE process_instance_id = #{processInstanceId}
and flag = #{flag}
order by start_time desc
</select>
<select id="queryByHostAndStatus" resultType="org.apache.dolphinscheduler.dao.entity.TaskInstance">
select *
from t_ds_task_instance
where 1 = 1
<if test="host != null and host != ''">
and host = #{host}
</if>
<if test="states != null and states.length != 0">
and state in
<foreach collection="states" index="index" item="i" open="(" separator="," close=")">
#{i}
</foreach>
</if>
</select>
<select id="countTaskInstanceStateByUser" resultType="org.apache.dolphinscheduler.dao.entity.ExecuteStatusCount">
select state, count(0) as count
from t_ds_task_instance t
left join t_ds_process_definition d on d.id=t.process_definition_id
left join t_ds_project p on p.id=d.project_id
where 1=1
<if test="projectIds != null and projectIds.length != 0">
and d.project_id in
<foreach collection="projectIds" index="index" item="i" open="(" separator="," close=")">
#{i}
</foreach>
</if>
<if test="startTime != null and endTime != null">
and t.start_time > #{startTime} and t.start_time <![CDATA[ <= ]]> #{endTime}
</if>
group by t.state
</select>
<select id="queryByInstanceIdAndName" resultType="org.apache.dolphinscheduler.dao.entity.TaskInstance">
select *
from t_ds_task_instance
where process_instance_id = #{processInstanceId}
and name = #{name}
and flag = 1
limit 1
</select>
<select id="countTask" resultType="java.lang.Integer">
select count(1) as count
from t_ds_task_instance task,t_ds_process_definition process
where task.process_definition_id=process.id
<if test="projectIds != null and projectIds.length != 0">
and process.project_id in
<foreach collection="projectIds" index="index" item="i" open="(" separator="," close=")">
#{i}
</foreach>
</if>
<if test="taskIds != null and taskIds.length != 0">
and task.id in
<foreach collection="taskIds" index="index" item="i" open="(" separator="," close=")">
#{i}
</foreach>
</if>
</select>
<select id="queryTaskInstanceListPaging" resultType="org.apache.dolphinscheduler.dao.entity.TaskInstance">
select instance.*,process.name as process_instance_name
from t_ds_task_instance instance
join t_ds_process_definition define ON instance.process_definition_id = define.id
join t_ds_process_instance process on process.id=instance.process_instance_id
where define.project_id = #{projectId}
<if test="startTime != null">
and instance.start_time > #{startTime} and instance.start_time <![CDATA[ <=]]> #{endTime}
</if>
<if test="processInstanceId != 0">
and instance.process_instance_id = #{processInstanceId}
</if>
<if test="searchVal != null and searchVal != ''">
and instance.name like concat('%', #{searchVal}, '%')
</if>
<if test="taskName != null and taskName != ''">
and instance.name=#{taskName}
</if>
<if test="states != null and states.length != 0">
and instance.state in
<foreach collection="states" index="index" item="i" open="(" separator="," close=")">
#{i}
</foreach>
</if>
<if test="host != null and host != ''">
and instance.host like concat('%', #{host}, '%')
</if>
order by instance.start_time desc
</select>
</mapper>

41
dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/TenantMapper.xml

@@ -1,41 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.TenantMapper">
<select id="queryById" resultType="org.apache.dolphinscheduler.dao.entity.Tenant">
SELECT t.*,q.queue_name,q.queue
FROM t_ds_tenant t,t_ds_queue q
WHERE t.queue_id = q.id
and t.id = #{tenantId}
</select>
<select id="queryByTenantCode" resultType="org.apache.dolphinscheduler.dao.entity.Tenant">
select *
from t_ds_tenant
where tenant_code = #{tenantCode}
</select>
<select id="queryTenantPaging" resultType="org.apache.dolphinscheduler.dao.entity.Tenant">
SELECT t.*,q.queue_name
FROM t_ds_tenant t,t_ds_queue q
WHERE t.queue_id = q.id
<if test="searchVal != null and searchVal != ''">
and t.tenant_name like concat('%', #{searchVal}, '%')
</if>
order by t.update_time desc
</select>
</mapper>

29
dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/UDFUserMapper.xml

@@ -1,29 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.UDFUserMapper">
<delete id="deleteByUserId">
delete from t_ds_relation_udfs_user
where user_id = #{userId}
</delete>
<delete id="deleteByUdfFuncId">
delete from t_ds_relation_udfs_user
where udf_id = #{udfFuncId}
</delete>
</mapper>

71
dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/UdfFuncMapper.xml

@@ -1,71 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.UdfFuncMapper">
<select id="queryUdfByIdStr" resultType="org.apache.dolphinscheduler.dao.entity.UdfFunc">
select *
from t_ds_udfs
where 1 = 1
<if test="ids != null and ids != ''">
and id in
<foreach collection="ids" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
</if>
<if test="funcNames != null and funcNames != ''">
and func_name = #{funcNames}
</if>
order by id asc
</select>
<select id="queryUdfFuncPaging" resultType="org.apache.dolphinscheduler.dao.entity.UdfFunc">
select *
from t_ds_udfs
where 1=1
<if test="searchVal!= null and searchVal != ''">
and func_name like concat('%', #{searchVal}, '%')
</if>
<if test="userId != 0">
and id in (
select udf_id from t_ds_relation_udfs_user where user_id=#{userId}
union select id as udf_id from t_ds_udfs where user_id=#{userId})
</if>
order by create_time desc
</select>
<select id="getUdfFuncByType" resultType="org.apache.dolphinscheduler.dao.entity.UdfFunc">
select *
from t_ds_udfs
where type=#{type}
<if test="userId != 0">
and id in (
select udf_id from t_ds_relation_udfs_user where user_id=#{userId}
union select id as udf_id from t_ds_udfs where user_id=#{userId})
</if>
</select>
<select id="queryUdfFuncExceptUserId" resultType="org.apache.dolphinscheduler.dao.entity.UdfFunc">
select *
from t_ds_udfs
where user_id <![CDATA[ <> ]]> #{userId}
</select>
<select id="queryAuthedUdfFunc" resultType="org.apache.dolphinscheduler.dao.entity.UdfFunc">
SELECT u.*
from t_ds_udfs u,t_ds_relation_udfs_user rel
WHERE u.id = rel.udf_id
AND rel.user_id = #{userId}
</select>
</mapper>

31
dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/UserAlertGroupMapper.xml

@ -1,31 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.UserAlertGroupMapper">
<delete id="deleteByAlertgroupId">
delete from t_ds_relation_user_alertgroup
where alertgroup_id = #{alertgroupId}
</delete>
<select id="listUserByAlertgroupId" resultType="org.apache.dolphinscheduler.dao.entity.User">
SELECT u.*
FROM t_ds_relation_user_alertgroup g_u
JOIN t_ds_user u on g_u.user_id = u.id
WHERE g_u.alertgroup_id = #{alertgroupId}
</select>
</mapper>
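
A matching interface sketch for this mapper, under the same assumptions (@Param bindings and return types are illustrative, not taken from the source):

package org.apache.dolphinscheduler.dao.mapper;

import java.util.List;

import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.ibatis.annotations.Param;

public interface UserAlertGroupMapper {
    // remove every user relation of one alert group
    int deleteByAlertgroupId(@Param("alertgroupId") int alertgroupId);

    // list the users attached to one alert group
    List<User> listUserByAlertgroupId(@Param("alertgroupId") int alertgroupId);
}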

72
dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/UserMapper.xml

@ -1,72 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.UserMapper">
<select id="queryAllGeneralUser" resultType="org.apache.dolphinscheduler.dao.entity.User">
select * from t_ds_user
where user_type=1
</select>
<select id="queryByUserNameAccurately" resultType="org.apache.dolphinscheduler.dao.entity.User">
select * from t_ds_user
where user_name=#{userName}
</select>
<select id="queryUserByNamePassword" resultType="org.apache.dolphinscheduler.dao.entity.User">
select * from t_ds_user
where user_name=#{userName} and user_password = #{password}
</select>
<select id="queryUserPaging" resultType="org.apache.dolphinscheduler.dao.entity.User">
select u.id,u.user_name,u.user_password,u.user_type,u.email,u.phone,u.tenant_id,u.create_time,
u.update_time,t.tenant_name,
case when u.queue <![CDATA[ <> ]]> '' then u.queue else q.queue_name end as queue, q.queue_name
from t_ds_user u
left join t_ds_tenant t on u.tenant_id=t.id
left join t_ds_queue q on t.queue_id = q.id
where 1=1
<if test="userName!=null and userName != ''" >
and u.user_name like concat ('%', #{userName}, '%')
</if>
order by u.update_time desc
</select>
<select id="queryDetailsById" resultType="org.apache.dolphinscheduler.dao.entity.User">
select u.*, t.tenant_name,
case when u.queue <![CDATA[ <> ]]> '' then u.queue else q.queue_name end as queue_name
from t_ds_user u,t_ds_tenant t,t_ds_queue q
WHERE u.tenant_id = t.id and t.queue_id = q.id and u.id = #{userId}
</select>
<select id="queryUserListByAlertGroupId" resultType="org.apache.dolphinscheduler.dao.entity.User">
select u.*
from t_ds_user u, t_ds_relation_user_alertgroup rel
where u.id = rel.user_id AND rel.alertgroup_id = #{alertgroupId}
</select>
<select id="queryUserListByTenant" resultType="org.apache.dolphinscheduler.dao.entity.User">
select *
from t_ds_user
where tenant_id = #{tenantId}
</select>
<select id="queryTenantCodeByUserId" resultType="org.apache.dolphinscheduler.dao.entity.User">
SELECT u.*,t.tenant_code
FROM t_ds_user u, t_ds_tenant t
WHERE u.tenant_id = t.id AND u.id = #{userId}
</select>
<select id="queryUserByToken" resultType="org.apache.dolphinscheduler.dao.entity.User">
select u.*
from t_ds_user u ,t_ds_access_token t
where u.id = t.user_id and token=#{token} and t.expire_time > NOW()
</select>
</mapper>

40
dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/WorkerGroupMapper.xml

@ -1,40 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.WorkerGroupMapper">
<select id="queryAllWorkerGroup" resultType="org.apache.dolphinscheduler.dao.entity.WorkerGroup">
select *
from t_ds_worker_group
order by update_time desc
</select>
<select id="queryWorkerGroupByName" resultType="org.apache.dolphinscheduler.dao.entity.WorkerGroup">
select *
from t_ds_worker_group
where name = #{name}
</select>
<select id="queryListPaging" resultType="org.apache.dolphinscheduler.dao.entity.WorkerGroup">
select *
from t_ds_worker_group
where 1 = 1
<if test="searchVal != null and searchVal != ''">
and name like concat('%', #{searchVal}, '%')
</if>
order by update_time desc
</select>
</mapper>
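
queryListPaging above is written for a MyBatis-Plus style paginated call; a hedged usage sketch follows, where the exact mapper signature is an assumption:

import java.util.List;

import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.dolphinscheduler.dao.entity.WorkerGroup;
import org.apache.dolphinscheduler.dao.mapper.WorkerGroupMapper;

public class WorkerGroupPagingExample {
    // hypothetical helper; queryListPaging(IPage, searchVal) is assumed
    static List<WorkerGroup> firstPage(WorkerGroupMapper mapper, String searchVal) {
        IPage<WorkerGroup> page = new Page<>(1, 10); // page 1, 10 rows per page
        // searchVal feeds the "name like concat('%', #{searchVal}, '%')" branch above
        return mapper.queryListPaging(page, searchVal).getRecords();
    }
}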

79
dockerfile/conf/dolphinscheduler/conf/worker_logback.xml

@ -1,79 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds">
<property name="log.base" value="logs"/>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="TASKLOGFILE" class="ch.qos.logback.classic.sift.SiftingAppender">
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<filter class="org.apache.dolphinscheduler.common.log.TaskLogFilter"></filter>
<Discriminator class="org.apache.dolphinscheduler.common.log.TaskLogDiscriminator">
<key>taskAppId</key>
<logBase>${log.base}</logBase>
</Discriminator>
<sift>
<appender name="FILE-${taskAppId}" class="ch.qos.logback.core.FileAppender">
<file>${log.base}/${taskAppId}.log</file>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
<append>true</append>
</appender>
</sift>
</appender>
<appender name="WORKERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.base}/dolphinscheduler-worker.log</file>
<filter class="org.apache.dolphinscheduler.common.log.WorkerLogFilter">
<level>INFO</level>
</filter>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/dolphinscheduler-worker.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>168</maxHistory>
<maxFileSize>200MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="TASKLOGFILE"/>
<appender-ref ref="WORKERLOGFILE"/>
</root>
</configuration>
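
Any worker-side class logging through SLF4J at INFO or above is captured by the WORKERLOGFILE appender defined above (task logs are routed separately via TaskLogFilter and the sifting appender); a minimal sketch:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class WorkerLoggingExample {
    private static final Logger logger = LoggerFactory.getLogger(WorkerLoggingExample.class);

    public static void main(String[] args) {
        // ends up in logs/dolphinscheduler-worker.log under the rolling policy above
        logger.info("worker event at {}", System.currentTimeMillis());
    }
}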

42
dockerfile/conf/dolphinscheduler/conf/zookeeper.properties

@ -1,42 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#zookeeper cluster
zookeeper.quorum=127.0.0.1:2181
#dolphinscheduler root directory
zookeeper.dolphinscheduler.root=/dolphinscheduler
#zookeeper server directory
zookeeper.dolphinscheduler.dead.servers=/dolphinscheduler/dead-servers
zookeeper.dolphinscheduler.masters=/dolphinscheduler/masters
zookeeper.dolphinscheduler.workers=/dolphinscheduler/workers
#zookeeper lock directory
zookeeper.dolphinscheduler.lock.masters=/dolphinscheduler/lock/masters
zookeeper.dolphinscheduler.lock.workers=/dolphinscheduler/lock/workers
#dolphinscheduler failover directory
zookeeper.dolphinscheduler.lock.failover.masters=/dolphinscheduler/lock/failover/masters
zookeeper.dolphinscheduler.lock.failover.workers=/dolphinscheduler/lock/failover/workers
zookeeper.dolphinscheduler.lock.failover.startup.masters=/dolphinscheduler/lock/failover/startup-masters
#zookeeper session and connection timeout
zookeeper.session.timeout=300
zookeeper.connection.timeout=300
zookeeper.retry.sleep=1000
zookeeper.retry.maxtime=5
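
These keys map naturally onto a Curator client (which DolphinScheduler uses); a hedged sketch wiring the quorum, retry, and timeout values above (hard-coded here instead of loaded from the properties file):

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;

public class ZkClientExample {
    public static void main(String[] args) throws Exception {
        CuratorFramework client = CuratorFrameworkFactory.builder()
                .connectString("127.0.0.1:2181")                   // zookeeper.quorum
                .retryPolicy(new ExponentialBackoffRetry(1000, 5)) // retry.sleep / retry.maxtime
                .sessionTimeoutMs(300)                             // zookeeper.session.timeout
                .connectionTimeoutMs(300)                          // zookeeper.connection.timeout
                .build();
        client.start();
        // probe the dolphinscheduler root configured above
        System.out.println(client.checkExists().forPath("/dolphinscheduler") != null);
        client.close();
    }
}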

26
dockerfile/conf/dolphinscheduler/env/dolphinscheduler_env

@ -0,0 +1,26 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
export HADOOP_HOME=/opt/soft/hadoop
export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop
export SPARK_HOME1=/opt/soft/spark1
export SPARK_HOME2=/opt/soft/spark2
export PYTHON_HOME=/opt/soft/python
export JAVA_HOME=/opt/soft/java
export HIVE_HOME=/opt/soft/hive
export FLINK_HOME=/opt/soft/flink
export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$FLINK_HOME/bin:$PATH

21
dockerfile/conf/dolphinscheduler/conf/quartz.properties → dockerfile/conf/dolphinscheduler/quartz.properties.tpl

@ -18,6 +18,13 @@
#============================================================================
# Configure Main Scheduler Properties
#============================================================================
#org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.StdJDBCDelegate
org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.PostgreSQLDelegate
# postgre
org.quartz.dataSource.myDs.driver = org.postgresql.Driver
org.quartz.dataSource.myDs.URL = jdbc:postgresql://${POSTGRESQL_HOST}:${POSTGRESQL_PORT}/dolphinscheduler?characterEncoding=utf8
org.quartz.dataSource.myDs.user = ${POSTGRESQL_USERNAME}
org.quartz.dataSource.myDs.password = ${POSTGRESQL_PASSWORD}
org.quartz.scheduler.instanceName = DolphinScheduler
org.quartz.scheduler.instanceId = AUTO
org.quartz.scheduler.makeSchedulerThreadDaemon = true
@ -26,7 +33,6 @@ org.quartz.jobStore.useProperties = false
#============================================================================
# Configure ThreadPool
#============================================================================
org.quartz.threadPool.class = org.quartz.simpl.SimpleThreadPool
org.quartz.threadPool.makeThreadsDaemons = true
org.quartz.threadPool.threadCount = 25
@ -35,22 +41,17 @@ org.quartz.threadPool.threadPriority = 5
#============================================================================
# Configure JobStore
#============================================================================
org.quartz.jobStore.class = org.quartz.impl.jdbcjobstore.JobStoreTX
org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.PostgreSQLDelegate
org.quartz.jobStore.tablePrefix = QRTZ_
org.quartz.jobStore.isClustered = true
org.quartz.jobStore.misfireThreshold = 60000
org.quartz.jobStore.clusterCheckinInterval = 5000
org.quartz.jobStore.acquireTriggersWithinLock=true
org.quartz.jobStore.dataSource = myDs
#============================================================================
# Configure Datasources
# Configure Datasources
#============================================================================
org.quartz.dataSource.myDs.connectionProvider.class = org.apache.dolphinscheduler.dao.quartz.DruidConnectionProvider
org.quartz.dataSource.myDs.driver = org.postgresql.Driver
org.quartz.dataSource.myDs.URL=jdbc:postgresql://127.0.0.1:5432/dolphinscheduler
org.quartz.dataSource.myDs.user=root
org.quartz.dataSource.myDs.password=root@123
org.quartz.dataSource.myDs.connectionProvider.class = org.apache.dolphinscheduler.service.quartz.DruidConnectionProvider
org.quartz.dataSource.myDs.maxConnections = 10
org.quartz.dataSource.myDs.validationQuery = select 1
org.quartz.dataSource.myDs.validationQuery = select 1
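
Once startup-init-conf.sh renders the ${POSTGRESQL_*} placeholders, the result is plain Quartz configuration; a minimal bootstrap sketch (the file path is an assumption):

import org.quartz.Scheduler;
import org.quartz.SchedulerFactory;
import org.quartz.impl.StdSchedulerFactory;

public class QuartzBootExample {
    public static void main(String[] args) throws Exception {
        // load the rendered properties file; location is illustrative
        SchedulerFactory factory = new StdSchedulerFactory("conf/quartz.properties");
        Scheduler scheduler = factory.getScheduler();
        scheduler.start();        // joins the clustered JobStoreTX defined above
        scheduler.shutdown(true); // wait for running jobs before exiting
    }
}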

263
dockerfile/conf/maven/settings.xml

@ -1,263 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<!--
| This is the configuration file for Maven. It can be specified at two levels:
|
| 1. User Level. This settings.xml file provides configuration for a single user,
| and is normally provided in ${user.home}/.m2/settings.xml.
|
| NOTE: This location can be overridden with the CLI option:
|
| -s /path/to/user/settings.xml
|
| 2. Global Level. This settings.xml file provides configuration for all Maven
| users on a machine (assuming they're all using the same Maven
| installation). It's normally provided in
| ${maven.home}/conf/settings.xml.
|
| NOTE: This location can be overridden with the CLI option:
|
| -gs /path/to/global/settings.xml
|
| The sections in this sample file are intended to give you a running start at
| getting the most out of your Maven installation. Where appropriate, the default
| values (values used when the setting is not specified) are provided.
|
|-->
<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">
<!-- localRepository
| The path to the local repository maven will use to store artifacts.
|
| Default: ${user.home}/.m2/repository
<localRepository>/path/to/local/repo</localRepository>
-->
<!-- interactiveMode
| This will determine whether maven prompts you when it needs input. If set to false,
| maven will use a sensible default value, perhaps based on some other setting, for
| the parameter in question.
|
| Default: true
<interactiveMode>true</interactiveMode>
-->
<!-- offline
| Determines whether maven should attempt to connect to the network when executing a build.
| This will have an effect on artifact downloads, artifact deployment, and others.
|
| Default: false
<offline>false</offline>
-->
<!-- pluginGroups
| This is a list of additional group identifiers that will be searched when resolving plugins by their prefix, i.e.
| when invoking a command line like "mvn prefix:goal". Maven will automatically add the group identifiers
| "org.apache.maven.plugins" and "org.codehaus.mojo" if these are not already contained in the list.
|-->
<pluginGroups>
<!-- pluginGroup
| Specifies a further group identifier to use for plugin lookup.
<pluginGroup>com.your.plugins</pluginGroup>
-->
</pluginGroups>
<!-- proxies
| This is a list of proxies which can be used on this machine to connect to the network.
| Unless otherwise specified (by system property or command-line switch), the first proxy
| specification in this list marked as active will be used.
|-->
<proxies>
<!-- proxy
| Specification for one proxy, to be used in connecting to the network.
|
<proxy>
<id>optional</id>
<active>true</active>
<protocol>http</protocol>
<username>proxyuser</username>
<password>proxypass</password>
<host>proxy.host.net</host>
<port>80</port>
<nonProxyHosts>local.net|some.host.com</nonProxyHosts>
</proxy>
-->
</proxies>
<!-- servers
| This is a list of authentication profiles, keyed by the server-id used within the system.
| Authentication profiles can be used whenever maven must make a connection to a remote server.
|-->
<servers>
<!-- server
| Specifies the authentication information to use when connecting to a particular server, identified by
| a unique name within the system (referred to by the 'id' attribute below).
|
| NOTE: You should either specify username/password OR privateKey/passphrase, since these pairings are
| used together.
|
<server>
<id>deploymentRepo</id>
<username>repouser</username>
<password>repopwd</password>
</server>
-->
<!-- Another sample, using keys to authenticate.
<server>
<id>siteServer</id>
<privateKey>/path/to/private/key</privateKey>
<passphrase>optional; leave empty if not used.</passphrase>
</server>
-->
</servers>
<!-- mirrors
| This is a list of mirrors to be used in downloading artifacts from remote repositories.
|
| It works like this: a POM may declare a repository to use in resolving certain artifacts.
| However, this repository may have problems with heavy traffic at times, so people have mirrored
| it to several places.
|
| That repository definition will have a unique id, so we can create a mirror reference for that
| repository, to be used as an alternate download site. The mirror site will be the preferred
| server for that repository.
|-->
<mirrors>
<!-- mirror
| Specifies a repository mirror site to use instead of a given repository. The repository that
| this mirror serves has an ID that matches the mirrorOf element of this mirror. IDs are used
| for inheritance and direct lookup purposes, and must be unique across the set of mirrors.
|
<mirror>
<id>mirrorId</id>
<mirrorOf>repositoryId</mirrorOf>
<name>Human Readable Name for this Mirror.</name>
<url>http://my.repository.com/repo/path</url>
</mirror>
-->
<mirror>
<id>nexus-aliyun</id>
<mirrorOf>central</mirrorOf>
<name>Nexus aliyun</name>
<url>http://maven.aliyun.com/nexus/content/groups/public</url>
</mirror>
</mirrors>
<!-- profiles
| This is a list of profiles which can be activated in a variety of ways, and which can modify
| the build process. Profiles provided in the settings.xml are intended to provide local machine-
| specific paths and repository locations which allow the build to work in the local environment.
|
| For example, if you have an integration testing plugin - like cactus - that needs to know where
| your Tomcat instance is installed, you can provide a variable here such that the variable is
| dereferenced during the build process to configure the cactus plugin.
|
| As noted above, profiles can be activated in a variety of ways. One way - the activeProfiles
| section of this document (settings.xml) - will be discussed later. Another way essentially
| relies on the detection of a system property, either matching a particular value for the property,
| or merely testing its existence. Profiles can also be activated by JDK version prefix, where a
| value of '1.4' might activate a profile when the build is executed on a JDK version of '1.4.2_07'.
| Finally, the list of active profiles can be specified directly from the command line.
|
| NOTE: For profiles defined in the settings.xml, you are restricted to specifying only artifact
| repositories, plugin repositories, and free-form properties to be used as configuration
| variables for plugins in the POM.
|
|-->
<profiles>
<!-- profile
| Specifies a set of introductions to the build process, to be activated using one or more of the
| mechanisms described above. For inheritance purposes, and to activate profiles via <activatedProfiles/>
| or the command line, profiles have to have an ID that is unique.
|
| An encouraged best practice for profile identification is to use a consistent naming convention
| for profiles, such as 'env-dev', 'env-test', 'env-production', 'user-jdcasey', 'user-brett', etc.
| This will make it more intuitive to understand what the set of introduced profiles is attempting
| to accomplish, particularly when you only have a list of profile id's for debug.
|
| This profile example uses the JDK version to trigger activation, and provides a JDK-specific repo.
<profile>
<id>jdk-1.4</id>
<activation>
<jdk>1.4</jdk>
</activation>
<repositories>
<repository>
<id>jdk14</id>
<name>Repository for JDK 1.4 builds</name>
<url>http://www.myhost.com/maven/jdk14</url>
<layout>default</layout>
<snapshotPolicy>always</snapshotPolicy>
</repository>
</repositories>
</profile>
-->
<!--
| Here is another profile, activated by the system property 'target-env' with a value of 'dev',
| which provides a specific path to the Tomcat instance. To use this, your plugin configuration
| might hypothetically look like:
|
| ...
| <plugin>
| <groupId>org.myco.myplugins</groupId>
| <artifactId>myplugin</artifactId>
|
| <configuration>
| <tomcatLocation>${tomcatPath}</tomcatLocation>
| </configuration>
| </plugin>
| ...
|
| NOTE: If you just wanted to inject this configuration whenever someone set 'target-env' to
| anything, you could just leave off the <value/> inside the activation-property.
|
<profile>
<id>env-dev</id>
<activation>
<property>
<name>target-env</name>
<value>dev</value>
</property>
</activation>
<properties>
<tomcatPath>/path/to/tomcat/instance</tomcatPath>
</properties>
</profile>
-->
</profiles>
<!-- activeProfiles
| List of profiles that are active for all builds.
|
<activeProfiles>
<activeProfile>alwaysActiveProfile</activeProfile>
<activeProfile>anotherAlwaysActiveProfile</activeProfile>
</activeProfiles>
-->
</settings>

4
dockerfile/conf/nginx/dolphinscheduler.conf

@ -21,11 +21,11 @@ server {
#charset koi8-r;
#access_log /var/log/nginx/host.access.log main;
location / {
root /opt/dolphinscheduler_source/dolphinscheduler-ui/dist;
root /opt/dolphinscheduler/ui;
index index.html index.htm;
}
location /dolphinscheduler {
proxy_pass http://127.0.0.1:12345;
proxy_pass http://FRONTEND_API_SERVER_HOST:FRONTEND_API_SERVER_PORT;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header x_real_ip $remote_addr;

31
dockerfile/hooks/build

@ -16,9 +16,38 @@
# limitations under the License.
#
set -e
echo "------ dolphinscheduler start - build -------"
printenv
docker build --build-arg version=$version --build-arg tar_version=$tar_version -t $DOCKER_REPO:$version .
if [ -z "${VERSION}" ]
then
echo "set default environment variable [VERSION]"
VERSION=$(cat $(pwd)/sql/soft_version)
fi
if [ "${DOCKER_REPO}x" = "x" ]
then
echo "set default environment variable [DOCKER_REPO]"
DOCKER_REPO='dolphinscheduler'
fi
echo "Version: $VERSION"
echo "Repo: $DOCKER_REPO"
echo -e "Current Directory is $(pwd)\n"
# maven package(Project Directory)
echo -e "mvn -B clean compile package -Prelease -Dmaven.test.skip=true"
mvn -B clean compile package -Prelease -Dmaven.test.skip=true
# mv dolphinscheduler-bin.tar.gz file to dockerfile directory
echo -e "mv $(pwd)/dolphinscheduler-dist/target/apache-dolphinscheduler-incubating-${VERSION}-SNAPSHOT-dolphinscheduler-bin.tar.gz $(pwd)/dockerfile/\n"
mv $(pwd)/dolphinscheduler-dist/target/apache-dolphinscheduler-incubating-${VERSION}-SNAPSHOT-dolphinscheduler-bin.tar.gz $(pwd)/dockerfile/
# docker build
echo -e "docker build --build-arg VERSION=${VERSION} -t $DOCKER_REPO:${VERSION} $(pwd)/dockerfile/\n"
docker build --build-arg VERSION=${VERSION} -t $DOCKER_REPO:${VERSION} $(pwd)/dockerfile/
echo "------ dolphinscheduler end - build -------"

56
dockerfile/hooks/build.bat

@ -0,0 +1,56 @@
:: Licensed to the Apache Software Foundation (ASF) under one or more
:: contributor license agreements. See the NOTICE file distributed with
:: this work for additional information regarding copyright ownership.
:: The ASF licenses this file to You under the Apache License, Version 2.0
:: (the "License"); you may not use this file except in compliance with
:: the License. You may obtain a copy of the License at
::
:: http://www.apache.org/licenses/LICENSE-2.0
::
:: Unless required by applicable law or agreed to in writing, software
:: distributed under the License is distributed on an "AS IS" BASIS,
:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
:: See the License for the specific language governing permissions and
:: limitations under the License.
::
@echo off
echo "------ dolphinscheduler start - build -------"
set
if not defined VERSION (
echo "set environment variable [VERSION]"
for /f %%l in (%cd%\sql\soft_version) do (set VERSION=%%l)
)
if not defined DOCKER_REPO (
echo "set environment variable [DOCKER_REPO]"
set DOCKER_REPO='dolphinscheduler'
)
echo "Version: %VERSION%"
echo "Repo: %DOCKER_REPO%"
echo "Current Directory is %cd%"
:: maven package(Project Directory)
echo "call mvn clean compile package -Prelease"
call mvn clean compile package -Prelease -DskipTests=true
if "%errorlevel%"=="1" goto :mvnFailed
:: move dolphinscheduler-bin.tar.gz file to dockerfile directory
echo "move %cd%\dolphinscheduler-dist\target\apache-dolphinscheduler-incubating-%VERSION%-SNAPSHOT-dolphinscheduler-bin.tar.gz %cd%\dockerfile\"
move %cd%\dolphinscheduler-dist\target\apache-dolphinscheduler-incubating-%VERSION%-SNAPSHOT-dolphinscheduler-bin.tar.gz %cd%\dockerfile\
:: docker build
echo "docker build --build-arg VERSION=%VERSION% -t %DOCKER_REPO%:%VERSION% %cd%\dockerfile\"
docker build --build-arg VERSION=%VERSION% -t %DOCKER_REPO%:%VERSION% %cd%\dockerfile\
if "%errorlevel%"=="1" goto :dockerBuildFailed
echo "------ dolphinscheduler end - build -------"
:mvnFailed
echo "MAVEN PACKAGE FAILED!"
:dockerBuildFailed
echo "DOCKER BUILD FAILED!"

27
dockerfile/hooks/check

@ -0,0 +1,27 @@
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
echo "------ dolphinscheduler check - server - status -------"
sleep 20
server_num=$(docker top `docker container list | grep startup | awk '{print $1}'`| grep java | grep "dolphinscheduler" | awk -F 'classpath ' '{print $2}' | awk '{print $2}' | sort | uniq -c | wc -l)
if [ $server_num -eq 5 ]
then
echo "Server all start successfully"
else
echo "Server start failed "$server_num
exit 1
fi

2
dockerfile/hooks/push

@ -19,6 +19,6 @@
echo "------ push start -------"
printenv
docker push $DOCKER_REPO:$version
docker push $DOCKER_REPO:${VERSION}
echo "------ push end -------"

23
dockerfile/hooks/push.bat

@ -0,0 +1,23 @@
:: Licensed to the Apache Software Foundation (ASF) under one or more
:: contributor license agreements. See the NOTICE file distributed with
:: this work for additional information regarding copyright ownership.
:: The ASF licenses this file to You under the Apache License, Version 2.0
:: (the "License"); you may not use this file except in compliance with
:: the License. You may obtain a copy of the License at
::
:: http://www.apache.org/licenses/LICENSE-2.0
::
:: Unless required by applicable law or agreed to in writing, software
:: distributed under the License is distributed on an "AS IS" BASIS,
:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
:: See the License for the specific language governing permissions and
:: limitations under the License.
::
@echo off
echo "------ push start -------"
set
docker push %DOCKER_REPO%:%VERSION%
echo "------ push end -------"

100
dockerfile/startup-init-conf.sh

@ -0,0 +1,100 @@
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -e
echo "init env variables"
# Define parameters default value.
#============================================================================
# Database Source
#============================================================================
export POSTGRESQL_HOST=${POSTGRESQL_HOST:-"127.0.0.1"}
export POSTGRESQL_PORT=${POSTGRESQL_PORT:-"5432"}
export POSTGRESQL_USERNAME=${POSTGRESQL_USERNAME:-"root"}
export POSTGRESQL_PASSWORD=${POSTGRESQL_PASSWORD:-"root"}
#============================================================================
# System
#============================================================================
export DOLPHINSCHEDULER_ENV_PATH=${DOLPHINSCHEDULER_ENV_PATH:-"/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh"}
#============================================================================
# Zookeeper
#============================================================================
export TASK_QUEUE=${TASK_QUEUE:-"zookeeper"}
export ZOOKEEPER_QUORUM=${ZOOKEEPER_QUORUM:-"127.0.0.1:2181"}
#============================================================================
# Master Server
#============================================================================
export MASTER_EXEC_THREADS=${MASTER_EXEC_THREADS:-"100"}
export MASTER_EXEC_TASK_NUM=${MASTER_EXEC_TASK_NUM:-"20"}
export MASTER_HEARTBEAT_INTERVAL=${MASTER_HEARTBEAT_INTERVAL:-"10"}
export MASTER_TASK_COMMIT_RETRYTIMES=${MASTER_TASK_COMMIT_RETRYTIMES:-"5"}
export MASTER_TASK_COMMIT_INTERVAL=${MASTER_TASK_COMMIT_INTERVAL:-"1000"}
export MASTER_MAX_CPULOAD_AVG=${MASTER_MAX_CPULOAD_AVG:-"100"}
export MASTER_RESERVED_MEMORY=${MASTER_RESERVED_MEMORY:-"0.1"}
#============================================================================
# Worker Server
#============================================================================
export WORKER_EXEC_THREADS=${WORKER_EXEC_THREADS:-"100"}
export WORKER_HEARTBEAT_INTERVAL=${WORKER_HEARTBEAT_INTERVAL:-"10"}
export WORKER_FETCH_TASK_NUM=${WORKER_FETCH_TASK_NUM:-"3"}
export WORKER_MAX_CPULOAD_AVG=${WORKER_MAX_CPULOAD_AVG:-"100"}
export WORKER_RESERVED_MEMORY=${WORKER_RESERVED_MEMORY:-"0.1"}
#============================================================================
# Alert Server
#============================================================================
# XLS FILE
export XLS_FILE_PATH=${XLS_FILE_PATH:-"/tmp/xls"}
# mail
export MAIL_SERVER_HOST=${MAIL_SERVER_HOST:-""}
export MAIL_SERVER_PORT=${MAIL_SERVER_PORT:-""}
export MAIL_SENDER=${MAIL_SENDER:-""}
export MAIL_USER=${MAIL_USER:-""}
export MAIL_PASSWD=${MAIL_PASSWD:-""}
export MAIL_SMTP_STARTTLS_ENABLE=${MAIL_SMTP_STARTTLS_ENABLE:-"true"}
export MAIL_SMTP_SSL_ENABLE=${MAIL_SMTP_SSL_ENABLE:-"false"}
export MAIL_SMTP_SSL_TRUST=${MAIL_SMTP_SSL_TRUST:-""}
# wechat
export ENTERPRISE_WECHAT_ENABLE=${ENTERPRISE_WECHAT_ENABLE:-"false"}
export ENTERPRISE_WECHAT_CORP_ID=${ENTERPRISE_WECHAT_CORP_ID:-""}
export ENTERPRISE_WECHAT_SECRET=${ENTERPRISE_WECHAT_SECRET:-""}
export ENTERPRISE_WECHAT_AGENT_ID=${ENTERPRISE_WECHAT_AGENT_ID:-""}
export ENTERPRISE_WECHAT_USERS=${ENTERPRISE_WECHAT_USERS:-""}
#============================================================================
# Frontend
#============================================================================
export FRONTEND_API_SERVER_HOST=${FRONTEND_API_SERVER_HOST:-"127.0.0.1"}
export FRONTEND_API_SERVER_PORT=${FRONTEND_API_SERVER_PORT:-"12345"}
echo "generate app config"
ls ${DOLPHINSCHEDULER_HOME}/conf/ | grep ".tpl" | while read line; do
eval "cat << EOF
$(cat ${DOLPHINSCHEDULER_HOME}/conf/${line})
EOF
" > ${DOLPHINSCHEDULER_HOME}/conf/${line%.*}
done
echo "generate nginx config"
sed -i "s/FRONTEND_API_SERVER_HOST/${FRONTEND_API_SERVER_HOST}/g" /etc/nginx/conf.d/dolphinscheduler.conf
sed -i "s/FRONTEND_API_SERVER_PORT/${FRONTEND_API_SERVER_PORT}/g" /etc/nginx/conf.d/dolphinscheduler.conf

223
dockerfile/startup.sh

@ -17,59 +17,176 @@
#
set -e
echo "start postgresql service"
/etc/init.d/postgresql restart
echo "create user and init db"
sudo -u postgres psql <<'ENDSSH'
create user root with password 'root@123';
create database dolphinscheduler owner root;
grant all privileges on database dolphinscheduler to root;
\q
ENDSSH
echo "import sql data"
/opt/dolphinscheduler/script/create-dolphinscheduler.sh
/opt/zookeeper/bin/zkServer.sh restart
sleep 90
echo "start api-server"
/opt/dolphinscheduler/bin/dolphinscheduler-daemon.sh stop api-server
/opt/dolphinscheduler/bin/dolphinscheduler-daemon.sh start api-server
echo "start master-server"
/opt/dolphinscheduler/bin/dolphinscheduler-daemon.sh stop master-server
python /opt/dolphinscheduler/script/del-zk-node.py 127.0.0.1 /dolphinscheduler/masters
/opt/dolphinscheduler/bin/dolphinscheduler-daemon.sh start master-server
echo "start worker-server"
/opt/dolphinscheduler/bin/dolphinscheduler-daemon.sh stop worker-server
python /opt/dolphinscheduler/script/del-zk-node.py 127.0.0.1 /dolphinscheduler/workers
/opt/dolphinscheduler/bin/dolphinscheduler-daemon.sh start worker-server
echo "start logger-server"
/opt/dolphinscheduler/bin/dolphinscheduler-daemon.sh stop logger-server
/opt/dolphinscheduler/bin/dolphinscheduler-daemon.sh start logger-server
echo "start alert-server"
/opt/dolphinscheduler/bin/dolphinscheduler-daemon.sh stop alert-server
/opt/dolphinscheduler/bin/dolphinscheduler-daemon.sh start alert-server
echo "start nginx"
/etc/init.d/nginx stop
nginx &
DOLPHINSCHEDULER_BIN=${DOLPHINSCHEDULER_HOME}/bin
DOLPHINSCHEDULER_SCRIPT=${DOLPHINSCHEDULER_HOME}/script
DOLPHINSCHEDULER_LOGS=${DOLPHINSCHEDULER_HOME}/logs
# start postgresql
initPostgreSQL() {
echo "checking postgresql"
if [ -n "$(ifconfig | grep ${POSTGRESQL_HOST})" ]; then
echo "start postgresql service"
rc-service postgresql restart
# role if not exists, create
flag=$(sudo -u postgres psql -tAc "SELECT 1 FROM pg_roles WHERE rolname='${POSTGRESQL_USERNAME}'")
if [ -z "${flag}" ]; then
echo "create user"
sudo -u postgres psql -tAc "create user ${POSTGRESQL_USERNAME} with password '${POSTGRESQL_PASSWORD}'"
fi
# database if not exists, create
flag=$(sudo -u postgres psql -tAc "select 1 from pg_database where datname='dolphinscheduler'")
if [ -z "${flag}" ]; then
echo "init db"
sudo -u postgres psql -tAc "create database dolphinscheduler owner ${POSTGRESQL_USERNAME}"
fi
# grant
sudo -u postgres psql -tAc "grant all privileges on database dolphinscheduler to ${POSTGRESQL_USERNAME}"
fi
echo "connect postgresql service"
v=$(sudo -u postgres PGPASSWORD=${POSTGRESQL_PASSWORD} psql -h ${POSTGRESQL_HOST} -U ${POSTGRESQL_USERNAME} -d dolphinscheduler -tAc "select 1")
if [ "$(echo '${v}' | grep 'FATAL' | wc -l)" -eq 1 ]; then
echo "Can't connect to database...${v}"
exit 1
fi
echo "import sql data"
${DOLPHINSCHEDULER_SCRIPT}/create-dolphinscheduler.sh
}
# start zk
initZK() {
echo -e "checking zookeeper"
if [[ "${ZOOKEEPER_QUORUM}" = "127.0.0.1:2181" || "${ZOOKEEPER_QUORUM}" = "localhost:2181" ]]; then
echo "start local zookeeper"
/opt/zookeeper/bin/zkServer.sh restart
else
echo "connect remote zookeeper"
echo "${ZOOKEEPER_QUORUM}" | awk -F ',' 'BEGIN{ i=1 }{ while( i <= NF ){ print $i; i++ } }' | while read line; do
while ! nc -z ${line%:*} ${line#*:}; do
counter=$((counter+1))
if [ $counter == 30 ]; then
log "Error: Couldn't connect to zookeeper."
exit 1
fi
log "Trying to connect to zookeeper at ${line}. Attempt $counter."
sleep 5
done
done
fi
}
# start nginx
initNginx() {
echo "start nginx"
nginx &
}
# start master-server
initMasterServer() {
echo "start master-server"
${DOLPHINSCHEDULER_BIN}/dolphinscheduler-daemon.sh stop master-server
${DOLPHINSCHEDULER_BIN}/dolphinscheduler-daemon.sh start master-server
}
# start worker-server
initWorkerServer() {
echo "start worker-server"
${DOLPHINSCHEDULER_BIN}/dolphinscheduler-daemon.sh stop worker-server
${DOLPHINSCHEDULER_BIN}/dolphinscheduler-daemon.sh start worker-server
}
# start api-server
initApiServer() {
echo "start api-server"
${DOLPHINSCHEDULER_BIN}/dolphinscheduler-daemon.sh stop api-server
${DOLPHINSCHEDULER_BIN}/dolphinscheduler-daemon.sh start api-server
}
# start logger-server
initLoggerServer() {
echo "start logger-server"
${DOLPHINSCHEDULER_BIN}/dolphinscheduler-daemon.sh stop logger-server
${DOLPHINSCHEDULER_BIN}/dolphinscheduler-daemon.sh start logger-server
}
# start alert-server
initAlertServer() {
echo "start alert-server"
${DOLPHINSCHEDULER_BIN}/dolphinscheduler-daemon.sh stop alert-server
${DOLPHINSCHEDULER_BIN}/dolphinscheduler-daemon.sh start alert-server
}
# print usage
printUsage() {
echo -e "Dolphin Scheduler is a distributed and easy-to-expand visual DAG workflow scheduling system,"
echo -e "dedicated to solving the complex dependencies in data processing, making the scheduling system out of the box for data processing.\n"
echo -e "Usage: [ all | master-server | worker-server | api-server | alert-server | frontend ]\n"
printf "%-13s: %s\n" "all" "Run master-server, worker-server, api-server, alert-server and frontend."
printf "%-13s: %s\n" "master-server" "MasterServer is mainly responsible for DAG task split, task submission monitoring."
printf "%-13s: %s\n" "worker-server" "WorkerServer is mainly responsible for task execution and providing log services.."
printf "%-13s: %s\n" "api-server" "ApiServer is mainly responsible for processing requests from the front-end UI layer."
printf "%-13s: %s\n" "alert-server" "AlertServer mainly include Alarms."
printf "%-13s: %s\n" "frontend" "Frontend mainly provides various visual operation interfaces of the system."
}
# init config file
source /root/startup-init-conf.sh
LOGFILE=/var/log/nginx/access.log
case "$1" in
(all)
initZK
initPostgreSQL
initMasterServer
initWorkerServer
initApiServer
initAlertServer
initLoggerServer
initNginx
LOGFILE=/var/log/nginx/access.log
;;
(master-server)
initZK
initPostgreSQL
initMasterServer
LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-master.log
;;
(worker-server)
initZK
initPostgreSQL
initWorkerServer
initLoggerServer
LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-worker.log
;;
(api-server)
initPostgreSQL
initApiServer
LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-api-server.log
;;
(alert-server)
initPostgreSQL
initAlertServer
LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-alert.log
;;
(frontend)
initNginx
LOGFILE=/var/log/nginx/access.log
;;
(help)
printUsage
exit 1
;;
(*)
printUsage
exit 1
;;
esac
echo "tee begin"
exec tee ${LOGFILE}
while true
do
sleep 101
done
exec "$@"

4
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/Constants.java

@ -20,7 +20,9 @@ package org.apache.dolphinscheduler.alert.utils;
* constants
*/
public class Constants {
private Constants() {
throw new IllegalStateException("Constants class");
}
/**
* alert properties path
*/

43
dolphinscheduler-api/pom.xml

@ -31,12 +31,6 @@
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-alert</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-dao</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
@ -129,13 +123,13 @@
</dependency>
<dependency>
<groupId>com.github.xiaoymin</groupId>
<artifactId>swagger-bootstrap-ui</artifactId>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-service</artifactId>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-rpc</artifactId>
<groupId>com.github.xiaoymin</groupId>
<artifactId>swagger-bootstrap-ui</artifactId>
</dependency>
<dependency>
@ -152,6 +146,12 @@
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<exclusions>
<exclusion>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
@ -162,11 +162,23 @@
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<exclusions>
<exclusion>
<artifactId>servlet-api</artifactId>
<groupId>javax.servlet</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-common</artifactId>
<exclusions>
<exclusion>
<artifactId>servlet-api</artifactId>
<groupId>javax.servlet</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
@ -174,14 +186,15 @@
<artifactId>hadoop-aws</artifactId>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
</dependency>
<dependency>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jsp-2.1</artifactId>
<exclusions>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>servlet-api-2.5</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- just for test -->

12
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessInstanceController.java

@ -22,12 +22,12 @@ import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.Flag;
import org.apache.dolphinscheduler.common.queue.ITaskQueue;
import org.apache.dolphinscheduler.common.queue.TaskQueueFactory;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.User;
import io.swagger.annotations.*;
import org.apache.dolphinscheduler.service.queue.ITaskQueue;
import org.apache.dolphinscheduler.service.queue.TaskQueueFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
@ -75,6 +75,7 @@ public class ProcessInstanceController extends BaseController{
@ApiImplicitParams({
@ApiImplicitParam(name = "processDefinitionId", value = "PROCESS_DEFINITION_ID", dataType = "Int", example = "100"),
@ApiImplicitParam(name = "searchVal", value = "SEARCH_VAL", type ="String"),
@ApiImplicitParam(name = "executorName", value = "EXECUTOR_NAME", type ="String"),
@ApiImplicitParam(name = "stateType", value = "EXECUTION_STATUS", type ="ExecutionStatus"),
@ApiImplicitParam(name = "host", value = "HOST", type ="String"),
@ApiImplicitParam(name = "startDate", value = "START_DATE", type ="String"),
@ -88,6 +89,7 @@ public class ProcessInstanceController extends BaseController{
@ApiParam(name = "projectName", value = "PROJECT_NAME", required = true) @PathVariable String projectName,
@RequestParam(value = "processDefinitionId", required = false, defaultValue = "0") Integer processDefinitionId,
@RequestParam(value = "searchVal", required = false) String searchVal,
@RequestParam(value = "executorName", required = false) String executorName,
@RequestParam(value = "stateType", required = false) ExecutionStatus stateType,
@RequestParam(value = "host", required = false) String host,
@RequestParam(value = "startDate", required = false) String startTime,
@ -96,12 +98,12 @@ public class ProcessInstanceController extends BaseController{
@RequestParam("pageSize") Integer pageSize){
try{
logger.info("query all process instance list, login user:{},project name:{}, define id:{}," +
"search value:{},state type:{},host:{},start time:{}, end time:{},page number:{}, page size:{}",
loginUser.getUserName(), projectName, processDefinitionId, searchVal, stateType,host,
"search value:{},executor name:{},state type:{},host:{},start time:{}, end time:{},page number:{}, page size:{}",
loginUser.getUserName(), projectName, processDefinitionId, searchVal, executorName,stateType,host,
startTime, endTime, pageNo, pageSize);
searchVal = ParameterUtils.handleEscapes(searchVal);
Map<String, Object> result = processInstanceService.queryProcessInstanceList(
loginUser, projectName, processDefinitionId, startTime, endTime, searchVal, stateType, host, pageNo, pageSize);
loginUser, projectName, processDefinitionId, startTime, endTime, searchVal, executorName, stateType, host, pageNo, pageSize);
return returnDataListPaging(result);
}catch (Exception e){
logger.error(QUERY_PROCESS_INSTANCE_LIST_PAGING_ERROR.getMsg(),e);

8
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/TaskInstanceController.java

@ -69,6 +69,7 @@ public class TaskInstanceController extends BaseController{
@ApiImplicitParam(name = "processInstanceId", value = "PROCESS_INSTANCE_ID",required = false, dataType = "Int", example = "100"),
@ApiImplicitParam(name = "searchVal", value = "SEARCH_VAL", type ="String"),
@ApiImplicitParam(name = "taskName", value = "TASK_NAME", type ="String"),
@ApiImplicitParam(name = "executorName", value = "EXECUTOR_NAME", type ="String"),
@ApiImplicitParam(name = "stateType", value = "EXECUTION_STATUS", type ="ExecutionStatus"),
@ApiImplicitParam(name = "host", value = "HOST", type ="String"),
@ApiImplicitParam(name = "startDate", value = "START_DATE", type ="String"),
@ -83,6 +84,7 @@ public class TaskInstanceController extends BaseController{
@RequestParam(value = "processInstanceId", required = false, defaultValue = "0") Integer processInstanceId,
@RequestParam(value = "searchVal", required = false) String searchVal,
@RequestParam(value = "taskName", required = false) String taskName,
@RequestParam(value = "executorName", required = false) String executorName,
@RequestParam(value = "stateType", required = false) ExecutionStatus stateType,
@RequestParam(value = "host", required = false) String host,
@RequestParam(value = "startDate", required = false) String startTime,
@ -91,11 +93,11 @@ public class TaskInstanceController extends BaseController{
@RequestParam("pageSize") Integer pageSize){
try{
logger.info("query task instance list, project name:{},process instance:{}, search value:{},task name:{}, state type:{}, host:{}, start:{}, end:{}",
projectName, processInstanceId, searchVal, taskName, stateType, host, startTime, endTime);
logger.info("query task instance list, project name:{},process instance:{}, search value:{},task name:{}, executor name: {},state type:{}, host:{}, start:{}, end:{}",
projectName, processInstanceId, searchVal, taskName, executorName, stateType, host, startTime, endTime);
searchVal = ParameterUtils.handleEscapes(searchVal);
Map<String, Object> result = taskInstanceService.queryTaskListPaging(
loginUser, projectName, processInstanceId, taskName, startTime, endTime, searchVal, stateType, host, pageNo, pageSize);
loginUser, projectName, processInstanceId, taskName, executorName, startTime, endTime, searchVal, stateType, host, pageNo, pageSize);
return returnDataListPaging(result);
}catch (Exception e){
logger.error(Status.QUERY_TASK_LIST_PAGING_ERROR.getMsg(),e);

137
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/log/LogClient.java

@ -1,137 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.log;
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
import io.grpc.StatusRuntimeException;
import org.apache.dolphinscheduler.rpc.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.TimeUnit;
/**
* log client
*/
public class LogClient {
private static final Logger logger = LoggerFactory.getLogger(LogClient.class);
private final ManagedChannel channel;
private final LogViewServiceGrpc.LogViewServiceBlockingStub blockingStub;
/**
* construct client connecting to HelloWorld server at {@code host:port}
*
* @param host host
* @param port port
*/
public LogClient(String host, int port) {
this(ManagedChannelBuilder.forAddress(host, port)
// Channels are secure by default (via SSL/TLS). For the example we disable TLS to avoid
// needing certificates.
.usePlaintext(true));
}
/**
* construct client for accessing RouteGuide server using the existing channel
*
*/
LogClient(ManagedChannelBuilder<?> channelBuilder) {
/**
* set max read size
*/
channelBuilder.maxInboundMessageSize(Integer.MAX_VALUE);
channel = channelBuilder.build();
blockingStub = LogViewServiceGrpc.newBlockingStub(channel);
}
/**
* shutdown
*
* @throws InterruptedException InterruptedException
*/
public void shutdown() throws InterruptedException {
channel.shutdown().awaitTermination(5, TimeUnit.SECONDS);
}
/**
* roll view log
*
* @param path path
* @param skipLineNum skip line number
* @param limit limit
* @return log content
*/
public String rollViewLog(String path,int skipLineNum,int limit) {
logger.info("roll view log : path {},skipLineNum {} ,limit {}", path, skipLineNum, limit);
LogParameter pathParameter = LogParameter
.newBuilder()
.setPath(path)
.setSkipLineNum(skipLineNum)
.setLimit(limit)
.build();
RetStrInfo retStrInfo;
try {
retStrInfo = blockingStub.rollViewLog(pathParameter);
return retStrInfo.getMsg();
} catch (StatusRuntimeException e) {
logger.error("roll view log error", e);
return null;
}
}
/**
* view log
*
* @param path path
* @return log content
*/
public String viewLog(String path) {
logger.info("view log path {}",path);
PathParameter pathParameter = PathParameter.newBuilder().setPath(path).build();
RetStrInfo retStrInfo;
try {
retStrInfo = blockingStub.viewLog(pathParameter);
return retStrInfo.getMsg();
} catch (StatusRuntimeException e) {
logger.error("view log error", e);
return null;
}
}
/**
* get log size
*
* @param path log path
* @return log content bytes
*/
public byte[] getLogBytes(String path) {
logger.info("log path {}",path);
PathParameter pathParameter = PathParameter.newBuilder().setPath(path).build();
RetByteInfo retByteInfo;
try {
retByteInfo = blockingStub.getLogBytes(pathParameter);
return retByteInfo.getData().toByteArray();
} catch (StatusRuntimeException e) {
logger.error("log size error", e);
return null;
}
}
}
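
For context, a hypothetical caller of the LogClient removed here, using only the API shown above (host and port are placeholders):

public class LogClientUsageExample {
    public static void main(String[] args) throws InterruptedException {
        LogClient client = new LogClient("worker-host", 50051);    // host/port assumed
        String tail = client.rollViewLog("/tmp/task.log", 0, 100); // skip 0 lines, read 100
        byte[] whole = client.getLogBytes("/tmp/task.log");
        client.shutdown();
        System.out.println(tail == null ? -1 : tail.length());
        System.out.println(whole == null ? -1 : whole.length);
    }
}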

40
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/BaseDAGService.java

@ -20,12 +20,11 @@ import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.process.ProcessDag;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessData;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.utils.DagHelper;
import java.util.ArrayList;
import java.util.List;
/**
@ -48,41 +47,8 @@ public class BaseDAGService extends BaseService{
List<TaskNode> taskNodeList = processData.getTasks();
List<TaskNodeRelation> taskNodeRelations = new ArrayList<>();
ProcessDag processDag = DagHelper.getProcessDag(taskNodeList);
//Traversing node information and building relationships
for (TaskNode taskNode : taskNodeList) {
String preTasks = taskNode.getPreTasks();
List<String> preTasksList = JSONUtils.toList(preTasks, String.class);
//if previous tasks not empty
if (preTasksList != null) {
for (String depNode : preTasksList) {
taskNodeRelations.add(new TaskNodeRelation(depNode, taskNode.getName()));
}
}
}
ProcessDag processDag = new ProcessDag();
processDag.setEdges(taskNodeRelations);
processDag.setNodes(taskNodeList);
// generate detail Dag, to be executed
DAG<String, TaskNode, TaskNodeRelation> dag = new DAG<>();
if (CollectionUtils.isNotEmpty(processDag.getNodes())) {
for (TaskNode node : processDag.getNodes()) {
dag.addNode(node.getName(), node);
}
}
if (CollectionUtils.isNotEmpty(processDag.getEdges())) {
for (TaskNodeRelation edge : processDag.getEdges()) {
dag.addEdge(edge.getStartNode(), edge.getEndNode());
}
}
return dag;
return DagHelper.buildDagGraph(processDag);
}
}
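
Judging from the deleted inline code, DagHelper.buildDagGraph is presumably equivalent to the sketch below (reconstructed from the removed lines, not from DagHelper itself):

import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.process.ProcessDag;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;

public final class DagBuildSketch {
    // mirrors the logic this refactor moved into DagHelper.buildDagGraph
    public static DAG<String, TaskNode, TaskNodeRelation> buildDagGraph(ProcessDag processDag) {
        DAG<String, TaskNode, TaskNodeRelation> dag = new DAG<>();
        if (CollectionUtils.isNotEmpty(processDag.getNodes())) {
            for (TaskNode node : processDag.getNodes()) {
                dag.addNode(node.getName(), node); // register every task node
            }
        }
        if (CollectionUtils.isNotEmpty(processDag.getEdges())) {
            for (TaskNodeRelation edge : processDag.getEdges()) {
                dag.addEdge(edge.getStartNode(), edge.getEndNode()); // wire dependencies
            }
        }
        return dag;
    }
}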

10
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/DataAnalysisService.java

@ -24,13 +24,13 @@ import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.common.queue.ITaskQueue;
import org.apache.dolphinscheduler.common.queue.TaskQueueFactory;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.ProcessDao;
import org.apache.dolphinscheduler.dao.entity.*;
import org.apache.dolphinscheduler.dao.mapper.*;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.queue.ITaskQueue;
import org.apache.dolphinscheduler.service.queue.TaskQueueFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
@@ -69,7 +69,7 @@ public class DataAnalysisService extends BaseService{
TaskInstanceMapper taskInstanceMapper;
@Autowired
ProcessDao processDao;
ProcessService processService;
/**
* statistical task instance status data
@@ -296,7 +296,7 @@ public class DataAnalysisService extends BaseService{
if(projectId !=0){
projectIds.add(projectId);
}else if(loginUser.getUserType() == UserType.GENERAL_USER){
projectIds = processDao.getProjectIdListHavePerm(loginUser.getId());
projectIds = processService.getProjectIdListHavePerm(loginUser.getId());
if(projectIds.size() ==0 ){
projectIds.add(0);
}

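Note: this hunk is representative of the whole patch: ProcessDao and the task-queue classes move behind the new dolphinscheduler-service module, and callers inject ProcessService instead. A minimal sketch of the pattern; ExampleAnalysisService is hypothetical, and getProjectIdListHavePerm is the call shown above.

import org.apache.dolphinscheduler.service.process.ProcessService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

@Service
public class ExampleAnalysisService {

    @Autowired
    private ProcessService processService;  // was: private ProcessDao processDao;

    public boolean hasAnyProject(int userId) {
        // call sites are unchanged; only the injected facade moved modules
        return !processService.getProjectIdListHavePerm(userId).isEmpty();
    }
}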
3
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/DataSourceService.java

@@ -21,10 +21,9 @@ import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.common.job.db.*;
import org.apache.dolphinscheduler.common.utils.CommonUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.dao.datasource.*;
import org.apache.dolphinscheduler.dao.entity.DataSource;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.User;

42
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ExecutorService.java

@@ -25,12 +25,12 @@ import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.ProcessDao;
import org.apache.dolphinscheduler.dao.entity.*;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.dao.utils.cron.CronUtils;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.quartz.cron.CronUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
@@ -67,7 +67,7 @@ public class ExecutorService extends BaseService{
@Autowired
private ProcessDao processDao;
private ProcessService processService;
/**
* execute process instance
@@ -117,7 +117,7 @@ public class ExecutorService extends BaseService{
}
if (!checkTenantSuitable(processDefinition)){
logger.error("there is not any vaild tenant for the process definition: id:{},name:{}, ",
logger.error("there is not any valid tenant for the process definition: id:{},name:{}, ",
processDefinition.getId(), processDefinition.getName());
putMsg(result, Status.TENANT_NOT_SUITABLE);
return result;
@@ -186,13 +186,13 @@ public class ExecutorService extends BaseService{
return checkResult;
}
ProcessInstance processInstance = processDao.findProcessInstanceDetailById(processInstanceId);
ProcessInstance processInstance = processService.findProcessInstanceDetailById(processInstanceId);
if (processInstance == null) {
putMsg(result, Status.PROCESS_INSTANCE_NOT_EXIST, processInstanceId);
return result;
}
ProcessDefinition processDefinition = processDao.findProcessDefineById(processInstance.getProcessDefinitionId());
ProcessDefinition processDefinition = processService.findProcessDefineById(processInstance.getProcessDefinitionId());
if(executeType != ExecuteType.STOP && executeType != ExecuteType.PAUSE){
result = checkProcessDefinitionValid(processDefinition, processInstance.getProcessDefinitionId());
if (result.get(Constants.STATUS) != Status.SUCCESS) {
@@ -206,7 +206,7 @@ public class ExecutorService extends BaseService{
return checkResult;
}
if (!checkTenantSuitable(processDefinition)){
logger.error("there is not any vaild tenant for the process definition: id:{},name:{}, ",
logger.error("there is not any valid tenant for the process definition: id:{},name:{}, ",
processDefinition.getId(), processDefinition.getName());
putMsg(result, Status.TENANT_NOT_SUITABLE);
}
@@ -227,7 +227,7 @@ public class ExecutorService extends BaseService{
} else {
processInstance.setCommandType(CommandType.STOP);
processInstance.addHistoryCmd(CommandType.STOP);
processDao.updateProcessInstance(processInstance);
processService.updateProcessInstance(processInstance);
result = updateProcessInstanceState(processInstanceId, ExecutionStatus.READY_STOP);
}
break;
@@ -237,12 +237,12 @@ public class ExecutorService extends BaseService{
} else {
processInstance.setCommandType(CommandType.PAUSE);
processInstance.addHistoryCmd(CommandType.PAUSE);
processDao.updateProcessInstance(processInstance);
processService.updateProcessInstance(processInstance);
result = updateProcessInstanceState(processInstanceId, ExecutionStatus.READY_PAUSE);
}
break;
default:
logger.error(String.format("unknown execute type : %s", executeType.toString()));
logger.error("unknown execute type : {}", executeType);
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, "unknown execute type");
break;
@@ -257,7 +257,7 @@ public class ExecutorService extends BaseService{
*/
private boolean checkTenantSuitable(ProcessDefinition processDefinition) {
// checkTenantExists();
Tenant tenant = processDao.getTenantForProcess(processDefinition.getTenantId(),
Tenant tenant = processService.getTenantForProcess(processDefinition.getTenantId(),
processDefinition.getUserId());
if(tenant == null){
return false;
@@ -319,7 +319,7 @@ public class ExecutorService extends BaseService{
private Map<String, Object> updateProcessInstanceState(Integer processInstanceId, ExecutionStatus executionStatus) {
Map<String, Object> result = new HashMap<>(5);
int update = processDao.updateProcessInstanceState(processInstanceId, executionStatus);
int update = processService.updateProcessInstanceState(processInstanceId, executionStatus);
if (update > 0) {
putMsg(result, Status.SUCCESS);
} else {
@@ -347,12 +347,12 @@ public class ExecutorService extends BaseService{
CMDPARAM_RECOVER_PROCESS_ID_STRING, instanceId));
command.setExecutorId(loginUser.getId());
if(!processDao.verifyIsNeedCreateCommand(command)){
if(!processService.verifyIsNeedCreateCommand(command)){
putMsg(result, Status.PROCESS_INSTANCE_EXECUTING_COMMAND,processDefinitionId);
return result;
}
int create = processDao.createCommand(command);
int create = processService.createCommand(command);
if (create > 0) {
putMsg(result, Status.SUCCESS);
@@ -376,7 +376,7 @@ public class ExecutorService extends BaseService{
putMsg(result,Status.REQUEST_PARAMS_NOT_VALID_ERROR,"process definition id");
}
List<Integer> ids = new ArrayList<>();
processDao.recurseFindSubProcessId(processDefineId, ids);
processService.recurseFindSubProcessId(processDefineId, ids);
Integer[] idArray = ids.toArray(new Integer[ids.size()]);
if (ids.size() > 0){
List<ProcessDefinition> processDefinitionList;
@@ -506,9 +506,9 @@ public class ExecutorService extends BaseService{
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, DateUtils.dateToString(start));
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, DateUtils.dateToString(end));
command.setCommandParam(JSONUtils.toJson(cmdParam));
return processDao.createCommand(command);
return processService.createCommand(command);
}else if (runMode == RunMode.RUN_MODE_PARALLEL){
List<Schedule> schedules = processDao.queryReleaseSchedulerListByProcessDefinitionId(processDefineId);
List<Schedule> schedules = processService.queryReleaseSchedulerListByProcessDefinitionId(processDefineId);
List<Date> listDate = new LinkedList<>();
if(!CollectionUtils.isEmpty(schedules)){
for (Schedule item : schedules) {
@@ -521,7 +521,7 @@ public class ExecutorService extends BaseService{
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, DateUtils.dateToString(date));
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, DateUtils.dateToString(date));
command.setCommandParam(JSONUtils.toJson(cmdParam));
processDao.createCommand(command);
processService.createCommand(command);
}
return listDate.size();
}else{
@@ -532,19 +532,19 @@ public class ExecutorService extends BaseService{
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, DateUtils.dateToString(start));
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, DateUtils.dateToString(start));
command.setCommandParam(JSONUtils.toJson(cmdParam));
processDao.createCommand(command);
processService.createCommand(command);
start = DateUtils.getSomeDay(start, 1);
}
return runCunt;
}
}
}else{
logger.error("there is not vaild schedule date for the process definition: id:{},date:{}",
logger.error("there is not valid schedule date for the process definition: id:{},date:{}",
processDefineId, schedule);
}
}else{
command.setCommandParam(JSONUtils.toJson(cmdParam));
return processDao.createCommand(command);
return processService.createCommand(command);
}
return 0;

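Note: besides the facade rename, ExecutorService drops eager String.format messages in favor of SLF4J placeholders. A self-contained illustration of the difference; the nested enum is a local stand-in for org.apache.dolphinscheduler.common.enums.ExecuteType.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggingStyleExample {

    // local stand-in for the real ExecuteType enum
    enum ExecuteType { STOP, PAUSE }

    private static final Logger logger = LoggerFactory.getLogger(LoggingStyleExample.class);

    public static void main(String[] args) {
        ExecuteType executeType = ExecuteType.STOP;
        // before: the message string is built even when ERROR is disabled
        logger.error(String.format("unknown execute type : %s", executeType.toString()));
        // after: the placeholder is only filled if the event is actually logged
        logger.error("unknown execute type : {}", executeType);
    }
}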
36
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/LoggerService.java

@@ -17,17 +17,19 @@
package org.apache.dolphinscheduler.api.service;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.log.LogClient;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.ProcessDao;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.service.log.LogClientService;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import javax.annotation.PreDestroy;
/**
* log service
*/
@@ -37,7 +39,18 @@ public class LoggerService {
private static final Logger logger = LoggerFactory.getLogger(LoggerService.class);
@Autowired
private ProcessDao processDao;
private ProcessService processService;
private final LogClientService logClient;
public LoggerService(){
logClient = new LogClientService();
}
@PreDestroy
public void close(){
logClient.close();
}
/**
* view log
@@ -49,7 +62,7 @@ public class LoggerService {
*/
public Result queryLog(int taskInstId, int skipLineNum, int limit) {
TaskInstance taskInstance = processDao.findTaskInstanceById(taskInstId);
TaskInstance taskInstance = processService.findTaskInstanceById(taskInstId);
if (taskInstance == null){
return new Result(Status.TASK_INSTANCE_NOT_FOUND.getCode(), Status.TASK_INSTANCE_NOT_FOUND.getMsg());
@@ -64,12 +77,9 @@ public class LoggerService {
Result result = new Result(Status.SUCCESS.getCode(), Status.SUCCESS.getMsg());
logger.info("log host : {} , logPath : {} , logServer port : {}",host,taskInstance.getLogPath(),Constants.RPC_PORT);
LogClient logClient = new LogClient(host, Constants.RPC_PORT);
String log = logClient.rollViewLog(taskInstance.getLogPath(),skipLineNum,limit);
String log = logClient.rollViewLog(host, Constants.RPC_PORT, taskInstance.getLogPath(),skipLineNum,limit);
result.setData(log);
logger.info(log);
return result;
}
@@ -80,17 +90,11 @@ public class LoggerService {
* @return log byte array
*/
public byte[] getLogBytes(int taskInstId) {
TaskInstance taskInstance = processDao.findTaskInstanceById(taskInstId);
TaskInstance taskInstance = processService.findTaskInstanceById(taskInstId);
if (taskInstance == null){
throw new RuntimeException("task instance is null");
}
String host = taskInstance.getHost();
if(StringUtils.isEmpty(host)){
throw new RuntimeException("task instance host is null");
}
LogClient logClient = new LogClient(host, Constants.RPC_PORT);
return logClient.getLogBytes(taskInstance.getLogPath());
return logClient.getLogBytes(host, Constants.RPC_PORT, taskInstance.getLogPath());
}
}

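Note: LoggerService now owns a single LogClientService for the bean's lifetime and releases it in a @PreDestroy hook, instead of constructing a client per request. A minimal lifecycle sketch; StubLogClientService is a stand-in exposing only the calls used above, not the real LogClientService API.

import javax.annotation.PreDestroy;
import org.springframework.stereotype.Service;

// stand-in for org.apache.dolphinscheduler.service.log.LogClientService
class StubLogClientService {
    String rollViewLog(String host, int port, String path, int skip, int limit) { return ""; }
    void close() { /* release the underlying connection */ }
}

@Service
public class LifecycleExample {

    private final StubLogClientService logClient = new StubLogClientService();

    public String queryLog(String host, int port, String path) {
        // every request reuses the shared client
        return logClient.rollViewLog(host, port, path, 0, 100);
    }

    @PreDestroy
    public void close() {
        // invoked once by Spring when the context shuts the bean down
        logClient.close();
    }
}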
108
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java

@@ -43,9 +43,10 @@ import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.ProcessDao;
import org.apache.dolphinscheduler.dao.entity.*;
import org.apache.dolphinscheduler.dao.mapper.*;
import org.apache.dolphinscheduler.dao.utils.DagHelper;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
@@ -94,7 +95,7 @@ public class ProcessDefinitionService extends BaseDAGService {
private ScheduleMapper scheduleMapper;
@Autowired
private ProcessDao processDao;
private ProcessService processService;
@Autowired
private WorkerGroupMapper workerGroupMapper;
@@ -283,7 +284,7 @@ public class ProcessDefinitionService extends BaseDAGService {
if ((checkProcessJson.get(Constants.STATUS) != Status.SUCCESS)) {
return checkProcessJson;
}
ProcessDefinition processDefinition = processDao.findProcessDefineById(id);
ProcessDefinition processDefinition = processService.findProcessDefineById(id);
if (processDefinition == null) {
// check process definition exists
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, id);
@@ -296,7 +297,7 @@ public class ProcessDefinitionService extends BaseDAGService {
putMsg(result, Status.SUCCESS);
}
ProcessDefinition processDefine = processDao.findProcessDefineById(id);
ProcessDefinition processDefine = processService.findProcessDefineById(id);
Date now = new Date();
processDefine.setId(id);
@@ -442,6 +443,13 @@ public class ProcessDefinitionService extends BaseDAGService {
}
ReleaseState state = ReleaseState.getEnum(releaseState);
// check state
if (null == state) {
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, "releaseState");
return result;
}
ProcessDefinition processDefinition = processDefineMapper.selectById(id);
switch (state) {
@@ -458,7 +466,7 @@ public class ProcessDefinitionService extends BaseDAGService {
);
for(Schedule schedule:scheduleList){
logger.info("set schedule offline, schedule id: {}, process definition id: {}", project.getId(), schedule.getId(), id);
logger.info("set schedule offline, project id: {}, schedule id: {}, process definition id: {}", project.getId(), schedule.getId(), id);
// set status
schedule.setReleaseState(ReleaseState.OFFLINE);
scheduleMapper.updateById(schedule);
@@ -941,11 +949,16 @@ public class ProcessDefinitionService extends BaseDAGService {
return result;
}
String processDefinitionJson = processDefinition.getProcessDefinitionJson();
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
//process data check
if (null == processData) {
logger.error("process data is null");
putMsg(result,Status.DATA_IS_NOT_VALID, processDefinitionJson);
return result;
}
List<TaskNode> taskNodeList = (processData.getTasks() == null) ? new ArrayList<>() : processData.getTasks();
result.put(Constants.DATA_LIST, taskNodeList);
@@ -967,14 +980,13 @@ public class ProcessDefinitionService extends BaseDAGService {
Map<Integer, List<TaskNode>> taskNodeMap = new HashMap<>();
String[] idList = defineIdList.split(",");
List<String> definitionIdList = Arrays.asList(idList);
List<Integer> idIntList = new ArrayList<>();
for(String definitionId : definitionIdList) {
for(String definitionId : idList) {
idIntList.add(Integer.parseInt(definitionId));
}
Integer[] idArray = idIntList.toArray(new Integer[idIntList.size()]);
List<ProcessDefinition> processDefinitionList = processDefineMapper.queryDefinitionListByIdList(idArray);
if (processDefinitionList == null || processDefinitionList.size() ==0) {
if (CollectionUtils.isEmpty(processDefinitionList)) {
logger.info("process definition not exists");
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, defineIdList);
return result;
@@ -1024,9 +1036,10 @@ public class ProcessDefinitionService extends BaseDAGService {
Map<String, Object> result = new HashMap<>();
ProcessDefinition processDefinition = processDefineMapper.selectById(processId);
if (processDefinition == null) {
if (null == processDefinition) {
logger.info("process define not exists");
throw new RuntimeException("process define not exists");
putMsg(result,Status.PROCESS_DEFINE_NOT_EXIST, processDefinition);
return result;
}
DAG<String, TaskNode, TaskNodeRelation> dag = genDagGraph(processDefinition);
/**
@@ -1114,10 +1127,10 @@ public class ProcessDefinitionService extends BaseDAGService {
pTreeViewDto.getChildren().add(treeViewDto);
}
postNodeList = dag.getSubsequentNodes(nodeName);
if (postNodeList != null && postNodeList.size() > 0) {
if (CollectionUtils.isNotEmpty(postNodeList)) {
for (String nextNodeName : postNodeList) {
List<TreeViewDto> treeViewDtoList = waitingRunningNodeMap.get(nextNodeName);
if (treeViewDtoList != null && treeViewDtoList.size() > 0) {
if (CollectionUtils.isNotEmpty(treeViewDtoList)) {
treeViewDtoList.add(treeViewDto);
waitingRunningNodeMap.put(nextNodeName, treeViewDtoList);
} else {
@@ -1129,7 +1142,6 @@ public class ProcessDefinitionService extends BaseDAGService {
}
runningNodeMap.remove(nodeName);
}
if (waitingRunningNodeMap == null || waitingRunningNodeMap.size() == 0) {
break;
} else {
@@ -1154,75 +1166,29 @@ public class ProcessDefinitionService extends BaseDAGService {
private DAG<String, TaskNode, TaskNodeRelation> genDagGraph(ProcessDefinition processDefinition) throws Exception {
String processDefinitionJson = processDefinition.getProcessDefinitionJson();
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
List<TaskNode> taskNodeList = processData.getTasks();
processDefinition.setGlobalParamList(processData.getGlobalParams());
//check process data
if (null != processData) {
List<TaskNode> taskNodeList = processData.getTasks();
processDefinition.setGlobalParamList(processData.getGlobalParams());
ProcessDag processDag = DagHelper.getProcessDag(taskNodeList);
List<TaskNodeRelation> taskNodeRelations = new ArrayList<>();
// Traverse node information and build relationships
for (TaskNode taskNode : taskNodeList) {
String preTasks = taskNode.getPreTasks();
List<String> preTasksList = JSONUtils.toList(preTasks, String.class);
// If the dependency is not empty
if (preTasksList != null) {
for (String depNode : preTasksList) {
taskNodeRelations.add(new TaskNodeRelation(depNode, taskNode.getName()));
}
}
// Generate concrete Dag to be executed
return DagHelper.buildDagGraph(processDag);
}
ProcessDag processDag = new ProcessDag();
processDag.setEdges(taskNodeRelations);
processDag.setNodes(taskNodeList);
// Generate concrete Dag to be executed
return genDagGraph(processDag);
return new DAG<>();
}
/**
* Generate the DAG of process
*
* @return DAG
*/
private DAG<String, TaskNode, TaskNodeRelation> genDagGraph(ProcessDag processDag) {
DAG<String, TaskNode, TaskNodeRelation> dag = new DAG<>();
/**
* Add the ndoes
*/
if (CollectionUtils.isNotEmpty(processDag.getNodes())) {
for (TaskNode node : processDag.getNodes()) {
dag.addNode(node.getName(), node);
}
}
/**
* Add the edges
*/
if (CollectionUtils.isNotEmpty(processDag.getEdges())) {
for (TaskNodeRelation edge : processDag.getEdges()) {
dag.addEdge(edge.getStartNode(), edge.getEndNode());
}
}
return dag;
}
/**
* whether the graph has a ring
*
* @param taskNodeResponseList
* @return
* @param taskNodeResponseList task node response list
* @return if graph has cycle flag
*/
private boolean graphHasCycle(List<TaskNode> taskNodeResponseList) {
DAG<String, TaskNode, String> graph = new DAG<>();

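Note: several hunks above add null guards after JSONUtils.parseObject, so malformed definition JSON now surfaces as DATA_IS_NOT_VALID (or an empty DAG) rather than a downstream NPE. A sketch of the guard, emulating parseObject's return-null-on-failure contract with Jackson; the class and payload here are stand-ins.

import com.fasterxml.jackson.databind.ObjectMapper;

public class NullGuardExample {

    // minimal stand-in for the ProcessData payload
    static class ProcessData {
        public java.util.List<String> tasks;
    }

    public static void main(String[] args) {
        String processDefinitionJson = "not valid json";
        ProcessData processData = parseObject(processDefinitionJson, ProcessData.class);
        // process data check, as in the patched service
        if (null == processData) {
            System.out.println("DATA_IS_NOT_VALID: " + processDefinitionJson);
            return;
        }
        System.out.println("tasks: " + processData.tasks);
    }

    // JSONUtils.parseObject returns null on parse failure; emulate that here
    static <T> T parseObject(String json, Class<T> clazz) {
        try {
            return new ObjectMapper().readValue(json, clazz);
        } catch (Exception e) {
            return null;
        }
    }
}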
61
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessInstanceService.java

@@ -30,15 +30,15 @@ import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.queue.ITaskQueue;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.common.utils.placeholder.BusinessTimeUtils;
import org.apache.dolphinscheduler.dao.ProcessDao;
import com.alibaba.fastjson.JSON;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.dolphinscheduler.dao.entity.*;
import org.apache.dolphinscheduler.dao.mapper.*;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.queue.ITaskQueue;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
@@ -72,7 +72,7 @@ public class ProcessInstanceService extends BaseDAGService {
ProjectService projectService;
@Autowired
ProcessDao processDao;
ProcessService processService;
@Autowired
ProcessInstanceMapper processInstanceMapper;
@@ -95,6 +95,9 @@ public class ProcessInstanceService extends BaseDAGService {
@Autowired
WorkerGroupMapper workerGroupMapper;
@Autowired
UsersService usersService;
/**
* query process instance by id
*
@@ -112,7 +115,7 @@ public class ProcessInstanceService extends BaseDAGService {
if (resultEnum != Status.SUCCESS) {
return checkResult;
}
ProcessInstance processInstance = processDao.findProcessInstanceDetailById(processId);
ProcessInstance processInstance = processService.findProcessInstanceDetailById(processId);
String workerGroupName = "";
if(processInstance.getWorkerGroupId() == -1){
workerGroupName = DEFAULT;
@@ -125,7 +128,7 @@ public class ProcessInstanceService extends BaseDAGService {
}
}
processInstance.setWorkerGroupName(workerGroupName);
ProcessDefinition processDefinition = processDao.findProcessDefineById(processInstance.getProcessDefinitionId());
ProcessDefinition processDefinition = processService.findProcessDefineById(processInstance.getProcessDefinitionId());
processInstance.setReceivers(processDefinition.getReceivers());
processInstance.setReceiversCc(processDefinition.getReceiversCc());
result.put(Constants.DATA_LIST, processInstance);
@@ -151,7 +154,7 @@ public class ProcessInstanceService extends BaseDAGService {
*/
public Map<String, Object> queryProcessInstanceList(User loginUser, String projectName, Integer processDefineId,
String startDate, String endDate,
String searchVal, ExecutionStatus stateType, String host,
String searchVal, String executorName,ExecutionStatus stateType, String host,
Integer pageNo, Integer pageSize) {
Map<String, Object> result = new HashMap<>(5);
@@ -182,25 +185,31 @@ public class ProcessInstanceService extends BaseDAGService {
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, "startDate,endDate");
return result;
}
Page<ProcessInstance> page = new Page(pageNo, pageSize);
PageInfo pageInfo = new PageInfo<ProcessInstance>(pageNo, pageSize);
int executorId = usersService.getUserIdByName(executorName);
IPage<ProcessInstance> processInstanceList =
processInstanceMapper.queryProcessInstanceListPaging(page,
project.getId(), processDefineId, searchVal, statusArray, host, start, end);
project.getId(), processDefineId, searchVal, executorId,statusArray, host, start, end);
List<ProcessInstance> processInstances = processInstanceList.getRecords();
for(ProcessInstance processInstance: processInstances){
processInstance.setDuration(DateUtils.differSec(processInstance.getStartTime(),processInstance.getEndTime()));
User executor = usersService.queryUser(processInstance.getExecutorId());
if (null != executor) {
processInstance.setExecutorName(executor.getUserName());
}
}
Set<String> exclusionSet = new HashSet<String>();
Set<String> exclusionSet = new HashSet<>();
exclusionSet.add(Constants.CLASS);
exclusionSet.add("locations");
exclusionSet.add("connects");
exclusionSet.add("processInstanceJson");
PageInfo pageInfo = new PageInfo<ProcessInstance>(pageNo, pageSize);
pageInfo.setTotalCount((int) processInstanceList.getTotal());
pageInfo.setLists(CollectionUtils.getListByExclusion(processInstances, exclusionSet));
result.put(Constants.DATA_LIST, pageInfo);
@@ -228,8 +237,8 @@ public class ProcessInstanceService extends BaseDAGService {
if (resultEnum != Status.SUCCESS) {
return checkResult;
}
ProcessInstance processInstance = processDao.findProcessInstanceDetailById(processId);
List<TaskInstance> taskInstanceList = processDao.findValidTaskListByProcessId(processId);
ProcessInstance processInstance = processService.findProcessInstanceDetailById(processId);
List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(processId);
AddDependResultForTaskList(taskInstanceList);
Map<String, Object> resultMap = new HashMap<>();
resultMap.put(PROCESS_INSTANCE_STATE, processInstance.getState().toString());
@@ -304,7 +313,7 @@ public class ProcessInstanceService extends BaseDAGService {
return checkResult;
}
TaskInstance taskInstance = processDao.findTaskInstanceById(taskId);
TaskInstance taskInstance = processService.findTaskInstanceById(taskId);
if (taskInstance == null) {
putMsg(result, Status.TASK_INSTANCE_NOT_EXISTS, taskId);
return result;
@@ -314,7 +323,7 @@ public class ProcessInstanceService extends BaseDAGService {
return result;
}
ProcessInstance subWorkflowInstance = processDao.findSubProcessInstance(
ProcessInstance subWorkflowInstance = processService.findSubProcessInstance(
taskInstance.getProcessInstanceId(), taskInstance.getId());
if (subWorkflowInstance == null) {
putMsg(result, Status.SUB_PROCESS_INSTANCE_NOT_EXIST, taskId);
@@ -356,7 +365,7 @@ public class ProcessInstanceService extends BaseDAGService {
}
//check process instance exists
ProcessInstance processInstance = processDao.findProcessInstanceDetailById(processInstanceId);
ProcessInstance processInstance = processService.findProcessInstanceDetailById(processInstanceId);
if (processInstance == null) {
putMsg(result, Status.PROCESS_INSTANCE_NOT_EXIST, processInstanceId);
return result;
@@ -380,7 +389,7 @@ public class ProcessInstanceService extends BaseDAGService {
String globalParams = null;
String originDefParams = null;
int timeout = processInstance.getTimeout();
ProcessDefinition processDefinition = processDao.findProcessDefineById(processInstance.getProcessDefinitionId());
ProcessDefinition processDefinition = processService.findProcessDefineById(processInstance.getProcessDefinitionId());
if (StringUtils.isNotEmpty(processInstanceJson)) {
ProcessData processData = JSONUtils.parseObject(processInstanceJson, ProcessData.class);
//check workflow json is valid
@@ -396,7 +405,7 @@ public class ProcessInstanceService extends BaseDAGService {
processInstance.getCmdTypeIfComplement(), schedule);
timeout = processData.getTimeout();
processInstance.setTimeout(timeout);
Tenant tenant = processDao.getTenantForProcess(processData.getTenantId(),
Tenant tenant = processService.getTenantForProcess(processData.getTenantId(),
processDefinition.getUserId());
if(tenant != null){
processInstance.setTenantCode(tenant.getTenantCode());
@@ -406,7 +415,7 @@ public class ProcessInstanceService extends BaseDAGService {
}
// int update = processDao.updateProcessInstance(processInstanceId, processInstanceJson,
// globalParams, schedule, flag, locations, connects);
int update = processDao.updateProcessInstance(processInstance);
int update = processService.updateProcessInstance(processInstance);
int updateDefine = 1;
if (syncDefine && StringUtils.isNotEmpty(processInstanceJson)) {
processDefinition.setProcessDefinitionJson(processInstanceJson);
@@ -445,7 +454,7 @@ public class ProcessInstanceService extends BaseDAGService {
return checkResult;
}
ProcessInstance subInstance = processDao.findProcessInstanceDetailById(subId);
ProcessInstance subInstance = processService.findProcessInstanceDetailById(subId);
if (subInstance == null) {
putMsg(result, Status.PROCESS_INSTANCE_NOT_EXIST, subId);
return result;
@@ -455,7 +464,7 @@ public class ProcessInstanceService extends BaseDAGService {
return result;
}
ProcessInstance parentWorkflowInstance = processDao.findParentProcessInstance(subId);
ProcessInstance parentWorkflowInstance = processService.findParentProcessInstance(subId);
if (parentWorkflowInstance == null) {
putMsg(result, Status.SUB_PROCESS_INSTANCE_NOT_EXIST);
return result;
@@ -476,7 +485,7 @@ public class ProcessInstanceService extends BaseDAGService {
* @return delete result code
*/
@Transactional(rollbackFor = Exception.class)
public Map<String, Object> deleteProcessInstanceById(User loginUser, String projectName, Integer processInstanceId,ITaskQueue tasksQueue) {
public Map<String, Object> deleteProcessInstanceById(User loginUser, String projectName, Integer processInstanceId, ITaskQueue tasksQueue) {
Map<String, Object> result = new HashMap<>(5);
Project project = projectMapper.queryByName(projectName);
@@ -486,8 +495,8 @@ public class ProcessInstanceService extends BaseDAGService {
if (resultEnum != Status.SUCCESS) {
return checkResult;
}
ProcessInstance processInstance = processDao.findProcessInstanceDetailById(processInstanceId);
List<TaskInstance> taskInstanceList = processDao.findValidTaskListByProcessId(processInstanceId);
ProcessInstance processInstance = processService.findProcessInstanceDetailById(processInstanceId);
List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(processInstanceId);
if (null == processInstance) {
putMsg(result, Status.PROCESS_INSTANCE_NOT_EXIST, processInstanceId);
@@ -512,7 +521,7 @@ public class ProcessInstanceService extends BaseDAGService {
.append(taskInstance.getId())
.append(UNDERLINE);
int taskWorkerGroupId = processDao.getTaskWorkerGroupId(taskInstance);
int taskWorkerGroupId = processService.getTaskWorkerGroupId(taskInstance);
WorkerGroup workerGroup = workerGroupMapper.selectById(taskWorkerGroupId);
if(workerGroup == null){
@@ -541,9 +550,9 @@ public class ProcessInstanceService extends BaseDAGService {
}
// delete database cascade
int delete = processDao.deleteWorkProcessInstanceById(processInstanceId);
processDao.deleteAllSubWorkProcessByParentId(processInstanceId);
processDao.deleteWorkProcessMapByParentId(processInstanceId);
int delete = processService.deleteWorkProcessInstanceById(processInstanceId);
processService.deleteAllSubWorkProcessByParentId(processInstanceId);
processService.deleteWorkProcessMapByParentId(processInstanceId);
if (delete > 0) {
putMsg(result, Status.SUCCESS);

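Note: the listing query above now resolves executorName to a user id before paging and back-fills executorName on every returned row. A stand-alone sketch of that enrich loop; the nested classes are stand-ins for the dao entities and queryUser stands in for usersService.queryUser.

import java.util.Arrays;
import java.util.List;

public class ExecutorNameExample {

    static class User { int id; String userName; }
    static class ProcessInstance { int executorId; String executorName; }

    public static void main(String[] args) {
        User admin = new User();
        admin.id = 1;
        admin.userName = "admin";
        List<User> knownUsers = Arrays.asList(admin);

        ProcessInstance instance = new ProcessInstance();
        instance.executorId = 1;
        for (ProcessInstance processInstance : Arrays.asList(instance)) {
            User executor = queryUser(processInstance.executorId, knownUsers);
            if (null != executor) {  // same guard as the patch
                processInstance.executorName = executor.userName;
            }
        }
        System.out.println(instance.executorName);  // prints: admin
    }

    static User queryUser(int id, List<User> users) {
        return users.stream().filter(u -> u.id == id).findFirst().orElse(null);
    }
}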
4
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java

@@ -119,7 +119,7 @@ public class ResourcesService extends BaseService {
putMsg(result, Status.UDF_RESOURCE_SUFFIX_NOT_JAR);
return result;
}
if (file.getSize() > Constants.maxFileSize) {
if (file.getSize() > Constants.MAX_FILE_SIZE) {
logger.error("file size is too large: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_SIZE_EXCEED_LIMIT);
return result;
@@ -547,7 +547,7 @@ public class ResourcesService extends BaseService {
}
} catch (Exception e) {
logger.error(String.format("Resource %s read failed", hdfsFileName), e);
logger.error("Resource {} read failed", hdfsFileName, e);
putMsg(result, Status.HDFS_OPERATION_ERROR);
}

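Note: the second ResourcesService hunk leans on an SLF4J convention: when the final argument is a Throwable not consumed by a placeholder, it is rendered as a stack trace. A short, self-contained check; the file name is hypothetical.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ThrowableLoggingExample {

    private static final Logger logger = LoggerFactory.getLogger(ThrowableLoggingExample.class);

    public static void main(String[] args) {
        String hdfsFileName = "/dolphinscheduler/udfs/example.jar";  // hypothetical
        Exception e = new RuntimeException("read failed");
        // {} takes hdfsFileName; the trailing exception is printed with its stack trace
        logger.error("Resource {} read failed", hdfsFileName, e);
    }
}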
22
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/SchedulerService.java

@@ -26,7 +26,6 @@ import org.apache.dolphinscheduler.common.model.Server;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.ProcessDao;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.Project;
import org.apache.dolphinscheduler.dao.entity.Schedule;
@@ -34,11 +33,12 @@ import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper;
import org.apache.dolphinscheduler.dao.utils.cron.CronUtils;
import org.apache.dolphinscheduler.dao.quartz.ProcessScheduleJob;
import org.apache.dolphinscheduler.dao.quartz.QuartzExecutors;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.quartz.ProcessScheduleJob;
import org.apache.dolphinscheduler.service.quartz.QuartzExecutors;
import org.apache.dolphinscheduler.service.quartz.cron.CronUtils;
import org.quartz.CronExpression;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -68,7 +68,7 @@ public class SchedulerService extends BaseService {
private MonitorService monitorService;
@Autowired
private ProcessDao processDao;
private ProcessService processService;
@Autowired
private ScheduleMapper scheduleMapper;
@@ -119,7 +119,7 @@ public class SchedulerService extends BaseService {
}
// check work flow define release state
ProcessDefinition processDefinition = processDao.findProcessDefineById(processDefineId);
ProcessDefinition processDefinition = processService.findProcessDefineById(processDefineId);
result = executorService.checkProcessDefinitionValid(processDefinition, processDefineId);
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return result;
@@ -221,7 +221,7 @@ public class SchedulerService extends BaseService {
return result;
}
ProcessDefinition processDefinition = processDao.findProcessDefineById(schedule.getProcessDefinitionId());
ProcessDefinition processDefinition = processService.findProcessDefineById(schedule.getProcessDefinitionId());
if (processDefinition == null) {
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, schedule.getProcessDefinitionId());
return result;
@@ -321,7 +321,7 @@ public class SchedulerService extends BaseService {
putMsg(result, Status.SCHEDULE_CRON_REALEASE_NEED_NOT_CHANGE, scheduleStatus);
return result;
}
ProcessDefinition processDefinition = processDao.findProcessDefineById(scheduleObj.getProcessDefinitionId());
ProcessDefinition processDefinition = processService.findProcessDefineById(scheduleObj.getProcessDefinitionId());
if (processDefinition == null) {
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, scheduleObj.getProcessDefinitionId());
return result;
@@ -338,7 +338,7 @@ public class SchedulerService extends BaseService {
}
// check sub process definition release state
List<Integer> subProcessDefineIds = new ArrayList<>();
processDao.recurseFindSubProcessId(scheduleObj.getProcessDefinitionId(), subProcessDefineIds);
processService.recurseFindSubProcessId(scheduleObj.getProcessDefinitionId(), subProcessDefineIds);
Integer[] idArray = subProcessDefineIds.toArray(new Integer[subProcessDefineIds.size()]);
if (subProcessDefineIds.size() > 0){
List<ProcessDefinition> subProcessDefinitionList =
@@ -423,7 +423,7 @@ public class SchedulerService extends BaseService {
return result;
}
ProcessDefinition processDefinition = processDao.findProcessDefineById(processDefineId);
ProcessDefinition processDefinition = processService.findProcessDefineById(processDefineId);
if (processDefinition == null) {
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, processDefineId);
return result;
@@ -472,7 +472,7 @@ public class SchedulerService extends BaseService {
logger.info("set schedule, project id: {}, scheduleId: {}", projectId, scheduleId);
Schedule schedule = processDao.querySchedule(scheduleId);
Schedule schedule = processService.querySchedule(scheduleId);
if (schedule == null) {
logger.warn("process schedule info not exists");
return;

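Note: the scheduling helpers (CronUtils, ProcessScheduleJob, QuartzExecutors) move to org.apache.dolphinscheduler.service.quartz; they sit on top of Quartz's CronExpression, imported above. A minimal example of the underlying Quartz API; the cron string is illustrative (Quartz cron has a leading seconds field).

import java.util.Date;
import org.quartz.CronExpression;

public class CronExample {
    public static void main(String[] args) throws Exception {
        // fire at minute 0 of every hour
        CronExpression cron = new CronExpression("0 0 * * * ?");
        Date next = cron.getNextValidTimeAfter(new Date());
        System.out.println("next trigger time: " + next);
    }
}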
33
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/TaskInstanceService.java

@@ -17,6 +17,8 @@
package org.apache.dolphinscheduler.api.service;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.common.Constants;
@@ -24,15 +26,12 @@ import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.ProcessDao;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.Project;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
@@ -56,11 +55,17 @@ public class TaskInstanceService extends BaseService {
ProjectService projectService;
@Autowired
ProcessDao processDao;
ProcessService processService;
@Autowired
TaskInstanceMapper taskInstanceMapper;
@Autowired
ProcessInstanceService processInstanceService;
@Autowired
UsersService usersService;
/**
* query task list by project, process instance, task name, task start time, task end time, task status, keyword paging
@@ -79,8 +84,8 @@ public class TaskInstanceService extends BaseService {
* @return task list page
*/
public Map<String,Object> queryTaskListPaging(User loginUser, String projectName,
Integer processInstanceId, String taskName, String startDate, String endDate,
String searchVal, ExecutionStatus stateType,String host,
Integer processInstanceId, String taskName, String executorName, String startDate,
String endDate, String searchVal, ExecutionStatus stateType,String host,
Integer pageNo, Integer pageSize) {
Map<String, Object> result = new HashMap<>(5);
Project project = projectMapper.queryByName(projectName);
@@ -112,17 +117,23 @@ public class TaskInstanceService extends BaseService {
}
Page<TaskInstance> page = new Page(pageNo, pageSize);
PageInfo pageInfo = new PageInfo<TaskInstance>(pageNo, pageSize);
int executorId = usersService.getUserIdByName(executorName);
IPage<TaskInstance> taskInstanceIPage = taskInstanceMapper.queryTaskInstanceListPaging(
page, project.getId(), processInstanceId, searchVal, taskName, statusArray, host, start, end
page, project.getId(), processInstanceId, searchVal, taskName, executorId, statusArray, host, start, end
);
PageInfo pageInfo = new PageInfo<ProcessInstance>(pageNo, pageSize);
Set<String> exclusionSet = new HashSet<>();
exclusionSet.add(Constants.CLASS);
exclusionSet.add("taskJson");
List<TaskInstance> taskInstanceList = taskInstanceIPage.getRecords();
for(TaskInstance taskInstance : taskInstanceList){
taskInstance.setDuration(DateUtils.differSec(taskInstance.getStartTime(),
taskInstance.getEndTime()));
taskInstance.setDuration(DateUtils.differSec(taskInstance.getStartTime(), taskInstance.getEndTime()));
User executor = usersService.queryUser(taskInstance.getExecutorId());
if (null != executor) {
taskInstance.setExecutorName(executor.getUserName());
}
}
pageInfo.setTotalCount((int)taskInstanceIPage.getTotal());
pageInfo.setLists(CollectionUtils.getListByExclusion(taskInstanceIPage.getRecords(),exclusionSet));

Some files were not shown because too many files have changed in this diff