
Merge pull request #6 from apache/dev

update code
BoYiZhang authored 4 years ago, committed by GitHub
commit f644bab7b1
  1. 22
      .github/workflows/ci_backend.yml
  2. 1
      .github/workflows/ci_frontend.yml
  3. 3
      .github/workflows/ci_ut.yml
  4. 125
      .gitignore
  5. 2
      README.md
  6. 68
      ambari_plugin/README.md
  7. 4
      ambari_plugin/common-services/DOLPHIN/1.2.1/configuration/dolphin-common.xml
  8. 164
      ambari_plugin/common-services/DOLPHIN/1.3.0/alerts.json
  9. 143
      ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-alert.xml
  10. 87
      ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-application-api.xml
  11. 158
      ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-common.xml
  12. 467
      ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-datasource.xml
  13. 123
      ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-env.xml
  14. 88
      ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-master.xml
  15. 126
      ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-quartz.xml
  16. 76
      ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-worker.xml
  17. 84
      ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-zookeeper.xml
  18. 137
      ambari_plugin/common-services/DOLPHIN/1.3.0/metainfo.xml
  19. 124
      ambari_plugin/common-services/DOLPHIN/1.3.0/package/alerts/alert_dolphin_scheduler_status.py
  20. 61
      ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_alert_service.py
  21. 70
      ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_api_service.py
  22. 123
      ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_env.py
  23. 61
      ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_logger_service.py
  24. 61
      ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_master_service.py
  25. 60
      ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_worker_service.py
  26. 154
      ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/params.py
  27. 31
      ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/service_check.py
  28. 23
      ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/status_params.py
  29. 20
      ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/alert.properties.j2
  30. 20
      ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/application-api.properties.j2
  31. 20
      ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/common.properties.j2
  32. 20
      ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/datasource.properties.j2
  33. 116
      ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/dolphin-daemon.sh.j2
  34. 20
      ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/master.properties.j2
  35. 20
      ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/quartz.properties.j2
  36. 20
      ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/worker.properties.j2
  37. 20
      ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/zookeeper.properties.j2
  38. 26
      ambari_plugin/common-services/DOLPHIN/1.3.0/quicklinks/quicklinks.json
  39. 661
      ambari_plugin/common-services/DOLPHIN/1.3.0/themes/theme.json
  40. BIN
      ambari_plugin/readme.pdf
  41. 2
      ambari_plugin/statcks/DOLPHIN/metainfo.xml
  42. 226
      charts/dolphinscheduler/README.md
  43. 1
      docker/docker-compose.yml
  44. 233
      docker/docker-swarm/docker-compose.yml
  45. 230
      docker/docker-swarm/docker-stack.yml
  46. 20
      docker/docker-swarm/dolphinscheduler_env.sh
  47. 24
      docker/kubernetes/dolphinscheduler/Chart.yaml
  48. 4
      docker/kubernetes/dolphinscheduler/README.md
  49. 25
      docker/kubernetes/dolphinscheduler/requirements.yaml
  50. 0
      docker/kubernetes/dolphinscheduler/templates/NOTES.txt
  51. 16
      docker/kubernetes/dolphinscheduler/templates/_helpers.tpl
  52. 0
      docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-alert.yaml
  53. 2
      docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-master.yaml
  54. 4
      docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-worker.yaml
  55. 18
      docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml
  56. 12
      docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml
  57. 0
      docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-frontend.yaml
  58. 0
      docker/kubernetes/dolphinscheduler/templates/ingress.yaml
  59. 0
      docker/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-alert.yaml
  60. 0
      docker/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-api.yaml
  61. 0
      docker/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-frontend.yaml
  62. 0
      docker/kubernetes/dolphinscheduler/templates/secret-external-postgresql.yaml
  63. 26
      docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml
  64. 35
      docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml
  65. 0
      docker/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-api.yaml
  66. 0
      docker/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-frontend.yaml
  67. 6
      docker/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-master-headless.yaml
  68. 4
      docker/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-worker-headless.yaml
  69. 7
      docker/kubernetes/dolphinscheduler/values.yaml
  70. 3
      dockerfile/Dockerfile
  71. 12
      dockerfile/conf/dolphinscheduler/env/dolphinscheduler_env.sh
  72. 52
      dockerfile/conf/dolphinscheduler/logback/logback-alert.xml
  73. 62
      dockerfile/conf/dolphinscheduler/logback/logback-api.xml
  74. 82
      dockerfile/conf/dolphinscheduler/logback/logback-master.xml
  75. 26
      dockerfile/conf/dolphinscheduler/logback/logback-worker.xml
  76. 6
      dockerfile/conf/dolphinscheduler/zookeeper.properties.tpl
  77. 23
      dockerfile/startup.sh
  78. 30
      dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/AlertServer.java
  79. 5
      dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/manager/EmailManager.java
  80. 10
      dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/manager/EnterpriseWeChatManager.java
  81. 133
      dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/plugin/EmailAlertPlugin.java
  82. 130
      dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/runner/AlertSender.java
  83. 17
      dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/Constants.java
  84. 17
      dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/EnterpriseWeChatUtils.java
  85. 16
      dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/MailUtils.java
  86. 2
      dolphinscheduler-alert/src/main/resources/alert.properties
  87. 80
      dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/plugin/EmailAlertPluginTest.java
  88. 27
      dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/utils/EnterpriseWeChatUtilsTest.java
  89. 10
      dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/utils/MailUtilsTest.java
  90. 67
      dolphinscheduler-alert/src/test/resources/alert.properties
  91. 48
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionController.java
  92. 21
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java
  93. 56
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/exceptions/ServiceException.java
  94. 33
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ExecutorService.java
  95. 33
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/LoggerService.java
  96. 307
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java
  97. 93
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java
  98. 1
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/SchedulerService.java
  99. 14
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/utils/ZooKeeperState.java
  100. 6
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/utils/ZookeeperMonitor.java
Some files were not shown because too many files have changed in this diff.

22
.github/workflows/ci_backend.yml

@@ -56,21 +56,9 @@ jobs:
uses: actions/setup-java@v1
with:
java-version: 1.8
- name: Check license
run: ./mvnw -B apache-rat:check
- name: Compile
run: mvn -B clean compile package -Prelease -Dmaven.test.skip=true
License-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
# In the checkout@v2, it doesn't support git submodule. Execute the commands manually.
- name: checkout submodules
shell: bash
run: |
git submodule sync --recursive
git -c protocol.version=2 submodule update --init --force --recursive --depth=1
- name: Set up JDK 1.8
uses: actions/setup-java@v1
with:
java-version: 1.8
- name: Check
run: mvn -B apache-rat:check
run: mvn -B clean compile install -Prelease -Dmaven.test.skip=true
- name: Check dependency license
run: tools/dependencies/check-LICENSE.sh

1
.github/workflows/ci_frontend.yml

@@ -50,6 +50,7 @@ jobs:
cd dolphinscheduler-ui
npm install node-sass --unsafe-perm
npm install
npm run lint
npm run build
License-check:

3
.github/workflows/ci_ut.yml

@@ -75,6 +75,7 @@ jobs:
-Dsonar.core.codeCoveragePlugin=jacoco
-Dsonar.projectKey=apache-dolphinscheduler
-Dsonar.login=e4058004bc6be89decf558ac819aa1ecbee57682
-Dsonar.exclusions=dolphinscheduler-ui/src/**/i18n/locale/*.js
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
@@ -83,4 +84,4 @@ jobs:
mkdir -p ${LOG_DIR}
cd ${DOCKER_DIR}
docker-compose logs db > ${LOG_DIR}/db.txt
continue-on-error: true
continue-on-error: true

125
.gitignore

@@ -4,27 +4,24 @@
.zip
.gz
.DS_Store
.idea
.idea/
.idea/*
.target
.target/
**/**/target/**
target/*
*/target
*/target/*
dist/
all-dependencies.txt
self-modules.txt
third-party-dependencies.txt
**/target/
.settings
.nbproject
.classpath
.project
*.iml
**/*.iml
*.ipr
*.iws
*.tgz
.*.swp
.vim
.tmp
node_modules
**/node_modules
npm-debug.log
.vscode
logs/*
@@ -41,110 +38,10 @@ dolphinscheduler-alert/logs/
dolphinscheduler-alert/src/main/resources/alert.properties_bak
dolphinscheduler-alert/src/main/resources/logback.xml
dolphinscheduler-server/src/main/resources/logback.xml
dolphinscheduler-ui/dist
dolphinscheduler-ui/dist/
dolphinscheduler-ui/node
dolphinscheduler-ui/dist/css/common.16ac5d9.css
dolphinscheduler-ui/dist/css/home/index.b444b91.css
dolphinscheduler-ui/dist/css/login/index.5866c64.css
dolphinscheduler-ui/dist/js/0.ac94e5d.js
dolphinscheduler-ui/dist/js/0.ac94e5d.js.map
dolphinscheduler-ui/dist/js/1.0b043a3.js
dolphinscheduler-ui/dist/js/1.0b043a3.js.map
dolphinscheduler-ui/dist/js/10.1bce3dc.js
dolphinscheduler-ui/dist/js/10.1bce3dc.js.map
dolphinscheduler-ui/dist/js/11.79f04d8.js
dolphinscheduler-ui/dist/js/11.79f04d8.js.map
dolphinscheduler-ui/dist/js/12.420daa5.js
dolphinscheduler-ui/dist/js/12.420daa5.js.map
dolphinscheduler-ui/dist/js/13.e5bae1c.js
dolphinscheduler-ui/dist/js/13.e5bae1c.js.map
dolphinscheduler-ui/dist/js/14.f2a0dca.js
dolphinscheduler-ui/dist/js/14.f2a0dca.js.map
dolphinscheduler-ui/dist/js/15.45373e8.js
dolphinscheduler-ui/dist/js/15.45373e8.js.map
dolphinscheduler-ui/dist/js/16.fecb0fc.js
dolphinscheduler-ui/dist/js/16.fecb0fc.js.map
dolphinscheduler-ui/dist/js/17.84be279.js
dolphinscheduler-ui/dist/js/17.84be279.js.map
dolphinscheduler-ui/dist/js/18.307ea70.js
dolphinscheduler-ui/dist/js/18.307ea70.js.map
dolphinscheduler-ui/dist/js/19.144db9c.js
dolphinscheduler-ui/dist/js/19.144db9c.js.map
dolphinscheduler-ui/dist/js/2.8b4ef29.js
dolphinscheduler-ui/dist/js/2.8b4ef29.js.map
dolphinscheduler-ui/dist/js/20.4c527e9.js
dolphinscheduler-ui/dist/js/20.4c527e9.js.map
dolphinscheduler-ui/dist/js/21.831b2a2.js
dolphinscheduler-ui/dist/js/21.831b2a2.js.map
dolphinscheduler-ui/dist/js/22.2b4bb2a.js
dolphinscheduler-ui/dist/js/22.2b4bb2a.js.map
dolphinscheduler-ui/dist/js/23.81467ef.js
dolphinscheduler-ui/dist/js/23.81467ef.js.map
dolphinscheduler-ui/dist/js/24.54a00e4.js
dolphinscheduler-ui/dist/js/24.54a00e4.js.map
dolphinscheduler-ui/dist/js/25.8d7bd36.js
dolphinscheduler-ui/dist/js/25.8d7bd36.js.map
dolphinscheduler-ui/dist/js/26.2ec5e78.js
dolphinscheduler-ui/dist/js/26.2ec5e78.js.map
dolphinscheduler-ui/dist/js/27.3ab48c2.js
dolphinscheduler-ui/dist/js/27.3ab48c2.js.map
dolphinscheduler-ui/dist/js/28.363088a.js
dolphinscheduler-ui/dist/js/28.363088a.js.map
dolphinscheduler-ui/dist/js/29.6c5853a.js
dolphinscheduler-ui/dist/js/29.6c5853a.js.map
dolphinscheduler-ui/dist/js/3.a0edb5b.js
dolphinscheduler-ui/dist/js/3.a0edb5b.js.map
dolphinscheduler-ui/dist/js/30.940fdd3.js
dolphinscheduler-ui/dist/js/30.940fdd3.js.map
dolphinscheduler-ui/dist/js/31.168a460.js
dolphinscheduler-ui/dist/js/31.168a460.js.map
dolphinscheduler-ui/dist/js/32.8df6594.js
dolphinscheduler-ui/dist/js/32.8df6594.js.map
dolphinscheduler-ui/dist/js/33.4480bbe.js
dolphinscheduler-ui/dist/js/33.4480bbe.js.map
dolphinscheduler-ui/dist/js/34.b407fe1.js
dolphinscheduler-ui/dist/js/34.b407fe1.js.map
dolphinscheduler-ui/dist/js/35.f340b0a.js
dolphinscheduler-ui/dist/js/35.f340b0a.js.map
dolphinscheduler-ui/dist/js/36.8880c2d.js
dolphinscheduler-ui/dist/js/36.8880c2d.js.map
dolphinscheduler-ui/dist/js/37.ea2a25d.js
dolphinscheduler-ui/dist/js/37.ea2a25d.js.map
dolphinscheduler-ui/dist/js/38.98a59ee.js
dolphinscheduler-ui/dist/js/38.98a59ee.js.map
dolphinscheduler-ui/dist/js/39.a5e958a.js
dolphinscheduler-ui/dist/js/39.a5e958a.js.map
dolphinscheduler-ui/dist/js/4.4ca44db.js
dolphinscheduler-ui/dist/js/4.4ca44db.js.map
dolphinscheduler-ui/dist/js/40.e187b1e.js
dolphinscheduler-ui/dist/js/40.e187b1e.js.map
dolphinscheduler-ui/dist/js/41.0e89182.js
dolphinscheduler-ui/dist/js/41.0e89182.js.map
dolphinscheduler-ui/dist/js/42.341047c.js
dolphinscheduler-ui/dist/js/42.341047c.js.map
dolphinscheduler-ui/dist/js/43.27b8228.js
dolphinscheduler-ui/dist/js/43.27b8228.js.map
dolphinscheduler-ui/dist/js/44.e8869bc.js
dolphinscheduler-ui/dist/js/44.e8869bc.js.map
dolphinscheduler-ui/dist/js/45.8d54901.js
dolphinscheduler-ui/dist/js/45.8d54901.js.map
dolphinscheduler-ui/dist/js/5.e1ed7f3.js
dolphinscheduler-ui/dist/js/5.e1ed7f3.js.map
dolphinscheduler-ui/dist/js/6.241ba07.js
dolphinscheduler-ui/dist/js/6.241ba07.js.map
dolphinscheduler-ui/dist/js/7.ab2e297.js
dolphinscheduler-ui/dist/js/7.ab2e297.js.map
dolphinscheduler-ui/dist/js/8.83ff814.js
dolphinscheduler-ui/dist/js/8.83ff814.js.map
dolphinscheduler-ui/dist/js/9.39cb29f.js
dolphinscheduler-ui/dist/js/9.39cb29f.js.map
dolphinscheduler-ui/dist/js/common.733e342.js
dolphinscheduler-ui/dist/js/common.733e342.js.map
dolphinscheduler-ui/dist/js/home/index.78a5d12.js
dolphinscheduler-ui/dist/js/home/index.78a5d12.js.map
dolphinscheduler-ui/dist/js/login/index.291b8e3.js
dolphinscheduler-ui/dist/js/login/index.291b8e3.js.map
dolphinscheduler-ui/dist/lib/external/
/dolphinscheduler-dao/src/main/resources/dao/data_source.properties
dolphinscheduler-dao/src/main/resources/dao/data_source.properties
.mvn/wrapper/*.jar
!/zookeeper_data/

2
README.md

@@ -99,9 +99,9 @@ It is because of the shoulders of these open source projects that the birth of t
### Get Help
1. Submit an issue
1. Subscribe to the mailing list: https://dolphinscheduler.apache.org/en-us/docs/development/subscribe.html, then send mail to dev@dolphinscheduler.apache.org
1. Slack channel: [![Slack Status](https://img.shields.io/badge/slack-join_chat-white.svg?logo=slack&style=social)](https://join.slack.com/share/zt-do3gvfhj-UUhrAX2GxkVX_~JJt1jpKA)
1. Contact WeChat(dailidong66). This is just for Mandarin(CN) discussion.
### License
Please refer to [LICENSE](https://github.com/apache/incubator-dolphinscheduler/blob/dev/LICENSE) file.

68
ambari_plugin/README.md

@@ -0,0 +1,68 @@
### Using the Ambari Plugin for Dolphin Scheduler
##### Notes
1. This document is intended for users who already have a basic understanding of Ambari.
2. It describes how to add the Dolphin Scheduler (version 1.3.0) service to an existing Ambari installation.
##### 1. Installation Preparation
1. Prepare the RPM package
- Run ```mvn -U clean install rpm:attached-rpm -Prpmbuild -Dmaven.test.skip=true -X``` in the dolphinscheduler-dist directory of the source tree; the package is generated under dolphinscheduler-dist/target/rpm/apache-dolphinscheduler-incubating/RPMS/noarch
2. Create the installation user for DS and grant it the required permissions (see the sketch after the SQL block below)
3. Initialize the database
```
-- Create the Dolphin Scheduler database: dolphinscheduler
CREATE DATABASE dolphinscheduler DEFAULT CHARACTER SET utf8 DEFAULT COLLATE
utf8_general_ci;
-- Initialize the user and password for the dolphinscheduler database and grant privileges
-- Replace {user} and {password} in the SQL statements below with the dolphinscheduler database user and password
GRANT ALL PRIVILEGES ON dolphinscheduler.* TO '{user}'@'%' IDENTIFIED BY '{password}';
GRANT ALL PRIVILEGES ON dolphinscheduler.* TO '{user}'@'localhost' IDENTIFIED BY
'{password}';
flush privileges;
```
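A minimal sketch for step 2, assuming a deploy user named `dolphinscheduler` and a root shell on each node. The user name and the passwordless-sudo setup are assumptions, not something mandated by this plugin; adjust them to your environment:
```bash
# Run as root on every node that will host a Dolphin Scheduler component.
useradd dolphinscheduler            # assumed deploy-user name
passwd dolphinscheduler             # set its password interactively
# Passwordless sudo is commonly granted so the scheduler can switch to tenant users when running tasks.
echo 'dolphinscheduler ALL=(ALL) NOPASSWD: ALL' > /etc/sudoers.d/dolphinscheduler
chmod 440 /etc/sudoers.d/dolphinscheduler
```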
##### 2. Installing Dolphin Scheduler through Ambari
1. Install Dolphin Scheduler from the Ambari UI
![](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_001.png)
2. Select the node on which the Dolphin Scheduler Master will be installed
![](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_002.png)
3. Choose the nodes for the Dolphin Scheduler Worker, Api, Logger, and Alert components
![](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_003.png)
4. Set the installation user of the Dolphin Scheduler service (**the one created in step 1**) and its user group
![](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_004.png)
5. Configure the database information (it must match the database initialized in step 1)
![](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_005.png)
6. Configure any other settings, if needed
![](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_006.png)
![](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_007.png)
7. Proceed with the remaining steps as usual
![](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_008.png)
8. The interface after a successful installation (a quick way to verify the API server is sketched below)
![](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_009.png)
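As an optional sanity check after installation, you can probe the API server port from any node. This is a sketch, not part of the plugin: the port (12345) and the servlet context path (/dolphinscheduler/) are the defaults from dolphin-application-api.xml in this change set, and `<api-host>` is a placeholder for the node where DOLPHIN_API was installed.
```bash
# Any HTTP response confirms the API server is listening on its configured port.
curl -i http://<api-host>:12345/dolphinscheduler/
```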

4
ambari_plugin/common-services/DOLPHIN/1.2.1/configuration/dolphin-common.xml

@@ -43,7 +43,7 @@
</property>
<property>
<name>zookeeper.connection.timeout</name>
<value>300</value>
<value>30000</value>
<value-attributes>
<type>int</type>
</value-attributes>
@@ -73,7 +73,7 @@
</property>
<property>
<name>zookeeper.retry.maxtime</name>
<value>5</value>
<value>10</value>
<value-attributes>
<type>int</type>
</value-attributes>

164
ambari_plugin/common-services/DOLPHIN/1.3.0/alerts.json

@@ -0,0 +1,164 @@
{
"DOLPHIN": {
"service": [],
"DOLPHIN_API": [
{
"name": "dolphin_api_port_check",
"label": "dolphin_api_port_check",
"description": "dolphin_api_port_check.",
"interval": 10,
"scope": "ANY",
"source": {
"type": "PORT",
"uri": "{{dolphin-application-api/server.port}}",
"default_port": 12345,
"reporting": {
"ok": {
"text": "TCP OK - {0:.3f}s response on port {1}"
},
"warning": {
"text": "TCP OK - {0:.3f}s response on port {1}",
"value": 1.5
},
"critical": {
"text": "Connection failed: {0} to {1}:{2}",
"value": 5.0
}
}
}
}
],
"DOLPHIN_MASTER": [
{
"name": "DOLPHIN_MASTER_CHECK",
"label": "check dolphin scheduler master status",
"description": "",
"interval":10,
"scope": "HOST",
"enabled": true,
"source": {
"type": "SCRIPT",
"path": "DOLPHIN/1.3.0/package/alerts/alert_dolphin_scheduler_status.py",
"parameters": [
{
"name": "connection.timeout",
"display_name": "Connection Timeout",
"value": 5.0,
"type": "NUMERIC",
"description": "The maximum time before this alert is considered to be CRITICAL",
"units": "seconds",
"threshold": "CRITICAL"
},
{
"name": "alertName",
"display_name": "alertName",
"value": "DOLPHIN_MASTER",
"type": "STRING",
"description": "alert name"
}
]
}
}
],
"DOLPHIN_WORKER": [
{
"name": "DOLPHIN_WORKER_CHECK",
"label": "check dolphin scheduler worker status",
"description": "",
"interval":10,
"scope": "HOST",
"enabled": true,
"source": {
"type": "SCRIPT",
"path": "DOLPHIN/1.3.0/package/alerts/alert_dolphin_scheduler_status.py",
"parameters": [
{
"name": "connection.timeout",
"display_name": "Connection Timeout",
"value": 5.0,
"type": "NUMERIC",
"description": "The maximum time before this alert is considered to be CRITICAL",
"units": "seconds",
"threshold": "CRITICAL"
},
{
"name": "alertName",
"display_name": "alertName",
"value": "DOLPHIN_WORKER",
"type": "STRING",
"description": "alert name"
}
]
}
}
],
"DOLPHIN_ALERT": [
{
"name": "DOLPHIN_DOLPHIN_ALERT_CHECK",
"label": "check dolphin scheduler alert status",
"description": "",
"interval":10,
"scope": "HOST",
"enabled": true,
"source": {
"type": "SCRIPT",
"path": "DOLPHIN/1.3.0/package/alerts/alert_dolphin_scheduler_status.py",
"parameters": [
{
"name": "connection.timeout",
"display_name": "Connection Timeout",
"value": 5.0,
"type": "NUMERIC",
"description": "The maximum time before this alert is considered to be CRITICAL",
"units": "seconds",
"threshold": "CRITICAL"
},
{
"name": "alertName",
"display_name": "alertName",
"value": "DOLPHIN_ALERT",
"type": "STRING",
"description": "alert name"
}
]
}
}
],
"DOLPHIN_ALERT": [
{
"name": "DOLPHIN_DOLPHIN_LOGGER_CHECK",
"label": "check dolphin scheduler alert status",
"description": "",
"interval":10,
"scope": "HOST",
"enabled": true,
"source": {
"type": "SCRIPT",
"path": "DOLPHIN/1.3.0/package/alerts/alert_dolphin_scheduler_status.py",
"parameters": [
{
"name": "connection.timeout",
"display_name": "Connection Timeout",
"value": 5.0,
"type": "NUMERIC",
"description": "The maximum time before this alert is considered to be CRITICAL",
"units": "seconds",
"threshold": "CRITICAL"
},
{
"name": "alertName",
"display_name": "alertName",
"value": "DOLPHIN_LOGGER",
"type": "STRING",
"description": "alert name"
}
]
}
}
]
}
}

143
ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-alert.xml

@@ -0,0 +1,143 @@
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<configuration>
<property>
<name>alert.type</name>
<value>EMAIL</value>
<description>alert type is EMAIL/SMS</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>alert.template</name>
<value>html</value>
<description>alter msg template, default is html template</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mail.protocol</name>
<value>SMTP</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mail.server.host</name>
<value>xxx.xxx.com</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mail.server.port</name>
<value>25</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mail.sender</name>
<value>admin</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mail.user</name>
<value>admin</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mail.passwd</name>
<value>000000</value>
<description></description>
<property-type>PASSWORD</property-type>
<value-attributes>
<type>password</type>
</value-attributes>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mail.smtp.starttls.enable</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mail.smtp.ssl.enable</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mail.smtp.ssl.trust</name>
<value>xxx.xxx.com</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>enterprise.wechat.enable</name>
<value>false</value>
<description></description>
<value-attributes>
<type>value-list</type>
<entries>
<entry>
<value>true</value>
<label>Enabled</label>
</entry>
<entry>
<value>false</value>
<label>Disabled</label>
</entry>
</entries>
<selection-cardinality>1</selection-cardinality>
</value-attributes>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>enterprise.wechat.corp.id</name>
<value>wechatId</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>enterprise.wechat.secret</name>
<value>secret</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>enterprise.wechat.agent.id</name>
<value>agentId</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>enterprise.wechat.users</name>
<value>wechatUsers</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
</configuration>

87
ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-application-api.xml

@@ -0,0 +1,87 @@
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<configuration>
<property>
<name>server.port</name>
<value>12345</value>
<description>
server port
</description>
<value-attributes>
<type>int</type>
</value-attributes>
</property>
<property>
<name>server.servlet.session.timeout</name>
<value>7200</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description>
</description>
</property>
<property>
<name>server.servlet.context-path</name>
<value>/dolphinscheduler/</value>
<description>
</description>
</property>
<property>
<name>spring.servlet.multipart.max-file-size</name>
<value>1024</value>
<value-attributes>
<unit>MB</unit>
<type>int</type>
</value-attributes>
<description>
</description>
</property>
<property>
<name>spring.servlet.multipart.max-request-size</name>
<value>1024</value>
<value-attributes>
<unit>MB</unit>
<type>int</type>
</value-attributes>
<description>
</description>
</property>
<property>
<name>server.jetty.max-http-post-size</name>
<value>5000000</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description>
</description>
</property>
<property>
<name>spring.messages.encoding</name>
<value>UTF-8</value>
<description></description>
</property>
<property>
<name>spring.messages.basename</name>
<value>i18n/messages</value>
<description></description>
</property>
<property>
<name>security.authentication.type</name>
<value>PASSWORD</value>
<description></description>
</property>
</configuration>

158
ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-common.xml

@@ -0,0 +1,158 @@
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<configuration>
<property>
<name>resource.storage.type</name>
<display-name>Choose Resource Upload Startup Type</display-name>
<description>
Resource upload startup type : HDFS,S3,NONE
</description>
<value>NONE</value>
<value-attributes>
<type>value-list</type>
<entries>
<entry>
<value>HDFS</value>
<label>HDFS</label>
</entry>
<entry>
<value>S3</value>
<label>S3</label>
</entry>
<entry>
<value>NONE</value>
<label>NONE</label>
</entry>
</entries>
<selection-cardinality>1</selection-cardinality>
</value-attributes>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>resource.upload.path</name>
<value>/dolphinscheduler</value>
<description>
resource store on HDFS/S3 path, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions. "/dolphinscheduler" is recommended
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>data.basedir.path</name>
<value>/tmp/dolphinscheduler</value>
<description>
user data local directory path, please make sure the directory exists and have read write permissions
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>hadoop.security.authentication.startup.state</name>
<value>false</value>
<value-attributes>
<type>value-list</type>
<entries>
<entry>
<value>true</value>
<label>Enabled</label>
</entry>
<entry>
<value>false</value>
<label>Disabled</label>
</entry>
</entries>
<selection-cardinality>1</selection-cardinality>
</value-attributes>
<description>whether kerberos starts</description>
</property>
<property>
<name>java.security.krb5.conf.path</name>
<value>/opt/krb5.conf</value>
<description>
java.security.krb5.conf path
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>login.user.keytab.username</name>
<value>hdfs-mycluster@ESZ.COM</value>
<description>
LoginUserFromKeytab user
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>login.user.keytab.path</name>
<value>/opt/hdfs.headless.keytab</value>
<description>
LoginUserFromKeytab path
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>resource.view.suffixs</name>
<value>txt,log,sh,conf,cfg,py,java,sql,hql,xml,properties</value>
<description></description>
</property>
<property>
<name>hdfs.root.user</name>
<value>hdfs</value>
<description>
Users who have permission to create directories under the HDFS root path
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>fs.defaultFS</name>
<value>hdfs://mycluster:8020</value>
<description>
HA or single namenode,
If namenode ha needs to copy core-site.xml and hdfs-site.xml to the conf directory,
support s3,for example : s3a://dolphinscheduler
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>fs.s3a.endpoint</name>
<value>http://host:9010</value>
<description>
s3 need,s3 endpoint
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>fs.s3a.access.key</name>
<value>A3DXS30FO22544RE</value>
<description>
s3 need,s3 access key
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>fs.s3a.secret.key</name>
<value>OloCLq3n+8+sdPHUhJ21XrSxTC+JK</value>
<description>
s3 need,s3 secret key
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>kerberos.expire.time</name>
<value>7</value>
<description></description>
</property>
</configuration>

467
ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-datasource.xml

@@ -0,0 +1,467 @@
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<configuration>
<property>
<name>spring.datasource.initialSize</name>
<value>5</value>
<description>
Init connection number
</description>
<value-attributes>
<type>int</type>
</value-attributes>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.minIdle</name>
<value>5</value>
<description>
Min connection number
</description>
<value-attributes>
<type>int</type>
</value-attributes>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.maxActive</name>
<value>50</value>
<description>
Max connection number
</description>
<value-attributes>
<type>int</type>
</value-attributes>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.maxWait</name>
<value>60000</value>
<description>
Max wait time for get a connection in milliseconds.
If configuring maxWait, fair locks are enabled by default and concurrency efficiency decreases.
If necessary, unfair locks can be used by configuring the useUnfairLock attribute to true.
</description>
<value-attributes>
<type>int</type>
</value-attributes>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.timeBetweenEvictionRunsMillis</name>
<value>60000</value>
<description>
Milliseconds for check to close free connections
</description>
<value-attributes>
<type>int</type>
</value-attributes>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.timeBetweenConnectErrorMillis</name>
<value>60000</value>
<description>
The Destroy thread detects the connection interval and closes the physical connection in milliseconds
if the connection idle time is greater than or equal to minEvictableIdleTimeMillis.
</description>
<value-attributes>
<type>int</type>
</value-attributes>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.minEvictableIdleTimeMillis</name>
<value>300000</value>
<description>
The longest time a connection remains idle without being evicted, in milliseconds
</description>
<value-attributes>
<type>int</type>
</value-attributes>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.validationQuery</name>
<value>SELECT 1</value>
<description>
The SQL used to check whether the connection is valid requires a query statement.
If validation Query is null, testOnBorrow, testOnReturn, and testWhileIdle will not work.
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.validationQueryTimeout</name>
<value>3</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description>
Check whether the connection is valid for timeout, in seconds
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.testWhileIdle</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description>
When applying for a connection,
if it is detected that the connection is idle longer than time Between Eviction Runs Millis,
validation Query is performed to check whether the connection is valid
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.testOnBorrow</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description>
Execute validation to check if the connection is valid when applying for a connection
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.testOnReturn</name>
<value>false</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description>
Execute validation to check if the connection is valid when the connection is returned
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.defaultAutoCommit</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description>
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.keepAlive</name>
<value>false</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description>
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.poolPreparedStatements</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description>
Open PSCache, specify count PSCache for every connection
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.maxPoolPreparedStatementPerConnectionSize</name>
<value>20</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.spring.datasource.filters</name>
<value>stat,wall,log4j</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>spring.datasource.connectionProperties</name>
<value>druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.mapper-locations</name>
<value>classpath*:/org.apache.dolphinscheduler.dao.mapper/*.xml</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.typeEnumsPackage</name>
<value>org.apache.dolphinscheduler.*.enums</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.typeAliasesPackage</name>
<value>org.apache.dolphinscheduler.dao.entity</value>
<description>
Entity scan, where multiple packages are separated by a comma or semicolon
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.global-config.db-config.id-type</name>
<value>AUTO</value>
<value-attributes>
<type>value-list</type>
<entries>
<entry>
<value>AUTO</value>
<label>AUTO</label>
</entry>
<entry>
<value>INPUT</value>
<label>INPUT</label>
</entry>
<entry>
<value>ID_WORKER</value>
<label>ID_WORKER</label>
</entry>
<entry>
<value>UUID</value>
<label>UUID</label>
</entry>
</entries>
<selection-cardinality>1</selection-cardinality>
</value-attributes>
<description>
Primary key type AUTO:" database ID AUTO ",
INPUT:" user INPUT ID",
ID_WORKER:" global unique ID (numeric type unique ID)",
UUID:" global unique ID UUID";
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.global-config.db-config.field-strategy</name>
<value>NOT_NULL</value>
<value-attributes>
<type>value-list</type>
<entries>
<entry>
<value>IGNORED</value>
<label>IGNORED</label>
</entry>
<entry>
<value>NOT_NULL</value>
<label>NOT_NULL</label>
</entry>
<entry>
<value>NOT_EMPTY</value>
<label>NOT_EMPTY</label>
</entry>
</entries>
<selection-cardinality>1</selection-cardinality>
</value-attributes>
<description>
Field policy IGNORED:" ignore judgment ",
NOT_NULL:" not NULL judgment "),
NOT_EMPTY:" not NULL judgment"
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.global-config.db-config.column-underline</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.global-config.db-config.logic-delete-value</name>
<value>1</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.global-config.db-config.logic-not-delete-value</name>
<value>0</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.global-config.db-config.banner</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.configuration.map-underscore-to-camel-case</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.configuration.cache-enabled</name>
<value>false</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.configuration.call-setters-on-nulls</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.configuration.jdbc-type-for-null</name>
<value>null</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.exec.threads</name>
<value>100</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.exec.task.num</name>
<value>20</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.heartbeat.interval</name>
<value>10</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.task.commit.retryTimes</name>
<value>5</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.task.commit.interval</name>
<value>1000</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.max.cpuload.avg</name>
<value>100</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.reserved.memory</name>
<value>0.1</value>
<value-attributes>
<type>float</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>worker.exec.threads</name>
<value>100</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>worker.heartbeat.interval</name>
<value>10</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>worker.fetch.task.num</name>
<value>3</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>worker.max.cpuload.avg</name>
<value>100</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>worker.reserved.memory</name>
<value>0.1</value>
<value-attributes>
<type>float</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
</configuration>

123
ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-env.xml

@@ -0,0 +1,123 @@
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<configuration>
<property>
<name>dolphin.database.type</name>
<value>mysql</value>
<description>Dolphin Scheduler DataBase Type Which Is Select</description>
<display-name>Dolphin Database Type</display-name>
<value-attributes>
<type>value-list</type>
<entries>
<entry>
<value>mysql</value>
<label>Mysql</label>
</entry>
<entry>
<value>postgresql</value>
<label>Postgresql</label>
</entry>
</entries>
<selection-cardinality>1</selection-cardinality>
</value-attributes>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>dolphin.database.host</name>
<value></value>
<display-name>Dolphin Database Host</display-name>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>dolphin.database.port</name>
<value></value>
<display-name>Dolphin Database Port</display-name>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>dolphin.database.username</name>
<value></value>
<display-name>Dolphin Database Username</display-name>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>dolphin.database.password</name>
<value></value>
<display-name>Dolphin Database Password</display-name>
<property-type>PASSWORD</property-type>
<value-attributes>
<type>password</type>
</value-attributes>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>dolphin.user</name>
<value></value>
<description>Which user to install and admin dolphin scheduler</description>
<display-name>Deploy User</display-name>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>dolphin.group</name>
<value></value>
<description>Which user to install and admin dolphin scheduler</description>
<display-name>Deploy Group</display-name>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>dolphinscheduler-env-content</name>
<display-name>Dolphinscheduler Env template</display-name>
<description>This is the jinja template for dolphinscheduler.env.sh file</description>
<value>#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
export HADOOP_HOME=/opt/soft/hadoop
export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop
export SPARK_HOME1=/opt/soft/spark1
export SPARK_HOME2=/opt/soft/spark2
export PYTHON_HOME=/opt/soft/python
export JAVA_HOME=/opt/soft/java
export HIVE_HOME=/opt/soft/hive
export FLINK_HOME=/opt/soft/flink</value>
<value-attributes>
<type>content</type>
<empty-value-valid>false</empty-value-valid>
<show-property-name>false</show-property-name>
</value-attributes>
<on-ambari-upgrade add="true"/>
</property>
</configuration>

88
ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-master.xml

@@ -0,0 +1,88 @@
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<configuration>
<property>
<name>master.exec.threads</name>
<value>100</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description>master execute thread num</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.exec.task.num</name>
<value>20</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description>master execute task number in parallel</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.heartbeat.interval</name>
<value>10</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description>master heartbeat interval</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.task.commit.retryTimes</name>
<value>5</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description>master commit task retry times</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.task.commit.interval</name>
<value>1000</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description>master commit task interval</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.max.cpuload.avg</name>
<value>100</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description>only less than cpu avg load, master server can work. default value : the number of cpu cores * 2</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.reserved.memory</name>
<value>0.3</value>
<description>only larger than reserved memory, master server can work. default value : physical memory * 1/10, unit is G.</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.listen.port</name>
<value>5678</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description>master listen port</description>
<on-ambari-upgrade add="true"/>
</property>
</configuration>

126
ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-quartz.xml

@@ -0,0 +1,126 @@
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<configuration>
<property>
<name>org.quartz.scheduler.instanceName</name>
<value>DolphinScheduler</value>
<description></description>
</property>
<property>
<!-- list of enumerated values -->
<name>org.quartz.scheduler.instanceId</name>
<value>AUTO</value>
<description></description>
</property>
<property>
<name>org.quartz.scheduler.makeSchedulerThreadDaemon</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description></description>
</property>
<property>
<name>org.quartz.jobStore.useProperties</name>
<value>false</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description></description>
</property>
<property>
<name>org.quartz.threadPool.class</name>
<value>org.quartz.simpl.SimpleThreadPool</value>
<description></description>
</property>
<property>
<name>org.quartz.threadPool.makeThreadsDaemons</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description></description>
</property>
<property>
<name>org.quartz.threadPool.threadCount</name>
<value>25</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
</property>
<property>
<name>org.quartz.threadPool.threadPriority</name>
<value>5</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
</property>
<property>
<name>org.quartz.jobStore.class</name>
<value>org.quartz.impl.jdbcjobstore.JobStoreTX</value>
<description></description>
</property>
<property>
<name>org.quartz.jobStore.tablePrefix</name>
<value>QRTZ_</value>
<description></description>
</property>
<property>
<name>org.quartz.jobStore.isClustered</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description></description>
</property>
<property>
<name>org.quartz.jobStore.misfireThreshold</name>
<value>60000</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
</property>
<property>
<name>org.quartz.jobStore.clusterCheckinInterval</name>
<value>5000</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
</property>
<property>
<name>org.quartz.jobStore.acquireTriggersWithinLock</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description></description>
</property>
<property>
<name>org.quartz.jobStore.dataSource</name>
<value>myDs</value>
<description></description>
</property>
<property>
<name>org.quartz.dataSource.myDs.connectionProvider.class</name>
<value>org.apache.dolphinscheduler.service.quartz.DruidConnectionProvider</value>
<description></description>
</property>
</configuration>

76
ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-worker.xml

@@ -0,0 +1,76 @@
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<configuration>
<property>
<name>worker.exec.threads</name>
<value>100</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description>worker execute thread num</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>worker.heartbeat.interval</name>
<value>10</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description>worker heartbeat interval</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>worker.fetch.task.num</name>
<value>3</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description>submit the number of tasks at a time</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>worker.max.cpuload.avg</name>
<value>100</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description>only less than cpu avg load, worker server can work. default value : the number of cpu cores * 2</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>worker.reserved.memory</name>
<value>0.3</value>
<description>the worker server only accepts tasks when available memory is above this reserved value; default: physical memory * 1/10, unit: GB</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>worker.listen.port</name>
<value>1234</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description>worker listen port</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>worker.group</name>
<value>default</value>
<description>default worker group</description>
<on-ambari-upgrade add="true"/>
</property>
</configuration>
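The two load-related thresholds above are easiest to read side by side. A minimal, illustrative Python sketch (not DolphinScheduler source; the /proc/meminfo parsing and the function name are assumptions) of how a worker could gate task acceptance on `worker.max.cpuload.avg` and `worker.reserved.memory`:

```python
import os

def worker_can_accept_tasks(max_cpuload_avg=100, reserved_memory_gb=0.3):
    """Sketch: accept tasks only while the load average is below the limit
    and available memory stays above the reserved amount (in GB)."""
    load_1min = os.getloadavg()[0]                        # Unix-only
    with open("/proc/meminfo") as f:                      # Linux-only
        meminfo = dict(line.split(":", 1) for line in f)
    available_gb = float(meminfo["MemAvailable"].split()[0]) / (1024 * 1024)
    return load_1min < max_cpuload_avg and available_gb > reserved_memory_gb
```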

84
ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-zookeeper.xml

@@ -0,0 +1,84 @@
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<configuration>
<property>
<name>dolphinscheduler.queue.impl</name>
<value>zookeeper</value>
<description>
Task queue implementation, default "zookeeper"
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>zookeeper.dolphinscheduler.root</name>
<value>/dolphinscheduler</value>
<description>
dolphinscheduler root znode in ZooKeeper
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>zookeeper.session.timeout</name>
<value>300</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description>
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>zookeeper.connection.timeout</name>
<value>300</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description>
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>zookeeper.retry.base.sleep</name>
<value>100</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description>
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>zookeeper.retry.max.sleep</name>
<value>30000</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description>
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>zookeeper.retry.maxtime</name>
<value>5</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description>
</description>
<on-ambari-upgrade add="true"/>
</property>
</configuration>

137
ambari_plugin/common-services/DOLPHIN/1.3.0/metainfo.xml

@@ -0,0 +1,137 @@
<?xml version="1.0"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<metainfo>
<schemaVersion>2.0</schemaVersion>
<services>
<service>
<name>DOLPHIN</name>
<displayName>Dolphin Scheduler</displayName>
<comment>A distributed and easily extensible visual DAG workflow task scheduling system</comment>
<version>1.3.0</version>
<components>
<component>
<name>DOLPHIN_MASTER</name>
<displayName>DS Master</displayName>
<category>MASTER</category>
<cardinality>1+</cardinality>
<commandScript>
<script>scripts/dolphin_master_service.py</script>
<scriptType>PYTHON</scriptType>
<timeout>600</timeout>
</commandScript>
</component>
<component>
<name>DOLPHIN_LOGGER</name>
<displayName>DS Logger</displayName>
<category>SLAVE</category>
<cardinality>1+</cardinality>
<commandScript>
<script>scripts/dolphin_logger_service.py</script>
<scriptType>PYTHON</scriptType>
<timeout>600</timeout>
</commandScript>
</component>
<component>
<name>DOLPHIN_WORKER</name>
<displayName>DS Worker</displayName>
<category>SLAVE</category>
<cardinality>1+</cardinality>
<dependencies>
<dependency>
<name>DOLPHIN/DOLPHIN_LOGGER</name>
<scope>host</scope>
<auto-deploy>
<enabled>true</enabled>
</auto-deploy>
</dependency>
</dependencies>
<commandScript>
<script>scripts/dolphin_worker_service.py</script>
<scriptType>PYTHON</scriptType>
<timeout>600</timeout>
</commandScript>
</component>
<component>
<name>DOLPHIN_ALERT</name>
<displayName>DS Alert</displayName>
<category>SLAVE</category>
<cardinality>1</cardinality>
<commandScript>
<script>scripts/dolphin_alert_service.py</script>
<scriptType>PYTHON</scriptType>
<timeout>600</timeout>
</commandScript>
</component>
<component>
<name>DOLPHIN_API</name>
<displayName>DS_Api</displayName>
<category>SLAVE</category>
<cardinality>1</cardinality>
<commandScript>
<script>scripts/dolphin_api_service.py</script>
<scriptType>PYTHON</scriptType>
<timeout>600</timeout>
</commandScript>
</component>
</components>
<requiredServices>
<service>ZOOKEEPER</service>
</requiredServices>
<osSpecifics>
<osSpecific>
<osFamily>any</osFamily>
<packages>
<package>
<name>apache-dolphinscheduler-incubating-1.3.0*</name>
</package>
</packages>
</osSpecific>
</osSpecifics>
<configuration-dependencies>
<config-type>dolphin-alert</config-type>
<config-type>dolphin-app-api</config-type>
<config-type>dolphin-app-dao</config-type>
<config-type>dolphin-common</config-type>
<config-type>dolphin-env</config-type>
<config-type>dolphin-quartz</config-type>
</configuration-dependencies>
<themes>
<theme>
<fileName>theme.json</fileName>
<default>true</default>
</theme>
</themes>
<quickLinksConfigurations-dir>quicklinks</quickLinksConfigurations-dir>
<quickLinksConfigurations>
<quickLinksConfiguration>
<fileName>quicklinks.json</fileName>
<default>true</default>
</quickLinksConfiguration>
</quickLinksConfigurations>
</service>
</services>
</metainfo>

124
ambari_plugin/common-services/DOLPHIN/1.3.0/package/alerts/alert_dolphin_scheduler_status.py

@@ -0,0 +1,124 @@
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import socket
import urllib2
import os
import logging
import ambari_simplejson as json
from resource_management.libraries.script.script import Script
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
logger = logging.getLogger('ambari_alerts')
config = Script.get_config()
def get_tokens():
"""
Returns a tuple of tokens in the format {{site/property}} that will be used
to build the dictionary passed into execute
:rtype tuple
"""
def get_info(url, connection_timeout):
response = None
try:
response = urllib2.urlopen(url, timeout=connection_timeout)
json_data = response.read()
return json_data
finally:
if response is not None:
try:
response.close()
except:
pass
def execute(configurations={}, parameters={}, host_name=None):
"""
Returns a tuple containing the result code and a pre-formatted result label
Keyword arguments:
configurations : a mapping of configuration key to value
parameters : a mapping of script parameter key to value
host_name : the name of this host where the alert is running
:type configurations dict
:type parameters dict
:type host_name str
"""
alert_name = parameters['alertName']
dolphin_pidfile_dir = "/opt/soft/run/dolphinscheduler"
pid = "0"
from resource_management.core import sudo
is_running = True
pid_file_path = ""
if alert_name == 'DOLPHIN_MASTER':
pid_file_path = dolphin_pidfile_dir + "/master-server.pid"
elif alert_name == 'DOLPHIN_WORKER':
pid_file_path = dolphin_pidfile_dir + "/worker-server.pid"
elif alert_name == 'DOLPHIN_ALERT':
pid_file_path = dolphin_pidfile_dir + "/alert-server.pid"
elif alert_name == 'DOLPHIN_LOGGER':
pid_file_path = dolphin_pidfile_dir + "/logger-server.pid"
elif alert_name == 'DOLPHIN_API':
pid_file_path = dolphin_pidfile_dir + "/api-server.pid"
if not pid_file_path or not os.path.isfile(pid_file_path):
is_running = False
try:
pid = int(sudo.read_file(pid_file_path))
except:
is_running = False
try:
# Kill will not actually kill the process
# From the doc:
# If sig is 0, then no signal is sent, but error checking is still
# performed; this can be used to check for the existence of a
# process ID or process group ID.
sudo.kill(pid, 0)
except OSError:
is_running = False
if host_name is None:
host_name = socket.getfqdn()
if not is_running:
result_code = "CRITICAL"
else:
result_code = "OK"
label = "The comment {0} of DOLPHIN_SCHEDULER on {1} is {2}".format(alert_name, host_name, result_code)
return ((result_code, [label]))
if __name__ == "__main__":
pass
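The check above relies on signal 0 being a pure existence test. A stand-alone sketch of the same liveness check without Ambari's `sudo` helper (the function name and the plain `open()` call are assumptions for illustration):

```python
import os

def pid_is_running(pid_file_path):
    """Sketch: True if the pid file exists and the process it names is alive."""
    if not os.path.isfile(pid_file_path):
        return False
    try:
        with open(pid_file_path) as f:
            pid = int(f.read().strip())
        os.kill(pid, 0)   # sends no signal; raises OSError if the pid is gone
        return True
    except (ValueError, OSError):
        return False
```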

61
ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_alert_service.py

@@ -0,0 +1,61 @@
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
from resource_management import *
from dolphin_env import dolphin_env
class DolphinAlertService(Script):
def install(self, env):
import params
env.set_params(params)
self.install_packages(env)
Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True)
def configure(self, env):
import params
params.pika_slave = True
env.set_params(params)
dolphin_env()
def start(self, env):
import params
env.set_params(params)
self.configure(env)
no_op_test = format("ls {dolphin_pidfile_dir}/alert-server.pid >/dev/null 2>&1 && ps `cat {dolphin_pidfile_dir}/alert-server.pid` | grep `cat {dolphin_pidfile_dir}/alert-server.pid` >/dev/null 2>&1")
start_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh start alert-server")
Execute(start_cmd, user=params.dolphin_user, not_if=no_op_test)
def stop(self, env):
import params
env.set_params(params)
stop_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh stop alert-server")
Execute(stop_cmd, user=params.dolphin_user)
time.sleep(5)
def status(self, env):
import status_params
env.set_params(status_params)
check_process_status(status_params.dolphin_run_dir + "alert-server.pid")
if __name__ == "__main__":
DolphinAlertService().execute()
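For reference, a sketch of what the `no_op_test` guard above expands to once `format()` substitutes `{dolphin_pidfile_dir}`; `Execute(start_cmd, not_if=no_op_test)` skips the start command whenever this shell test exits 0, i.e. the alert server is already running (the directory value is the `dolphin_pidfile_dir` defined in params.py below):

```python
import subprocess

pid_dir = "/opt/soft/run/dolphinscheduler"  # dolphin_pidfile_dir from params.py
guard = ("ls {d}/alert-server.pid >/dev/null 2>&1 && "
         "ps `cat {d}/alert-server.pid` | grep `cat {d}/alert-server.pid` "
         ">/dev/null 2>&1").format(d=pid_dir)
already_running = subprocess.call(guard, shell=True) == 0
```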

70
ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_api_service.py

@@ -0,0 +1,70 @@
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
from resource_management import *
from dolphin_env import dolphin_env
class DolphinApiService(Script):
def install(self, env):
import params
env.set_params(params)
self.install_packages(env)
Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True)
def configure(self, env):
import params
params.pika_slave = True
env.set_params(params)
dolphin_env()
def start(self, env):
import params
env.set_params(params)
self.configure(env)
#init
init_cmd=format("sh " + params.dolphin_home + "/script/create-dolphinscheduler.sh")
Execute(init_cmd, user=params.dolphin_user)
#upgrade
upgrade_cmd=format("sh " + params.dolphin_home + "/script/upgrade-dolphinscheduler.sh")
Execute(upgrade_cmd, user=params.dolphin_user)
no_op_test = format("ls {dolphin_pidfile_dir}/api-server.pid >/dev/null 2>&1 && ps `cat {dolphin_pidfile_dir}/api-server.pid` | grep `cat {dolphin_pidfile_dir}/api-server.pid` >/dev/null 2>&1")
start_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh start api-server")
Execute(start_cmd, user=params.dolphin_user, not_if=no_op_test)
def stop(self, env):
import params
env.set_params(params)
stop_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh stop api-server")
Execute(stop_cmd, user=params.dolphin_user)
time.sleep(5)
def status(self, env):
import status_params
env.set_params(status_params)
check_process_status(status_params.dolphin_run_dir + "api-server.pid")
if __name__ == "__main__":
DolphinApiService().execute()

123
ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_env.py

@@ -0,0 +1,123 @@
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
def dolphin_env():
import params
Directory(params.dolphin_pidfile_dir,
mode=0777,
owner=params.dolphin_user,
group=params.dolphin_group,
create_parents=True
)
Directory(params.dolphin_log_dir,
mode=0777,
owner=params.dolphin_user,
group=params.dolphin_group,
create_parents=True
)
Directory(params.dolphin_conf_dir,
mode=0777,
owner=params.dolphin_user,
group=params.dolphin_group,
create_parents=True
)
Directory(params.dolphin_common_map['data.basedir.path'],
mode=0777,
owner=params.dolphin_user,
group=params.dolphin_group,
create_parents=True
)
File(format(params.dolphin_env_path),
mode=0777,
content=InlineTemplate(params.dolphin_env_content),
owner=params.dolphin_user,
group=params.dolphin_group
)
File(format(params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh"),
mode=0755,
content=Template("dolphin-daemon.sh.j2"),
owner=params.dolphin_user,
group=params.dolphin_group
)
File(format(params.dolphin_conf_dir + "/master.properties"),
mode=0755,
content=Template("master.properties.j2"),
owner=params.dolphin_user,
group=params.dolphin_group
)
File(format(params.dolphin_conf_dir + "/worker.properties"),
mode=0755,
content=Template("worker.properties.j2"),
owner=params.dolphin_user,
group=params.dolphin_group
)
File(format(params.dolphin_conf_dir + "/alert.properties"),
mode=0755,
content=Template("alert.properties.j2"),
owner=params.dolphin_user,
group=params.dolphin_group
)
File(format(params.dolphin_conf_dir + "/datasource.properties"),
mode=0755,
content=Template("datasource.properties.j2"),
owner=params.dolphin_user,
group=params.dolphin_group
)
File(format(params.dolphin_conf_dir + "/application-api.properties"),
mode=0755,
content=Template("application-api.properties.j2"),
owner=params.dolphin_user,
group=params.dolphin_group
)
File(format(params.dolphin_conf_dir + "/common.properties"),
mode=0755,
content=Template("common.properties.j2"),
owner=params.dolphin_user,
group=params.dolphin_group
)
File(format(params.dolphin_conf_dir + "/quartz.properties"),
mode=0755,
content=Template("quartz.properties.j2"),
owner=params.dolphin_user,
group=params.dolphin_group
)
File(format(params.dolphin_conf_dir + "/zookeeper.properties"),
mode=0755,
content=Template("zookeeper.properties.j2"),
owner=params.dolphin_user,
group=params.dolphin_group
)

61
ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_logger_service.py

@@ -0,0 +1,61 @@
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
from resource_management import *
from dolphin_env import dolphin_env
class DolphinLoggerService(Script):
def install(self, env):
import params
env.set_params(params)
self.install_packages(env)
Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True)
def configure(self, env):
import params
params.pika_slave = True
env.set_params(params)
dolphin_env()
def start(self, env):
import params
env.set_params(params)
self.configure(env)
no_op_test = format("ls {dolphin_pidfile_dir}/logger-server.pid >/dev/null 2>&1 && ps `cat {dolphin_pidfile_dir}/logger-server.pid` | grep `cat {dolphin_pidfile_dir}/logger-server.pid` >/dev/null 2>&1")
start_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh start logger-server")
Execute(start_cmd, user=params.dolphin_user, not_if=no_op_test)
def stop(self, env):
import params
env.set_params(params)
stop_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh stop logger-server")
Execute(stop_cmd, user=params.dolphin_user)
time.sleep(5)
def status(self, env):
import status_params
env.set_params(status_params)
check_process_status(status_params.dolphin_run_dir + "logger-server.pid")
if __name__ == "__main__":
DolphinLoggerService().execute()

61
ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_master_service.py

@@ -0,0 +1,61 @@
# -*- coding: utf-8 -*-
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
from resource_management import *
from dolphin_env import dolphin_env
class DolphinMasterService(Script):
def install(self, env):
import params
env.set_params(params)
self.install_packages(env)
Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True)
def configure(self, env):
import params
params.pika_slave = True
env.set_params(params)
dolphin_env()
def start(self, env):
import params
env.set_params(params)
self.configure(env)
no_op_test = format("ls {dolphin_pidfile_dir}/master-server.pid >/dev/null 2>&1 && ps `cat {dolphin_pidfile_dir}/master-server.pid` | grep `cat {dolphin_pidfile_dir}/master-server.pid` >/dev/null 2>&1")
start_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh start master-server")
Execute(start_cmd, user=params.dolphin_user, not_if=no_op_test)
def stop(self, env):
import params
env.set_params(params)
stop_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh stop master-server")
Execute(stop_cmd, user=params.dolphin_user)
time.sleep(5)
def status(self, env):
import status_params
env.set_params(status_params)
check_process_status(status_params.dolphin_run_dir + "master-server.pid")
if __name__ == "__main__":
DolphinMasterService().execute()

60
ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_worker_service.py

@@ -0,0 +1,60 @@
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
from resource_management import *
from dolphin_env import dolphin_env
class DolphinWorkerService(Script):
def install(self, env):
import params
env.set_params(params)
self.install_packages(env)
Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True)
def configure(self, env):
import params
params.pika_slave = True
env.set_params(params)
dolphin_env()
def start(self, env):
import params
env.set_params(params)
self.configure(env)
no_op_test = format("ls {dolphin_pidfile_dir}/worker-server.pid >/dev/null 2>&1 && ps `cat {dolphin_pidfile_dir}/worker-server.pid` | grep `cat {dolphin_pidfile_dir}/worker-server.pid` >/dev/null 2>&1")
start_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh start worker-server")
Execute(start_cmd, user=params.dolphin_user, not_if=no_op_test)
def stop(self, env):
import params
env.set_params(params)
stop_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh stop worker-server")
Execute(stop_cmd, user=params.dolphin_user)
time.sleep(5)
def status(self, env):
import status_params
env.set_params(status_params)
check_process_status(status_params.dolphin_run_dir + "worker-server.pid")
if __name__ == "__main__":
DolphinWorkerService().execute()

154
ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/params.py

@@ -0,0 +1,154 @@
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from resource_management import *
from resource_management.core.logger import Logger
from resource_management.libraries.functions import default
Logger.initialize_logger()
reload(sys)
sys.setdefaultencoding('utf-8')
# server configurations
config = Script.get_config()
# conf_dir = "/etc/"
dolphin_home = "/opt/soft/dolphinscheduler"
dolphin_conf_dir = dolphin_home + "/conf"
dolphin_log_dir = dolphin_home + "/logs"
dolphin_bin_dir = dolphin_home + "/bin"
dolphin_lib_jars = dolphin_home + "/lib/*"
dolphin_pidfile_dir = "/opt/soft/run/dolphinscheduler"
rmHosts = default("/clusterHostInfo/rm_host", [])
# dolphin-env
dolphin_env_map = {}
dolphin_env_map.update(config['configurations']['dolphin-env'])
# which user to install and admin dolphin scheduler
dolphin_user = dolphin_env_map['dolphin.user']
dolphin_group = dolphin_env_map['dolphin.group']
# .dolphinscheduler_env.sh
dolphin_env_path = dolphin_conf_dir + '/env/dolphinscheduler_env.sh'
dolphin_env_content = dolphin_env_map['dolphinscheduler-env-content']
# database config
dolphin_database_config = {}
dolphin_database_config['dolphin_database_type'] = dolphin_env_map['dolphin.database.type']
dolphin_database_config['dolphin_database_username'] = dolphin_env_map['dolphin.database.username']
dolphin_database_config['dolphin_database_password'] = dolphin_env_map['dolphin.database.password']
if 'mysql' == dolphin_database_config['dolphin_database_type']:
dolphin_database_config['dolphin_database_driver'] = 'com.mysql.jdbc.Driver'
dolphin_database_config['driverDelegateClass'] = 'org.quartz.impl.jdbcjobstore.StdJDBCDelegate'
dolphin_database_config['dolphin_database_url'] = 'jdbc:mysql://' + dolphin_env_map['dolphin.database.host'] \
+ ':' + dolphin_env_map['dolphin.database.port'] \
+ '/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8'
else:
dolphin_database_config['dolphin_database_driver'] = 'org.postgresql.Driver'
dolphin_database_config['driverDelegateClass'] = 'org.quartz.impl.jdbcjobstore.PostgreSQLDelegate'
dolphin_database_config['dolphin_database_url'] = 'jdbc:postgresql://' + dolphin_env_map['dolphin.database.host'] \
+ ':' + dolphin_env_map['dolphin.database.port'] \
+ '/dolphinscheduler'
# application-alert.properties
dolphin_alert_map = {}
wechat_push_url = 'https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token'
wechat_token_url = 'https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret'
wechat_team_send_msg = '{\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}'
wechat_user_send_msg = '{\"touser\":\"$toUser\",\"agentid\":\"$agentId\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"$msg\"}}'
dolphin_alert_config_map = config['configurations']['dolphin-alert']
if dolphin_alert_config_map['enterprise.wechat.enable']:
dolphin_alert_map['enterprise.wechat.push.ur'] = wechat_push_url
dolphin_alert_map['enterprise.wechat.token.url'] = wechat_token_url
dolphin_alert_map['enterprise.wechat.team.send.msg'] = wechat_team_send_msg
dolphin_alert_map['enterprise.wechat.user.send.msg'] = wechat_user_send_msg
dolphin_alert_map.update(dolphin_alert_config_map)
# application-api.properties
dolphin_app_api_map = {}
dolphin_app_api_map.update(config['configurations']['dolphin-application-api'])
# common.properties
dolphin_common_map = {}
if 'yarn-site' in config['configurations'] and \
'yarn.resourcemanager.webapp.address' in config['configurations']['yarn-site']:
yarn_resourcemanager_webapp_address = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address']
yarn_application_status_address = 'http://' + yarn_resourcemanager_webapp_address + '/ws/v1/cluster/apps/%s'
dolphin_common_map['yarn.application.status.address'] = yarn_application_status_address
rmHosts = default("/clusterHostInfo/rm_host", [])
if len(rmHosts) > 1:
dolphin_common_map['yarn.resourcemanager.ha.rm.ids'] = ','.join(rmHosts)
else:
dolphin_common_map['yarn.resourcemanager.ha.rm.ids'] = ''
dolphin_common_map_tmp = config['configurations']['dolphin-common']
data_basedir_path = dolphin_common_map_tmp['data.basedir.path']
process_exec_basepath = data_basedir_path + '/exec'
data_download_basedir_path = data_basedir_path + '/download'
dolphin_common_map['process.exec.basepath'] = process_exec_basepath
dolphin_common_map['data.download.basedir.path'] = data_download_basedir_path
dolphin_common_map['dolphinscheduler.env.path'] = dolphin_env_path
dolphin_common_map.update(config['configurations']['dolphin-common'])
# datasource.properties
dolphin_datasource_map = {}
dolphin_datasource_map['spring.datasource.type'] = 'com.alibaba.druid.pool.DruidDataSource'
dolphin_datasource_map['spring.datasource.driver-class-name'] = dolphin_database_config['dolphin_database_driver']
dolphin_datasource_map['spring.datasource.url'] = dolphin_database_config['dolphin_database_url']
dolphin_datasource_map['spring.datasource.username'] = dolphin_database_config['dolphin_database_username']
dolphin_datasource_map['spring.datasource.password'] = dolphin_database_config['dolphin_database_password']
dolphin_datasource_map.update(config['configurations']['dolphin-datasource'])
# master.properties
dolphin_master_map = config['configurations']['dolphin-master']
# quartz.properties
dolphin_quartz_map = {}
dolphin_quartz_map['org.quartz.jobStore.driverDelegateClass'] = dolphin_database_config['driverDelegateClass']
dolphin_quartz_map.update(config['configurations']['dolphin-quartz'])
# worker.properties
dolphin_worker_map = config['configurations']['dolphin-worker']
# zookeeper.properties
dolphin_zookeeper_map={}
zookeeperHosts = default("/clusterHostInfo/zookeeper_hosts", [])
if len(zookeeperHosts) > 0 and "clientPort" in config['configurations']['zoo.cfg']:
clientPort = config['configurations']['zoo.cfg']['clientPort']
zookeeperPort = ":" + clientPort + ","
dolphin_zookeeper_map['zookeeper.quorum'] = zookeeperPort.join(zookeeperHosts) + ":" + clientPort
dolphin_zookeeper_map.update(config['configurations']['dolphin-zookeeper'])
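The last block above joins every ZooKeeper host with the client port to build `zookeeper.quorum`. A stand-alone sketch with placeholder hosts and port (not values taken from this PR):

```python
zookeeper_hosts = ["zk1.example.org", "zk2.example.org", "zk3.example.org"]
client_port = "2181"
quorum = (":" + client_port + ",").join(zookeeper_hosts) + ":" + client_port
print(quorum)  # zk1.example.org:2181,zk2.example.org:2181,zk3.example.org:2181
```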

31
ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/service_check.py

@@ -0,0 +1,31 @@
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
from resource_management.libraries.functions import get_unique_id_and_date
class ServiceCheck(Script):
def service_check(self, env):
import params
#env.set_params(params)
# Execute(format("which pika_server"))
if __name__ == "__main__":
ServiceCheck().execute()

23
ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/status_params.py

@@ -0,0 +1,23 @@
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
config = Script.get_config()
dolphin_run_dir = "/opt/soft/run/dolphinscheduler/"

20
ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/alert.properties.j2

@@ -0,0 +1,20 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{% for key, value in dolphin_alert_map.iteritems() -%}
{{key}}={{value}}
{% endfor %}
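Each of these *.properties.j2 templates simply flattens a configuration map into key=value lines. A minimal stand-alone sketch of the same rendering with plain Jinja2, assuming Jinja2 is installed and using `.items()` since the `.iteritems()` call above is Python-2-only (the sample keys are placeholders; the real rendering goes through Ambari's Template resource):

```python
from jinja2 import Template

dolphin_alert_map = {"mail.protocol": "SMTP", "mail.server.host": "smtp.example.org"}
template = Template(
    "{% for key, value in dolphin_alert_map.items() -%}\n"
    "{{ key }}={{ value }}\n"
    "{% endfor %}"
)
print(template.render(dolphin_alert_map=dolphin_alert_map))
# mail.protocol=SMTP
# mail.server.host=smtp.example.org
```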

20
ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/application-api.properties.j2

@@ -0,0 +1,20 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{% for key, value in dolphin_app_api_map.iteritems() -%}
{{key}}={{value}}
{% endfor %}

20
ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/common.properties.j2

@@ -0,0 +1,20 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{% for key, value in dolphin_common_map.iteritems() -%}
{{key}}={{value}}
{% endfor %}

20
ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/datasource.properties.j2

@@ -0,0 +1,20 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{% for key, value in dolphin_datasource_map.iteritems() -%}
{{key}}={{value}}
{% endfor %}

116
ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/dolphin-daemon.sh.j2

@@ -0,0 +1,116 @@
#!/bin/sh
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
usage="Usage: dolphinscheduler-daemon.sh (start|stop) <command> "
# if no args specified, show usage
if [ $# -le 1 ]; then
echo $usage
exit 1
fi
startStop=$1
shift
command=$1
shift
echo "Begin $startStop $command......"
BIN_DIR=`dirname $0`
BIN_DIR=`cd "$BIN_DIR"; pwd`
DOLPHINSCHEDULER_HOME=$BIN_DIR/..
export HOSTNAME=`hostname`
DOLPHINSCHEDULER_LIB_JARS={{dolphin_lib_jars}}
DOLPHINSCHEDULER_OPTS="-server -Xmx16g -Xms4g -Xss512k -XX:+DisableExplicitGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:LargePageSizeInBytes=128m -XX:+UseFastAccessorMethods -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70"
STOP_TIMEOUT=5
log={{dolphin_log_dir}}/dolphinscheduler-$command-$HOSTNAME.out
pid={{dolphin_pidfile_dir}}/$command.pid
cd $DOLPHINSCHEDULER_HOME
if [ "$command" = "api-server" ]; then
LOG_FILE="-Dlogging.config={{dolphin_conf_dir}}/logback-api.xml -Dspring.profiles.active=api"
CLASS=org.apache.dolphinscheduler.api.ApiApplicationServer
elif [ "$command" = "master-server" ]; then
LOG_FILE="-Dlogging.config={{dolphin_conf_dir}}/logback-master.xml -Ddruid.mysql.usePingMethod=false"
CLASS=org.apache.dolphinscheduler.server.master.MasterServer
elif [ "$command" = "worker-server" ]; then
LOG_FILE="-Dlogging.config={{dolphin_conf_dir}}/logback-worker.xml -Ddruid.mysql.usePingMethod=false"
CLASS=org.apache.dolphinscheduler.server.worker.WorkerServer
elif [ "$command" = "alert-server" ]; then
LOG_FILE="-Dlogging.config={{dolphin_conf_dir}}/logback-alert.xml"
CLASS=org.apache.dolphinscheduler.alert.AlertServer
elif [ "$command" = "logger-server" ]; then
CLASS=org.apache.dolphinscheduler.server.log.LoggerServer
else
echo "Error: No command named \`$command' was found."
exit 1
fi
case $startStop in
(start)
if [ -f $pid ]; then
if kill -0 `cat $pid` > /dev/null 2>&1; then
echo $command running as process `cat $pid`. Stop it first.
exit 1
fi
fi
echo starting $command, logging to $log
exec_command="$LOG_FILE $DOLPHINSCHEDULER_OPTS -classpath {{dolphin_conf_dir}}:{{dolphin_lib_jars}} $CLASS"
echo "nohup java $exec_command > $log 2>&1 < /dev/null &"
nohup java $exec_command > $log 2>&1 < /dev/null &
echo $! > $pid
;;
(stop)
if [ -f $pid ]; then
TARGET_PID=`cat $pid`
if kill -0 $TARGET_PID > /dev/null 2>&1; then
echo stopping $command
kill $TARGET_PID
sleep $STOP_TIMEOUT
if kill -0 $TARGET_PID > /dev/null 2>&1; then
echo "$command did not stop gracefully after $STOP_TIMEOUT seconds: killing with kill -9"
kill -9 $TARGET_PID
fi
else
echo no $command to stop
fi
rm -f $pid
else
echo no $command to stop
fi
;;
(*)
echo $usage
exit 1
;;
esac
echo "End $startStop $command."

20
ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/master.properties.j2

@@ -0,0 +1,20 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{% for key, value in dolphin_master_map.iteritems() -%}
{{key}}={{value}}
{% endfor %}

20
ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/quartz.properties.j2

@@ -0,0 +1,20 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{% for key, value in dolphin_quartz_map.iteritems() -%}
{{key}}={{value}}
{% endfor %}

20
ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/worker.properties.j2

@@ -0,0 +1,20 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{% for key, value in dolphin_worker_map.iteritems() -%}
{{key}}={{value}}
{% endfor %}

20
ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/zookeeper.properties.j2

@@ -0,0 +1,20 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{% for key, value in dolphin_zookeeper_map.iteritems() -%}
{{key}}={{value}}
{% endfor %}

26
ambari_plugin/common-services/DOLPHIN/1.3.0/quicklinks/quicklinks.json

@@ -0,0 +1,26 @@
{
"name": "default",
"description": "default quick links configuration",
"configuration": {
"protocol":
{
"type":"http"
},
"links": [
{
"name": "dolphin-application-ui",
"label": "DolphinApplication UI",
"requires_user_name": "false",
"component_name": "DOLPHIN_API",
"url": "%@://%@:%@/dolphinscheduler/ui/view/login/index.html",
"port":{
"http_property": "server.port",
"http_default_port": "12345",
"regex": "^(\\d+)$",
"site": "dolphin-application-api"
}
}
]
}
}
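Ambari substitutes the three `%@` placeholders above with the protocol, the DOLPHIN_API host, and the port resolved from `server.port` in dolphin-application-api (falling back to 12345). A sketch of the resolved link with a placeholder host name:

```python
protocol, host, port = "http", "ds-api.example.org", "12345"  # host is a placeholder
url = "{}://{}:{}/dolphinscheduler/ui/view/login/index.html".format(protocol, host, port)
print(url)  # http://ds-api.example.org:12345/dolphinscheduler/ui/view/login/index.html
```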

661
ambari_plugin/common-services/DOLPHIN/1.3.0/themes/theme.json

@@ -0,0 +1,661 @@
{
"name": "default",
"description": "Default theme for Dolphin Scheduler service",
"configuration": {
"layouts": [
{
"name": "default",
"tabs": [
{
"name": "settings",
"display-name": "Settings",
"layout": {
"tab-rows": "3",
"tab-columns": "3",
"sections": [
{
"name": "dolphin-env-config",
"display-name": "Dolphin Env Config",
"row-index": "0",
"column-index": "0",
"row-span": "1",
"column-span": "2",
"section-rows": "1",
"section-columns": "2",
"subsections": [
{
"name": "env-row1-col1",
"display-name": "Deploy User Info",
"row-index": "0",
"column-index": "0",
"row-span": "1",
"column-span": "1"
},
{
"name": "env-row1-col2",
"display-name": "System Env Optimization",
"row-index": "0",
"column-index": "1",
"row-span": "1",
"column-span": "1"
}
]
},
{
"name": "dolphin-database-config",
"display-name": "Database Config",
"row-index": "1",
"column-index": "0",
"row-span": "1",
"column-span": "2",
"section-rows": "1",
"section-columns": "3",
"subsections": [
{
"name": "database-row1-col1",
"row-index": "0",
"column-index": "0",
"row-span": "1",
"column-span": "1"
},
{
"name": "database-row1-col2",
"row-index": "0",
"column-index": "1",
"row-span": "1",
"column-span": "1"
},
{
"name": "database-row1-col3",
"row-index": "0",
"column-index": "2",
"row-span": "1",
"column-span": "1"
}
]
},
{
"name": "dynamic-config",
"row-index": "2",
"column-index": "0",
"row-span": "1",
"column-span": "2",
"section-rows": "1",
"section-columns": "3",
"subsections": [
{
"name": "dynamic-row1-col1",
"display-name": "Resource FS Config",
"row-index": "0",
"column-index": "0",
"row-span": "1",
"column-span": "1"
},
{
"name": "dynamic-row1-col2",
"display-name": "Kerberos Info",
"row-index": "0",
"column-index": "1",
"row-span": "1",
"column-span": "1"
},
{
"name": "dynamic-row1-col3",
"display-name": "Wechat Info",
"row-index": "0",
"column-index": "1",
"row-span": "1",
"column-span": "1"
}
]
}
]
}
}
]
}
],
"placement": {
"configuration-layout": "default",
"configs": [
{
"config": "dolphin-env/dolphin.database.type",
"subsection-name": "database-row1-col1"
},
{
"config": "dolphin-env/dolphin.database.host",
"subsection-name": "database-row1-col2"
},
{
"config": "dolphin-env/dolphin.database.port",
"subsection-name": "database-row1-col2"
},
{
"config": "dolphin-env/dolphin.database.username",
"subsection-name": "database-row1-col3"
},
{
"config": "dolphin-env/dolphin.database.password",
"subsection-name": "database-row1-col3"
},
{
"config": "dolphin-env/dolphin.user",
"subsection-name": "env-row1-col1"
},
{
"config": "dolphin-env/dolphin.group",
"subsection-name": "env-row1-col1"
},
{
"config": "dolphin-env/dolphinscheduler-env-content",
"subsection-name": "env-row1-col2"
},
{
"config": "dolphin-common/resource.storage.type",
"subsection-name": "dynamic-row1-col1"
},
{
"config": "dolphin-common/resource.upload.path",
"subsection-name": "dynamic-row1-col1",
"depends-on": [
{
"configs":[
"dolphin-common/resource.storage.type"
],
"if": "${dolphin-common/resource.storage.type} === HDFS || ${dolphin-common/resource.storage.type} === S3",
"then": {
"property_value_attributes": {
"visible": true
}
},
"else": {
"property_value_attributes": {
"visible": false
}
}
}
]
},
{
"config": "dolphin-common/hdfs.root.user",
"subsection-name": "dynamic-row1-col1",
"depends-on": [
{
"configs":[
"dolphin-common/resource.storage.type"
],
"if": "${dolphin-common/resource.storage.type} === HDFS",
"then": {
"property_value_attributes": {
"visible": true
}
},
"else": {
"property_value_attributes": {
"visible": false
}
}
}
]
},
{
"config": "dolphin-common/data.store2hdfs.basepath",
"subsection-name": "dynamic-row1-col1",
"depends-on": [
{
"configs":[
"dolphin-common/resource.storage.type"
],
"if": "${dolphin-common/resource.storage.type} === HDFS",
"then": {
"property_value_attributes": {
"visible": true
}
},
"else": {
"property_value_attributes": {
"visible": false
}
}
}
]
},
{
"config": "dolphin-common/fs.defaultFS",
"subsection-name": "dynamic-row1-col1",
"depends-on": [
{
"configs":[
"dolphin-common/resource.storage.type"
],
"if": "${dolphin-common/resource.storage.type} === HDFS",
"then": {
"property_value_attributes": {
"visible": true
}
},
"else": {
"property_value_attributes": {
"visible": false
}
}
}
]
},
{
"config": "dolphin-common/fs.s3a.endpoint",
"subsection-name": "dynamic-row1-col1",
"depends-on": [
{
"configs":[
"dolphin-common/resource.storage.type"
],
"if": "${dolphin-common/resource.storage.type} === S3",
"then": {
"property_value_attributes": {
"visible": true
}
},
"else": {
"property_value_attributes": {
"visible": false
}
}
}
]
},
{
"config": "dolphin-common/fs.s3a.access.key",
"subsection-name": "dynamic-row1-col1",
"depends-on": [
{
"configs":[
"dolphin-common/resource.storage.type"
],
"if": "${dolphin-common/resource.storage.type} === S3",
"then": {
"property_value_attributes": {
"visible": true
}
},
"else": {
"property_value_attributes": {
"visible": false
}
}
}
]
},
{
"config": "dolphin-common/fs.s3a.secret.key",
"subsection-name": "dynamic-row1-col1",
"depends-on": [
{
"configs":[
"dolphin-common/resource.storage.type"
],
"if": "${dolphin-common/resource.storage.type} === S3",
"then": {
"property_value_attributes": {
"visible": true
}
},
"else": {
"property_value_attributes": {
"visible": false
}
}
}
]
},
{
"config": "dolphin-common/hadoop.security.authentication.startup.state",
"subsection-name": "dynamic-row1-col2"
},
{
"config": "dolphin-common/java.security.krb5.conf.path",
"subsection-name": "dynamic-row1-col2",
"depends-on": [
{
"configs":[
"dolphin-common/hadoop.security.authentication.startup.state"
],
"if": "${dolphin-common/hadoop.security.authentication.startup.state}",
"then": {
"property_value_attributes": {
"visible": true
}
},
"else": {
"property_value_attributes": {
"visible": false
}
}
}
]
},
{
"config": "dolphin-common/login.user.keytab.username",
"subsection-name": "dynamic-row1-col2",
"depends-on": [
{
"configs":[
"dolphin-common/hadoop.security.authentication.startup.state"
],
"if": "${dolphin-common/hadoop.security.authentication.startup.state}",
"then": {
"property_value_attributes": {
"visible": true
}
},
"else": {
"property_value_attributes": {
"visible": false
}
}
}
]
},
{
"config": "dolphin-common/login.user.keytab.path",
"subsection-name": "dynamic-row1-col2",
"depends-on": [
{
"configs":[
"dolphin-common/hadoop.security.authentication.startup.state"
],
"if": "${dolphin-common/hadoop.security.authentication.startup.state}",
"then": {
"property_value_attributes": {
"visible": true
}
},
"else": {
"property_value_attributes": {
"visible": false
}
}
}
]
},
{
"config": "dolphin-common/kerberos.expire.time",
"subsection-name": "dynamic-row1-col2",
"depends-on": [
{
"configs":[
"dolphin-common/hadoop.security.authentication.startup.state"
],
"if": "${dolphin-common/hadoop.security.authentication.startup.state}",
"then": {
"property_value_attributes": {
"visible": true
}
},
"else": {
"property_value_attributes": {
"visible": false
}
}
}
]
},
{
"config": "dolphin-alert/enterprise.wechat.enable",
"subsection-name": "dynamic-row1-col3"
},
{
"config": "dolphin-alert/enterprise.wechat.corp.id",
"subsection-name": "dynamic-row1-col3",
"depends-on": [
{
"configs":[
"dolphin-alert/enterprise.wechat.enable"
],
"if": "${dolphin-alert/enterprise.wechat.enable}",
"then": {
"property_value_attributes": {
"visible": true
}
},
"else": {
"property_value_attributes": {
"visible": false
}
}
}
]
},
{
"config": "dolphin-alert/enterprise.wechat.secret",
"subsection-name": "dynamic-row1-col3",
"depends-on": [
{
"configs":[
"dolphin-alert/enterprise.wechat.enable"
],
"if": "${dolphin-alert/enterprise.wechat.enable}",
"then": {
"property_value_attributes": {
"visible": true
}
},
"else": {
"property_value_attributes": {
"visible": false
}
}
}
]
},
{
"config": "dolphin-alert/enterprise.wechat.agent.id",
"subsection-name": "dynamic-row1-col3",
"depends-on": [
{
"configs":[
"dolphin-alert/enterprise.wechat.enable"
],
"if": "${dolphin-alert/enterprise.wechat.enable}",
"then": {
"property_value_attributes": {
"visible": true
}
},
"else": {
"property_value_attributes": {
"visible": false
}
}
}
]
},
{
"config": "dolphin-alert/enterprise.wechat.users",
"subsection-name": "dynamic-row1-col3",
"depends-on": [
{
"configs":[
"dolphin-alert/enterprise.wechat.enable"
],
"if": "${dolphin-alert/enterprise.wechat.enable}",
"then": {
"property_value_attributes": {
"visible": true
}
},
"else": {
"property_value_attributes": {
"visible": false
}
}
}
]
}
]
},
"widgets": [
{
"config": "dolphin-env/dolphin.database.type",
"widget": {
"type": "combo"
}
},
{
"config": "dolphin-env/dolphin.database.host",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-env/dolphin.database.port",
"widget": {
"type": "text-field",
"units": [
{
"unit-name": "int"
}
]
}
},
{
"config": "dolphin-env/dolphin.database.username",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-env/dolphin.database.password",
"widget": {
"type": "password"
}
},
{
"config": "dolphin-env/dolphin.user",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-env/dolphin.group",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-env/dolphinscheduler-env-content",
"widget": {
"type": "text-area"
}
},
{
"config": "dolphin-common/resource.storage.type",
"widget": {
"type": "combo"
}
},
{
"config": "dolphin-common/resource.upload.path",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-common/hdfs.root.user",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-common/data.store2hdfs.basepath",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-common/fs.defaultFS",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-common/fs.s3a.endpoint",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-common/fs.s3a.access.key",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-common/fs.s3a.secret.key",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-common/hadoop.security.authentication.startup.state",
"widget": {
"type": "toggle"
}
},
{
"config": "dolphin-common/java.security.krb5.conf.path",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-common/login.user.keytab.username",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-common/login.user.keytab.path",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-common/kerberos.expire.time",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-alert/enterprise.wechat.enable",
"widget": {
"type": "toggle"
}
},
{
"config": "dolphin-alert/enterprise.wechat.corp.id",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-alert/enterprise.wechat.secret",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-alert/enterprise.wechat.agent.id",
"widget": {
"type": "text-field"
}
},
{
"config": "dolphin-alert/enterprise.wechat.users",
"widget": {
"type": "text-field"
}
}
]
}
}

BIN
ambari_plugin/readme.pdf

Binary file not shown.

2
ambari_plugin/statcks/DOLPHIN/metainfo.xml

@@ -20,7 +20,7 @@
<services>
<service>
<name>DOLPHIN</name>
<extends>common-services/DOLPHIN/1.2.1</extends>
<extends>common-services/DOLPHIN/1.3.0</extends>
</service>
</services>
</metainfo>

226
charts/dolphinscheduler/README.md

@@ -1,226 +0,0 @@
# Dolphin Scheduler
[Dolphin Scheduler](https://dolphinscheduler.apache.org) is a distributed and easily extensible visual DAG workflow scheduling system, dedicated to solving the complex dependencies in data processing so that the scheduling system can be used out of the box for data processing.
## Introduction
This chart bootstraps a [Dolphin Scheduler](https://dolphinscheduler.apache.org) distributed deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Kubernetes 1.10+
- PV provisioner support in the underlying infrastructure
## Installing the Chart
To install the chart with the release name `my-release`:
```bash
$ git clone https://github.com/apache/incubator-dolphinscheduler.git
$ cd incubator-dolphinscheduler
$ helm install --name dolphinscheduler .
```
These commands deploy Dolphin Scheduler on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
> **Tip**: List all releases using `helm list`
## Uninstalling the Chart
To uninstall/delete the `dolphinscheduler` deployment:
```bash
$ helm delete --purge dolphinscheduler
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following table lists the configurable parameters of the Dolphin Scheduler chart and their default values; a short example of overriding them follows the table.
| Parameter | Description | Default |
| --------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------- |
| `timezone`                                                                          | Default timezone for all Dolphin Scheduler components                                                                           | `Asia/Shanghai`                                        |
| `image.registry`                                                                    | Docker image registry for Dolphin Scheduler                                                                                     | `docker.io`                                            |
| `image.repository`                                                                  | Docker image repository for Dolphin Scheduler                                                                                   | `dolphinscheduler`                                     |
| `image.tag`                                                                         | Docker image version for Dolphin Scheduler                                                                                      | `1.2.1`                                                |
| `image.imagePullPolicy` | Image pull policy. One of Always, Never, IfNotPresent | `IfNotPresent` |
| `imagePullSecrets` | ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images | `[]` |
| | | |
| `postgresql.enabled`                                                                | If there is no external PostgreSQL, Dolphin Scheduler uses an internal PostgreSQL by default                                    | `true`                                                 |
| `postgresql.postgresqlUsername` | The username for internal PostgreSQL | `root` |
| `postgresql.postgresqlPassword` | The password for internal PostgreSQL | `root` |
| `postgresql.postgresqlDatabase` | The database for internal PostgreSQL | `dolphinscheduler` |
| `postgresql.persistence.enabled` | Set `postgresql.persistence.enabled` to `true` to mount a new volume for internal PostgreSQL | `false` |
| `postgresql.persistence.size` | `PersistentVolumeClaim` Size | `20Gi` |
| `postgresql.persistence.storageClass` | PostgreSQL data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `externalDatabase.host`                                                             | Database host used by Dolphin Scheduler when an external PostgreSQL exists and `postgresql.enabled` is `false`                  | `localhost`                                            |
| `externalDatabase.port`                                                             | Database port used by Dolphin Scheduler when an external PostgreSQL exists and `postgresql.enabled` is `false`                  | `5432`                                                 |
| `externalDatabase.username`                                                         | Database username used by Dolphin Scheduler when an external PostgreSQL exists and `postgresql.enabled` is `false`              | `root`                                                 |
| `externalDatabase.password`                                                         | Database password used by Dolphin Scheduler when an external PostgreSQL exists and `postgresql.enabled` is `false`              | `root`                                                 |
| `externalDatabase.database`                                                         | Database name used by Dolphin Scheduler when an external PostgreSQL exists and `postgresql.enabled` is `false`                  | `dolphinscheduler`                                     |
| | | |
| `zookeeper.enabled`                                                                 | If there is no external Zookeeper, Dolphin Scheduler uses an internal Zookeeper by default                                      | `true`                                                 |
| `zookeeper.taskQueue` | Specify task queue for `master` and `worker` | `zookeeper` |
| `zookeeper.persistence.enabled` | Set `zookeeper.persistence.enabled` to `true` to mount a new volume for internal Zookeeper | `false` |
| `zookeeper.persistence.size` | `PersistentVolumeClaim` Size | `20Gi` |
| `zookeeper.persistence.storageClass` | Zookeeper data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `externalZookeeper.taskQueue`                                                       | Task queue for `master` and `worker` when an external Zookeeper exists and `zookeeper.enabled` is `false`                       | `zookeeper`                                            |
| `externalZookeeper.zookeeperQuorum`                                                 | Zookeeper quorum used when an external Zookeeper exists and `zookeeper.enabled` is `false`                                      | `127.0.0.1:2181`                                       |
| | | |
| `master.podManagementPolicy` | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down | `Parallel` |
| `master.replicas` | Replicas is the desired number of replicas of the given Template | `3` |
| `master.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `master.tolerations` | If specified, the pod's tolerations | `{}` |
| `master.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `master.configmap.MASTER_EXEC_THREADS` | Master execute thread num | `100` |
| `master.configmap.MASTER_EXEC_TASK_NUM` | Master execute task number in parallel | `20` |
| `master.configmap.MASTER_HEARTBEAT_INTERVAL` | Master heartbeat interval | `10` |
| `master.configmap.MASTER_TASK_COMMIT_RETRYTIMES` | Master commit task retry times | `5` |
| `master.configmap.MASTER_TASK_COMMIT_INTERVAL` | Master commit task interval | `1000` |
| `master.configmap.MASTER_MAX_CPULOAD_AVG`                                           | The master server only works while the CPU load average is below this value; the default is the number of CPU cores * 2        | `100`                                                  |
| `master.configmap.MASTER_RESERVED_MEMORY`                                           | The master server only works while available memory exceeds this reserved value; the default is physical memory * 1/10, in GB  | `0.1`                                                  |
| `master.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `master.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `master.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `master.livenessProbe.failureThreshold`                                             | Minimum consecutive failures for the probe to be considered failed                                                              | `3`                                                    |
| `master.livenessProbe.successThreshold`                                             | Minimum consecutive successes for the probe to be considered successful                                                         | `1`                                                    |
| `master.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `master.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `master.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `master.readinessProbe.failureThreshold`                                            | Minimum consecutive failures for the probe to be considered failed                                                              | `3`                                                    |
| `master.readinessProbe.successThreshold`                                            | Minimum consecutive successes for the probe to be considered successful                                                         | `1`                                                    |
| `master.persistentVolumeClaim.enabled` | Set `master.persistentVolumeClaim.enabled` to `true` to mount a new volume for `master` | `false` |
| `master.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `master.persistentVolumeClaim.storageClassName` | `Master` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `master.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `worker.podManagementPolicy` | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down | `Parallel` |
| `worker.replicas` | Replicas is the desired number of replicas of the given Template | `3` |
| `worker.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `worker.tolerations` | If specified, the pod's tolerations | `{}` |
| `worker.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `worker.configmap.WORKER_EXEC_THREADS` | Worker execute thread num | `100` |
| `worker.configmap.WORKER_HEARTBEAT_INTERVAL` | Worker heartbeat interval | `10` |
| `worker.configmap.WORKER_FETCH_TASK_NUM` | Submit the number of tasks at a time | `3` |
| `worker.configmap.WORKER_MAX_CPULOAD_AVG`                                           | The worker server only works while the CPU load average is below this value; the default is the number of CPU cores * 2        | `100`                                                  |
| `worker.configmap.WORKER_RESERVED_MEMORY`                                           | The worker server only works while available memory exceeds this reserved value; the default is physical memory * 1/10, in GB  | `0.1`                                                  |
| `worker.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH`                               | User data directory path; configure it yourself and make sure the directory exists and has read/write permissions              | `/tmp/dolphinscheduler` |
| `worker.configmap.DOLPHINSCHEDULER_ENV`                                             | System environment entries exported for tasks; see `values.yaml`                                                                | `[]`                                                   |
| `worker.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `worker.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `worker.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `worker.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `worker.livenessProbe.failureThreshold`                                             | Minimum consecutive failures for the probe to be considered failed                                                              | `3`                                                    |
| `worker.livenessProbe.successThreshold`                                             | Minimum consecutive successes for the probe to be considered successful                                                         | `1`                                                    |
| `worker.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `worker.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `worker.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `worker.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `worker.readinessProbe.failureThreshold`                                            | Minimum consecutive failures for the probe to be considered failed                                                              | `3`                                                    |
| `worker.readinessProbe.successThreshold`                                            | Minimum consecutive successes for the probe to be considered successful                                                         | `1`                                                    |
| `worker.persistentVolumeClaim.enabled` | Set `worker.persistentVolumeClaim.enabled` to `true` to enable `persistentVolumeClaim` for `worker` | `false` |
| `worker.persistentVolumeClaim.dataPersistentVolume.enabled` | Set `worker.persistentVolumeClaim.dataPersistentVolume.enabled` to `true` to mount a data volume for `worker` | `false` |
| `worker.persistentVolumeClaim.dataPersistentVolume.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `worker.persistentVolumeClaim.dataPersistentVolume.storageClassName` | `Worker` data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `worker.persistentVolumeClaim.dataPersistentVolume.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| `worker.persistentVolumeClaim.logsPersistentVolume.enabled` | Set `worker.persistentVolumeClaim.logsPersistentVolume.enabled` to `true` to mount a logs volume for `worker` | `false` |
| `worker.persistentVolumeClaim.logsPersistentVolume.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `worker.persistentVolumeClaim.logsPersistentVolume.storageClassName` | `Worker` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `worker.persistentVolumeClaim.logsPersistentVolume.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `alert.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
| `alert.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
| `alert.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
| `alert.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
| `alert.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `alert.tolerations` | If specified, the pod's tolerations | `{}` |
| `alert.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `alert.configmap.XLS_FILE_PATH` | XLS file path | `/tmp/xls` |
| `alert.configmap.MAIL_SERVER_HOST`                                                  | Mail `SERVER HOST`                                                                                                               | `nil`                                                  |
| `alert.configmap.MAIL_SERVER_PORT` | Mail `SERVER PORT` | `nil` |
| `alert.configmap.MAIL_SENDER` | Mail `SENDER` | `nil` |
| `alert.configmap.MAIL_USER` | Mail `USER` | `nil` |
| `alert.configmap.MAIL_PASSWD` | Mail `PASSWORD` | `nil` |
| `alert.configmap.MAIL_SMTP_STARTTLS_ENABLE` | Mail `SMTP STARTTLS` enable | `false` |
| `alert.configmap.MAIL_SMTP_SSL_ENABLE` | Mail `SMTP SSL` enable | `false` |
| `alert.configmap.MAIL_SMTP_SSL_TRUST` | Mail `SMTP SSL TRUST` | `nil` |
| `alert.configmap.ENTERPRISE_WECHAT_ENABLE` | `Enterprise Wechat` enable | `false` |
| `alert.configmap.ENTERPRISE_WECHAT_CORP_ID` | `Enterprise Wechat` corp id | `nil` |
| `alert.configmap.ENTERPRISE_WECHAT_SECRET` | `Enterprise Wechat` secret | `nil` |
| `alert.configmap.ENTERPRISE_WECHAT_AGENT_ID` | `Enterprise Wechat` agent id | `nil` |
| `alert.configmap.ENTERPRISE_WECHAT_USERS` | `Enterprise Wechat` users | `nil` |
| `alert.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `alert.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `alert.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `alert.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `alert.livenessProbe.failureThreshold`                                              | Minimum consecutive failures for the probe to be considered failed                                                              | `3`                                                    |
| `alert.livenessProbe.successThreshold`                                              | Minimum consecutive successes for the probe to be considered successful                                                         | `1`                                                    |
| `alert.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `alert.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `alert.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `alert.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `alert.readinessProbe.failureThreshold`                                             | Minimum consecutive failures for the probe to be considered failed                                                              | `3`                                                    |
| `alert.readinessProbe.successThreshold`                                             | Minimum consecutive successes for the probe to be considered successful                                                         | `1`                                                    |
| `alert.persistentVolumeClaim.enabled` | Set `alert.persistentVolumeClaim.enabled` to `true` to mount a new volume for `alert` | `false` |
| `alert.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `alert.persistentVolumeClaim.storageClassName` | `Alert` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `alert.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `api.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
| `api.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
| `api.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
| `api.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
| `api.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `api.tolerations` | If specified, the pod's tolerations | `{}` |
| `api.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `api.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `api.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `api.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `api.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `api.livenessProbe.failureThreshold`                                                | Minimum consecutive failures for the probe to be considered failed                                                              | `3`                                                    |
| `api.livenessProbe.successThreshold`                                                | Minimum consecutive successes for the probe to be considered successful                                                         | `1`                                                    |
| `api.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `api.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `api.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `api.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `api.readinessProbe.failureThreshold`                                               | Minimum consecutive failures for the probe to be considered failed                                                              | `3`                                                    |
| `api.readinessProbe.successThreshold`                                               | Minimum consecutive successes for the probe to be considered successful                                                         | `1`                                                    |
| `api.persistentVolumeClaim.enabled` | Set `api.persistentVolumeClaim.enabled` to `true` to mount a new volume for `api` | `false` |
| `api.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `api.persistentVolumeClaim.storageClassName` | `api` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `api.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `frontend.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
| `frontend.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
| `frontend.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
| `frontend.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
| `frontend.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `frontend.tolerations` | If specified, the pod's tolerations | `{}` |
| `frontend.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `frontend.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `frontend.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `frontend.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `frontend.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `frontend.livenessProbe.failureThreshold`                                           | Minimum consecutive failures for the probe to be considered failed                                                              | `3`                                                    |
| `frontend.livenessProbe.successThreshold`                                           | Minimum consecutive successes for the probe to be considered successful                                                         | `1`                                                    |
| `frontend.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `frontend.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `frontend.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `frontend.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `frontend.readinessProbe.failureThreshold`                                          | Minimum consecutive failures for the probe to be considered failed                                                              | `3`                                                    |
| `frontend.readinessProbe.successThreshold`                                          | Minimum consecutive successes for the probe to be considered successful                                                         | `1`                                                    |
| `frontend.persistentVolumeClaim.enabled` | Set `frontend.persistentVolumeClaim.enabled` to `true` to mount a new volume for `frontend` | `false` |
| `frontend.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `frontend.persistentVolumeClaim.storageClassName` | `frontend` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `frontend.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `ingress.enabled` | Enable ingress | `false` |
| `ingress.host` | Ingress host | `dolphinscheduler.org` |
| `ingress.path` | Ingress path | `/` |
| `ingress.tls.enabled` | Enable ingress tls | `false` |
| `ingress.tls.hosts` | Ingress tls hosts | `dolphinscheduler.org` |
| `ingress.tls.secretName` | Ingress tls secret name | `dolphinscheduler-tls` |
For more information please refer to the [chart](https://github.com/apache/incubator-dolphinscheduler.git) documentation.
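Any of the parameters above can be overridden at install time with `--set`, or collected in a custom values file passed with `-f`. A minimal sketch, assuming a hypothetical external PostgreSQL host; the flag values are illustrative, not recommendations:
```bash
# Override chart parameters from the table above; all values are illustrative.
$ helm install --name dolphinscheduler \
    --set image.tag=1.2.1 \
    --set postgresql.enabled=false \
    --set externalDatabase.host=postgres.example.org \
    --set externalDatabase.username=root \
    --set externalDatabase.password=root \
    --set externalDatabase.database=dolphinscheduler \
    .
```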

1
docker/docker-compose.yml

@ -23,6 +23,7 @@ services:
- "2181:2181"
environment:
ZOO_MY_ID: 1
ZOO_4LW_COMMANDS_WHITELIST: srvr,ruok,wchs,cons
db:
image: postgres
container_name: postgres
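The new `ZOO_4LW_COMMANDS_WHITELIST` entry enables the listed four-letter-word commands on the Zookeeper container. A quick hedged check, assuming the compose stack is up and port 2181 is published as above:
```bash
# "ruok" is one of the whitelisted commands; a healthy Zookeeper answers "imok".
$ echo ruok | nc localhost 2181
imok
```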

233
docker/docker-swarm/docker-compose.yml

@ -0,0 +1,233 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
version: "3.4"
services:
dolphinscheduler-postgresql:
image: bitnami/postgresql:latest
container_name: dolphinscheduler-postgresql
ports:
- 5432:5432
environment:
TZ: Asia/Shanghai
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
volumes:
- dolphinscheduler-postgresql:/bitnami/postgresql
networks:
- dolphinscheduler
dolphinscheduler-zookeeper:
image: bitnami/zookeeper:latest
container_name: dolphinscheduler-zookeeper
ports:
- 2181:2181
environment:
TZ: Asia/Shanghai
ALLOW_ANONYMOUS_LOGIN: "yes"
volumes:
- dolphinscheduler-zookeeper:/bitnami/zookeeper
networks:
- dolphinscheduler
dolphinscheduler-api:
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
container_name: dolphinscheduler-api
command: ["api-server"]
ports:
- 12345:12345
environment:
TZ: Asia/Shanghai
POSTGRESQL_HOST: dolphinscheduler-postgresql
POSTGRESQL_PORT: 5432
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "ApiApplicationServer"]
interval: 30s
timeout: 5s
retries: 3
start_period: 30s
depends_on:
- dolphinscheduler-postgresql
- dolphinscheduler-zookeeper
volumes:
- dolphinscheduler-api:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler
dolphinscheduler-frontend:
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
container_name: dolphinscheduler-frontend
command: ["frontend"]
ports:
- 8888:8888
environment:
TZ: Asia/Shanghai
FRONTEND_API_SERVER_HOST: dolphinscheduler-api
FRONTEND_API_SERVER_PORT: 12345
healthcheck:
test: ["CMD", "nc", "-z", "localhost", "8888"]
interval: 30s
timeout: 5s
retries: 3
start_period: 30s
depends_on:
- dolphinscheduler-api
volumes:
- dolphinscheduler-frontend:/var/log/nginx
networks:
- dolphinscheduler
dolphinscheduler-alert:
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
container_name: dolphinscheduler-alert
command: ["alert-server"]
environment:
TZ: Asia/Shanghai
XLS_FILE_PATH: "/tmp/xls"
MAIL_SERVER_HOST: ""
MAIL_SERVER_PORT: ""
MAIL_SENDER: ""
MAIL_USER: ""
MAIL_PASSWD: ""
MAIL_SMTP_STARTTLS_ENABLE: "false"
MAIL_SMTP_SSL_ENABLE: "false"
MAIL_SMTP_SSL_TRUST: ""
ENTERPRISE_WECHAT_ENABLE: "false"
ENTERPRISE_WECHAT_CORP_ID: ""
ENTERPRISE_WECHAT_SECRET: ""
ENTERPRISE_WECHAT_AGENT_ID: ""
ENTERPRISE_WECHAT_USERS: ""
POSTGRESQL_HOST: dolphinscheduler-postgresql
POSTGRESQL_PORT: 5432
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "AlertServer"]
interval: 30s
timeout: 5s
retries: 3
start_period: 30s
depends_on:
- dolphinscheduler-postgresql
volumes:
- dolphinscheduler-alert:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler
dolphinscheduler-master:
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
container_name: dolphinscheduler-master
command: ["master-server"]
ports:
- 5678:5678
environment:
TZ: Asia/Shanghai
MASTER_EXEC_THREADS: "100"
MASTER_EXEC_TASK_NUM: "20"
MASTER_HEARTBEAT_INTERVAL: "10"
MASTER_TASK_COMMIT_RETRYTIMES: "5"
MASTER_TASK_COMMIT_INTERVAL: "1000"
MASTER_MAX_CPULOAD_AVG: "100"
MASTER_RESERVED_MEMORY: "0.1"
POSTGRESQL_HOST: dolphinscheduler-postgresql
POSTGRESQL_PORT: 5432
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "MasterServer"]
interval: 30s
timeout: 5s
retries: 3
start_period: 30s
depends_on:
- dolphinscheduler-postgresql
- dolphinscheduler-zookeeper
volumes:
- dolphinscheduler-master:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler
dolphinscheduler-worker:
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
container_name: dolphinscheduler-worker
command: ["worker-server"]
ports:
- 1234:1234
- 50051:50051
environment:
TZ: Asia/Shanghai
WORKER_EXEC_THREADS: "100"
WORKER_HEARTBEAT_INTERVAL: "10"
WORKER_FETCH_TASK_NUM: "3"
WORKER_MAX_CPULOAD_AVG: "100"
WORKER_RESERVED_MEMORY: "0.1"
WORKER_GROUP: "default"
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
POSTGRESQL_HOST: dolphinscheduler-postgresql
POSTGRESQL_PORT: 5432
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "WorkerServer"]
interval: 30s
timeout: 5s
retries: 3
start_period: 30s
depends_on:
- dolphinscheduler-postgresql
- dolphinscheduler-zookeeper
volumes:
- type: bind
source: ./dolphinscheduler_env.sh
target: /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh
- type: volume
source: dolphinscheduler-worker-data
target: /tmp/dolphinscheduler
- type: volume
source: dolphinscheduler-worker-logs
target: /opt/dolphinscheduler/logs
networks:
- dolphinscheduler
networks:
dolphinscheduler:
driver: bridge
volumes:
dolphinscheduler-postgresql:
dolphinscheduler-zookeeper:
dolphinscheduler-api:
dolphinscheduler-frontend:
dolphinscheduler-alert:
dolphinscheduler-master:
dolphinscheduler-worker-data:
dolphinscheduler-worker-logs:
configs:
dolphinscheduler-worker-task-env:
file: ./dolphinscheduler_env.sh
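This compose file can be run as-is on a single host with `docker-compose`; `dolphinscheduler_env.sh` is bind-mounted into the worker from the same directory. A minimal sketch, using the service names and health checks defined above:
```bash
$ cd docker/docker-swarm
$ docker-compose up -d
$ docker-compose ps                               # all services should eventually report "healthy"
$ docker-compose logs -f dolphinscheduler-api     # follow the API server while it starts
```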

230
docker/docker-swarm/docker-stack.yml

@ -0,0 +1,230 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
version: "3.4"
services:
dolphinscheduler-postgresql:
image: bitnami/postgresql:latest
ports:
- 5432:5432
environment:
TZ: Asia/Shanghai
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
volumes:
- dolphinscheduler-postgresql:/bitnami/postgresql
networks:
- dolphinscheduler
deploy:
mode: replicated
replicas: 1
dolphinscheduler-zookeeper:
image: bitnami/zookeeper:latest
ports:
- 2181:2181
environment:
TZ: Asia/Shanghai
ALLOW_ANONYMOUS_LOGIN: "yes"
volumes:
- dolphinscheduler-zookeeper:/bitnami/zookeeper
networks:
- dolphinscheduler
deploy:
mode: replicated
replicas: 1
dolphinscheduler-api:
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
command: ["api-server"]
ports:
- 12345:12345
environment:
TZ: Asia/Shanghai
POSTGRESQL_HOST: dolphinscheduler-postgresql
POSTGRESQL_PORT: 5432
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "ApiApplicationServer"]
interval: 30s
timeout: 5s
retries: 3
start_period: 30s
volumes:
- dolphinscheduler-api:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler
deploy:
mode: replicated
replicas: 1
dolphinscheduler-frontend:
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
command: ["frontend"]
ports:
- 8888:8888
environment:
TZ: Asia/Shanghai
FRONTEND_API_SERVER_HOST: dolphinscheduler-api
FRONTEND_API_SERVER_PORT: 12345
healthcheck:
test: ["CMD", "nc", "-z", "localhost", "8888"]
interval: 30s
timeout: 5s
retries: 3
start_period: 30s
volumes:
- dolphinscheduler-frontend:/var/log/nginx
networks:
- dolphinscheduler
deploy:
mode: replicated
replicas: 1
dolphinscheduler-alert:
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
command: ["alert-server"]
environment:
TZ: Asia/Shanghai
XLS_FILE_PATH: "/tmp/xls"
MAIL_SERVER_HOST: ""
MAIL_SERVER_PORT: ""
MAIL_SENDER: ""
MAIL_USER: ""
MAIL_PASSWD: ""
MAIL_SMTP_STARTTLS_ENABLE: "false"
MAIL_SMTP_SSL_ENABLE: "false"
MAIL_SMTP_SSL_TRUST: ""
ENTERPRISE_WECHAT_ENABLE: "false"
ENTERPRISE_WECHAT_CORP_ID: ""
ENTERPRISE_WECHAT_SECRET: ""
ENTERPRISE_WECHAT_AGENT_ID: ""
ENTERPRISE_WECHAT_USERS: ""
POSTGRESQL_HOST: dolphinscheduler-postgresql
POSTGRESQL_PORT: 5432
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "AlertServer"]
interval: 30s
timeout: 5s
retries: 3
start_period: 30s
volumes:
- dolphinscheduler-alert:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler
deploy:
mode: replicated
replicas: 1
dolphinscheduler-master:
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
command: ["master-server"]
ports:
- 5678:5678
environment:
TZ: Asia/Shanghai
MASTER_EXEC_THREADS: "100"
MASTER_EXEC_TASK_NUM: "20"
MASTER_HEARTBEAT_INTERVAL: "10"
MASTER_TASK_COMMIT_RETRYTIMES: "5"
MASTER_TASK_COMMIT_INTERVAL: "1000"
MASTER_MAX_CPULOAD_AVG: "100"
MASTER_RESERVED_MEMORY: "0.1"
POSTGRESQL_HOST: dolphinscheduler-postgresql
POSTGRESQL_PORT: 5432
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "MasterServer"]
interval: 30s
timeout: 5s
retries: 3
start_period: 30s
volumes:
- dolphinscheduler-master:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler
deploy:
mode: replicated
replicas: 1
dolphinscheduler-worker:
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
command: ["worker-server"]
ports:
- 1234:1234
- 50051:50051
environment:
TZ: Asia/Shanghai
WORKER_EXEC_THREADS: "100"
WORKER_HEARTBEAT_INTERVAL: "10"
WORKER_FETCH_TASK_NUM: "3"
WORKER_MAX_CPULOAD_AVG: "100"
WORKER_RESERVED_MEMORY: "0.1"
WORKER_GROUP: "default"
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
POSTGRESQL_HOST: dolphinscheduler-postgresql
POSTGRESQL_PORT: 5432
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "WorkerServer"]
interval: 30s
timeout: 5s
retries: 3
start_period: 30s
volumes:
- dolphinscheduler-worker-data:/tmp/dolphinscheduler
- dolphinscheduler-worker-logs:/opt/dolphinscheduler/logs
configs:
- source: dolphinscheduler-worker-task-env
target: /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh
networks:
- dolphinscheduler
deploy:
mode: replicated
replicas: 1
networks:
dolphinscheduler:
driver: overlay
volumes:
dolphinscheduler-postgresql:
dolphinscheduler-zookeeper:
dolphinscheduler-api:
dolphinscheduler-frontend:
dolphinscheduler-alert:
dolphinscheduler-master:
dolphinscheduler-worker-data:
dolphinscheduler-worker-logs:
configs:
dolphinscheduler-worker-task-env:
file: ./dolphinscheduler_env.sh
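The stack variant targets Docker Swarm, where the `configs` section and the overlay network require swarm mode. A minimal sketch, assuming a single-node swarm:
```bash
$ docker swarm init                                          # skip if this node is already in a swarm
$ cd docker/docker-swarm
$ docker stack deploy -c docker-stack.yml dolphinscheduler
$ docker stack services dolphinscheduler                     # check replica counts
$ docker stack rm dolphinscheduler                           # tear the stack down again
```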

20
docker/docker-swarm/dolphinscheduler_env.sh

@ -0,0 +1,20 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
export PYTHON_HOME=/usr/bin/python2
export JAVA_HOME=/usr/lib/jvm/java-1.8-openjdk
export PATH=$PYTHON_HOME:$JAVA_HOME/bin:$PATH
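This file is mounted into the worker at `conf/env/` (as a bind mount in the compose file and as a config in the stack file) and sets up the environment used for task execution. A hedged sketch of extending it for tasks that need additional toolchains; the paths are illustrative and must exist inside the worker container:
```bash
# Illustrative additions only; keep the existing PYTHON_HOME/JAVA_HOME exports above them.
export HADOOP_HOME=/opt/soft/hadoop
export SPARK_HOME2=/opt/soft/spark2
export PATH=$HADOOP_HOME/bin:$SPARK_HOME2/bin:$PATH
```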

24
charts/dolphinscheduler/Chart.yaml → docker/kubernetes/dolphinscheduler/Chart.yaml

@ -21,8 +21,8 @@ description: Dolphin Scheduler is a distributed and easy-to-expand visual DAG wo
home: https://dolphinscheduler.apache.org
icon: https://dolphinscheduler.apache.org/img/hlogo_colorful.svg
keywords:
- dolphinscheduler
- Scheduler
- dolphinscheduler
- Scheduler
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
@ -35,18 +35,18 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.1.0
version: 1.0.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 1.2.1
appVersion: 1.3.0
dependencies:
- name: postgresql
version: 8.x.x
repository: https://charts.bitnami.com/bitnami
condition: postgresql.enabled
- name: zookeeper
version: 5.x.x
repository: https://charts.bitnami.com/bitnami
condition: redis.enabled
- name: postgresql
version: 8.x.x
repository: https://charts.bitnami.com/bitnami
condition: postgresql.enabled
- name: zookeeper
version: 5.x.x
repository: https://charts.bitnami.com/bitnami
condition: zookeeper.enabled

4
charts/README.md → docker/kubernetes/dolphinscheduler/README.md

@ -16,7 +16,9 @@ To install the chart with the release name `my-release`:
```bash
$ git clone https://github.com/apache/incubator-dolphinscheduler.git
$ cd incubator-dolphinscheduler
$ cd incubator-dolphinscheduler/kubernetes/dolphinscheduler
$ helm repo add bitnami https://charts.bitnami.com/bitnami
$ helm dependency update .
$ helm install --name dolphinscheduler .
```
These commands deploy Dolphin Scheduler on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.

25
docker/kubernetes/dolphinscheduler/requirements.yaml

@ -0,0 +1,25 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
dependencies:
- name: postgresql
version: 8.x.x
repository: https://charts.bitnami.com/bitnami
condition: postgresql.enabled
- name: zookeeper
version: 5.x.x
repository: https://charts.bitnami.com/bitnami
condition: zookeeper.enabled

0
charts/dolphinscheduler/templates/NOTES.txt → docker/kubernetes/dolphinscheduler/templates/NOTES.txt

16
charts/dolphinscheduler/templates/_helpers.tpl → docker/kubernetes/dolphinscheduler/templates/_helpers.tpl

@ -130,20 +130,4 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
{{- define "dolphinscheduler.worker.base.dir" -}}
{{- $name := default "/tmp/dolphinscheduler" .Values.worker.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH -}}
{{- printf "%s" $name | trunc 63 | trimSuffix "/" -}}
{{- end -}}
{{/*
Create a default dolphinscheduler worker data download dir.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dolphinscheduler.worker.data.download.dir" -}}
{{- printf "%s%s" (include "dolphinscheduler.worker.base.dir" .) "/download" -}}
{{- end -}}
{{/*
Create a default dolphinscheduler worker process exec dir.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dolphinscheduler.worker.process.exec.dir" -}}
{{- printf "%s%s" (include "dolphinscheduler.worker.base.dir" .) "/exec" -}}
{{- end -}}

0
charts/dolphinscheduler/templates/configmap-dolphinscheduler-alert.yaml → docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-alert.yaml

2
charts/dolphinscheduler/templates/configmap-dolphinscheduler-master.yaml → docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-master.yaml

@ -31,4 +31,6 @@ data:
MASTER_TASK_COMMIT_INTERVAL: {{ .Values.master.configmap.MASTER_TASK_COMMIT_INTERVAL | quote }}
MASTER_MAX_CPULOAD_AVG: {{ .Values.master.configmap.MASTER_MAX_CPULOAD_AVG | quote }}
MASTER_RESERVED_MEMORY: {{ .Values.master.configmap.MASTER_RESERVED_MEMORY | quote }}
MASTER_LISTEN_PORT: {{ .Values.master.configmap.MASTER_LISTEN_PORT | quote }}
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: {{ include "dolphinscheduler.worker.base.dir" . | quote }}
{{- end }}

4
charts/dolphinscheduler/templates/configmap-dolphinscheduler-worker.yaml → docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-worker.yaml

@ -29,9 +29,9 @@ data:
WORKER_FETCH_TASK_NUM: {{ .Values.worker.configmap.WORKER_FETCH_TASK_NUM | quote }}
WORKER_MAX_CPULOAD_AVG: {{ .Values.worker.configmap.WORKER_MAX_CPULOAD_AVG | quote }}
WORKER_RESERVED_MEMORY: {{ .Values.worker.configmap.WORKER_RESERVED_MEMORY | quote }}
WORKER_LISTEN_PORT: {{ .Values.worker.configmap.WORKER_LISTEN_PORT | quote }}
WORKER_GROUP: {{ .Values.worker.configmap.WORKER_GROUP | quote }}
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: {{ include "dolphinscheduler.worker.base.dir" . | quote }}
DOLPHINSCHEDULER_DATA_DOWNLOAD_BASEDIR_PATH: {{ include "dolphinscheduler.worker.data.download.dir" . | quote }}
DOLPHINSCHEDULER_PROCESS_EXEC_BASEPATH: {{ include "dolphinscheduler.worker.process.exec.dir" . | quote }}
dolphinscheduler_env.sh: |-
{{- range .Values.worker.configmap.DOLPHINSCHEDULER_ENV }}
{{ . }}

18
charts/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml → docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml

@ -166,19 +166,19 @@ spec:
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
{{- end }}
- name: POSTGRESQL_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port }}
{{- end }}
{{- end }}
- name: POSTGRESQL_USERNAME
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlUsername }}
{{- else }}
value: {{ .Values.externalDatabase.username | quote }}
{{- end }}
{{- end }}
- name: POSTGRESQL_PASSWORD
valueFrom:
secretKeyRef:
@ -188,14 +188,20 @@ spec:
{{- else }}
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
key: db-password
{{- end }}
{{- end }}
- name: POSTGRESQL_DATABASE
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlDatabase }}
{{- else }}
value: {{ .Values.externalDatabase.database | quote }}
{{- end }}
{{- if .Values.alert.livenessProbe.enabled }}
livenessProbe:
exec:
command:
- sh
- /root/checkpoint.sh
- worker-server
- AlertServer
initialDelaySeconds: {{ .Values.alert.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.alert.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.alert.livenessProbe.timeoutSeconds }}
@ -208,7 +214,7 @@ spec:
command:
- sh
- /root/checkpoint.sh
- worker-server
- AlertServer
initialDelaySeconds: {{ .Values.alert.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.alert.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.alert.readinessProbe.timeoutSeconds }}
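The probes now pass the Java server class name `AlertServer` to `checkpoint.sh`, matching the healthchecks in the compose files. A hedged sketch of running the same check by hand inside a running alert pod; the pod name is a placeholder:
```bash
$ kubectl get pods | grep alert                              # find the alert pod name
$ kubectl exec -it <alert-pod-name> -- sh /root/checkpoint.sh AlertServer
```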

12
charts/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml → docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml

@ -99,19 +99,19 @@ spec:
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
{{- end }}
- name: POSTGRESQL_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port }}
{{- end }}
{{- end }}
- name: POSTGRESQL_USERNAME
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlUsername }}
{{- else }}
value: {{ .Values.externalDatabase.username | quote }}
{{- end }}
{{- end }}
- name: POSTGRESQL_PASSWORD
valueFrom:
secretKeyRef:
@ -122,6 +122,12 @@ spec:
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
key: db-password
{{- end }}
- name: POSTGRESQL_DATABASE
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlDatabase }}
{{- else }}
value: {{ .Values.externalDatabase.database | quote }}
{{- end }}
- name: ZOOKEEPER_QUORUM
{{- if .Values.zookeeper.enabled }}
value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"

0
charts/dolphinscheduler/templates/deployment-dolphinscheduler-frontend.yaml → docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-frontend.yaml

0
charts/dolphinscheduler/templates/ingress.yaml → docker/kubernetes/dolphinscheduler/templates/ingress.yaml

0
charts/dolphinscheduler/templates/pvc-dolphinscheduler-alert.yaml → docker/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-alert.yaml

0
charts/dolphinscheduler/templates/pvc-dolphinscheduler-api.yaml → docker/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-api.yaml

0
charts/dolphinscheduler/templates/pvc-dolphinscheduler-frontend.yaml → docker/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-frontend.yaml

0
charts/dolphinscheduler/templates/secret-external-postgresql.yaml → docker/kubernetes/dolphinscheduler/templates/secret-external-postgresql.yaml

26
charts/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml → docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml

@ -109,8 +109,8 @@ spec:
args:
- "master-server"
ports:
- containerPort: 8888
name: unused-tcp-port
- containerPort: {{ .Values.master.configmap.MASTER_LISTEN_PORT }}
name: "master-port"
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: TZ
@ -150,6 +150,16 @@ spec:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-master
key: MASTER_RESERVED_MEMORY
- name: MASTER_LISTEN_PORT
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-master
key: MASTER_LISTEN_PORT
- name: DOLPHINSCHEDULER_DATA_BASEDIR_PATH
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-master
key: DOLPHINSCHEDULER_DATA_BASEDIR_PATH
- name: POSTGRESQL_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
@ -178,11 +188,11 @@ spec:
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
key: db-password
{{- end }}
- name: TASK_QUEUE
{{- if .Values.zookeeper.enabled }}
value: {{ .Values.zookeeper.taskQueue }}
- name: POSTGRESQL_DATABASE
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlDatabase }}
{{- else }}
value: {{ .Values.externalZookeeper.taskQueue }}
value: {{ .Values.externalDatabase.database | quote }}
{{- end }}
- name: ZOOKEEPER_QUORUM
{{- if .Values.zookeeper.enabled }}
@ -196,7 +206,7 @@ spec:
command:
- sh
- /root/checkpoint.sh
- master-server
- MasterServer
initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.master.livenessProbe.timeoutSeconds }}
@ -209,7 +219,7 @@ spec:
command:
- sh
- /root/checkpoint.sh
- master-server
- MasterServer
initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.master.readinessProbe.timeoutSeconds }}

35
charts/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml → docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml

@ -109,6 +109,8 @@ spec:
args:
- "worker-server"
ports:
- containerPort: {{ .Values.worker.configmap.WORKER_LISTEN_PORT }}
name: "worker-port"
- containerPort: 50051
name: "logs-port"
imagePullPolicy: {{ .Values.image.pullPolicy }}
@ -140,6 +142,21 @@ spec:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-worker
key: WORKER_RESERVED_MEMORY
- name: WORKER_LISTEN_PORT
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-worker
key: WORKER_LISTEN_PORT
- name: WORKER_GROUP
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-worker
key: WORKER_GROUP
- name: DOLPHINSCHEDULER_DATA_BASEDIR_PATH
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-master
key: DOLPHINSCHEDULER_DATA_BASEDIR_PATH
- name: POSTGRESQL_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
@ -167,12 +184,12 @@ spec:
{{- else }}
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
key: db-password
{{- end }}
- name: TASK_QUEUE
{{- if .Values.zookeeper.enabled }}
value: {{ .Values.zookeeper.taskQueue }}
{{- end }}
- name: POSTGRESQL_DATABASE
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlDatabase }}
{{- else }}
value: {{ .Values.externalZookeeper.taskQueue }}
value: {{ .Values.externalDatabase.database | quote }}
{{- end }}
- name: ZOOKEEPER_QUORUM
{{- if .Values.zookeeper.enabled }}
@ -186,7 +203,7 @@ spec:
command:
- sh
- /root/checkpoint.sh
- worker-server
- WorkerServer
initialDelaySeconds: {{ .Values.worker.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.worker.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.worker.livenessProbe.timeoutSeconds }}
@ -199,7 +216,7 @@ spec:
command:
- sh
- /root/checkpoint.sh
- worker-server
- WorkerServer
initialDelaySeconds: {{ .Values.worker.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.worker.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.worker.readinessProbe.timeoutSeconds }}
@ -247,7 +264,7 @@ spec:
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
accessModes:
{{- range .Values.worker.persistentVolumeClaim.dataPersistentVolume.accessModes }}
{{- range .Values.worker.persistentVolumeClaim.dataPersistentVolume.accessModes }}
- {{ . | quote }}
{{- end }}
storageClassName: {{ .Values.worker.persistentVolumeClaim.dataPersistentVolume.storageClassName | quote }}
@ -264,7 +281,7 @@ spec:
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
accessModes:
{{- range .Values.worker.persistentVolumeClaim.logsPersistentVolume.accessModes }}
{{- range .Values.worker.persistentVolumeClaim.logsPersistentVolume.accessModes }}
- {{ . | quote }}
{{- end }}
storageClassName: {{ .Values.worker.persistentVolumeClaim.logsPersistentVolume.storageClassName | quote }}

0
charts/dolphinscheduler/templates/svc-dolphinscheduler-api.yaml → docker/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-api.yaml

0
charts/dolphinscheduler/templates/svc-dolphinscheduler-frontend.yaml → docker/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-frontend.yaml

6
charts/dolphinscheduler/templates/svc-dolphinscheduler-master-headless.yaml → docker/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-master-headless.yaml

@ -25,10 +25,10 @@ metadata:
spec:
clusterIP: "None"
ports:
- port: 8888
targetPort: tcp-port
- port: {{ .Values.master.configmap.MASTER_LISTEN_PORT }}
targetPort: master-port
protocol: TCP
name: unused-tcp-port
name: master-port
selector:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master
app.kubernetes.io/instance: {{ .Release.Name }}

4
charts/dolphinscheduler/templates/svc-dolphinscheduler-worker-headless.yaml → docker/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-worker-headless.yaml

@ -25,6 +25,10 @@ metadata:
spec:
clusterIP: "None"
ports:
- port: {{ .Values.worker.configmap.WORKER_LISTEN_PORT }}
targetPort: worker-port
protocol: TCP
name: worker-port
- port: 50051
targetPort: logs-port
protocol: TCP

7
charts/dolphinscheduler/values.yaml → docker/kubernetes/dolphinscheduler/values.yaml

@ -27,7 +27,7 @@ timezone: "Asia/Shanghai"
image:
registry: "docker.io"
repository: "dolphinscheduler"
tag: "1.2.1"
tag: "1.3.0"
pullPolicy: "IfNotPresent"
imagePullSecrets: []
@ -56,6 +56,8 @@ externalDatabase:
zookeeper:
enabled: true
taskQueue: "zookeeper"
service:
port: "2181"
persistence:
enabled: false
size: "20Gi"
@ -91,6 +93,7 @@ master:
MASTER_TASK_COMMIT_INTERVAL: "1000"
MASTER_MAX_CPULOAD_AVG: "100"
MASTER_RESERVED_MEMORY: "0.1"
MASTER_LISTEN_PORT: "5678"
livenessProbe:
enabled: true
initialDelaySeconds: "30"
@ -156,6 +159,8 @@ worker:
WORKER_FETCH_TASK_NUM: "3"
WORKER_MAX_CPULOAD_AVG: "100"
WORKER_RESERVED_MEMORY: "0.1"
WORKER_LISTEN_PORT: "1234"
WORKER_GROUP: "default"
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
DOLPHINSCHEDULER_ENV:
- "export HADOOP_HOME=/opt/soft/hadoop"

3
dockerfile/Dockerfile

@ -27,7 +27,7 @@ ENV DEBIAN_FRONTEND noninteractive
# If installation is slow, you can replace alpine's mirror with aliyun's mirror, for example:
#RUN sed -i "s/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g" /etc/apk/repositories
RUN apk update && \
apk add dos2unix shadow bash openrc python sudo vim wget iputils net-tools openssh-server py2-pip tini && \
apk add dos2unix shadow bash openrc python python3 sudo vim wget iputils net-tools openssh-server py2-pip tini && \
apk add --update procps && \
openrc boot && \
pip install kazoo
@ -67,6 +67,7 @@ ADD ./checkpoint.sh /root/checkpoint.sh
ADD ./startup-init-conf.sh /root/startup-init-conf.sh
ADD ./startup.sh /root/startup.sh
ADD ./conf/dolphinscheduler/*.tpl /opt/dolphinscheduler/conf/
ADD ./conf/dolphinscheduler/logback/* /opt/dolphinscheduler/conf/
ADD conf/dolphinscheduler/env/dolphinscheduler_env.sh /opt/dolphinscheduler/conf/env/
RUN chmod +x /root/checkpoint.sh && \
chmod +x /root/startup-init-conf.sh && \
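With python3 added to the `apk add` line and the logback templates copied into `conf/`, the image can be rebuilt locally. A minimal sketch; the tag is illustrative:
```bash
$ cd dockerfile
$ docker build -t dolphinscheduler:dev .
# Bypass the image entrypoint to confirm python3 is now available in the image:
$ docker run --rm --entrypoint python3 dolphinscheduler:dev --version
```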

12
dockerfile/conf/dolphinscheduler/env/dolphinscheduler_env.sh vendored

@ -15,12 +15,6 @@
# limitations under the License.
#
export HADOOP_HOME=/opt/soft/hadoop
export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop
export SPARK_HOME1=/opt/soft/spark1
export SPARK_HOME2=/opt/soft/spark2
export PYTHON_HOME=/opt/soft/python
export JAVA_HOME=/opt/soft/java
export HIVE_HOME=/opt/soft/hive
export FLINK_HOME=/opt/soft/flink
export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$FLINK_HOME/bin:$PATH
export PYTHON_HOME=/usr/bin/python2
export JAVA_HOME=/usr/lib/jvm/java-1.8-openjdk
export PATH=$PYTHON_HOME/bin:$JAVA_HOME/bin:$PATH

52
dockerfile/conf/dolphinscheduler/logback/logback-alert.xml

@ -0,0 +1,52 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
<property name="log.base" value="logs"/>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="ALERTLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.base}/dolphinscheduler-alert.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/dolphinscheduler-alert.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>20</maxHistory>
<maxFileSize>64MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="STDOUT"/>
<appender-ref ref="APILOGFILE"/>
</root>
</configuration>

62
dockerfile/conf/dolphinscheduler/logback/logback-api.xml

@ -0,0 +1,62 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
<property name="log.base" value="logs"/>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- api server logback config start -->
<appender name="APILOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.base}/dolphinscheduler-api-server.log</file>
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/dolphinscheduler-api-server.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>168</maxHistory>
<maxFileSize>64MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- api server logback config end -->
<logger name="org.apache.zookeeper" level="WARN"/>
<logger name="org.apache.hbase" level="WARN"/>
<logger name="org.apache.hadoop" level="WARN"/>
<root level="INFO">
<appender-ref ref="STDOUT"/>
<appender-ref ref="APILOGFILE"/>
</root>
</configuration>

82
dockerfile/conf/dolphinscheduler/logback/logback-master.xml

@ -0,0 +1,82 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
<property name="log.base" value="logs"/>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<conversionRule conversionWord="messsage"
converterClass="org.apache.dolphinscheduler.server.log.SensitiveDataConverter"/>
<appender name="TASKLOGFILE" class="ch.qos.logback.classic.sift.SiftingAppender">
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<filter class="org.apache.dolphinscheduler.server.log.TaskLogFilter"/>
<Discriminator class="org.apache.dolphinscheduler.server.log.TaskLogDiscriminator">
<key>taskAppId</key>
<logBase>${log.base}</logBase>
</Discriminator>
<sift>
<appender name="FILE-${taskAppId}" class="ch.qos.logback.core.FileAppender">
<file>${log.base}/${taskAppId}.log</file>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %messsage%n
</pattern>
<charset>UTF-8</charset>
</encoder>
<append>true</append>
</appender>
</sift>
</appender>
<!-- master server logback config start -->
<appender name="MASTERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.base}/dolphinscheduler-master.log</file>
<!--<filter class="org.apache.dolphinscheduler.server.log.MasterLogFilter">
<level>INFO</level>
</filter>-->
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/dolphinscheduler-master.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>168</maxHistory>
<maxFileSize>200MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- master server logback config end -->
<root level="INFO">
<appender-ref ref="STDOUT"/>
<appender-ref ref="TASKLOGFILE"/>
<appender-ref ref="MASTERLOGFILE"/>
</root>
</configuration>

26
dockerfile/conf/dolphinscheduler/conf/worker_logback.xml → dockerfile/conf/dolphinscheduler/logback/logback-worker.xml

@ -1,4 +1,4 @@
<?xml version="1.0" encoding="UTF-8" ?>
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
@ -17,7 +17,8 @@
-->
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds">
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
<property name="log.base" value="logs"/>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
@ -27,11 +28,15 @@
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- worker server logback config start -->
<conversionRule conversionWord="messsage"
converterClass="org.apache.dolphinscheduler.server.log.SensitiveDataConverter"/>
<appender name="TASKLOGFILE" class="ch.qos.logback.classic.sift.SiftingAppender">
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<filter class="org.apache.dolphinscheduler.server.log.TaskLogFilter"></filter>
<filter class="org.apache.dolphinscheduler.server.log.TaskLogFilter"/>
<Discriminator class="org.apache.dolphinscheduler.server.log.TaskLogDiscriminator">
<key>taskAppId</key>
<logBase>${log.base}</logBase>
@ -41,7 +46,7 @@
<file>${log.base}/${taskAppId}.log</file>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %messsage%n
</pattern>
<charset>UTF-8</charset>
</encoder>
@ -49,31 +54,30 @@
</appender>
</sift>
</appender>
<appender name="WORKERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.base}/dolphinscheduler-worker.log</file>
<filter class="org.apache.dolphinscheduler.server.log.WorkerLogFilter">
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<filter class="org.apache.dolphinscheduler.server.log.WorkerLogFilter"/>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/dolphinscheduler-worker.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>168</maxHistory>
<maxFileSize>200MB</maxFileSize>
</rollingPolicy>
     
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %messsage%n
</pattern>
<charset>UTF-8</charset>
</encoder>
  
</appender>
<!-- worker server logback config end -->
<root level="INFO">
<appender-ref ref="STDOUT"/>
<appender-ref ref="TASKLOGFILE"/>
<appender-ref ref="WORKERLOGFILE"/>
</root>
</configuration>

6
dockerfile/conf/dolphinscheduler/zookeeper.properties.tpl

@ -22,8 +22,8 @@ zookeeper.quorum=${ZOOKEEPER_QUORUM}
#zookeeper.dolphinscheduler.root=/dolphinscheduler
# dolphinscheduler failover directory
#zookeeper.session.timeout=300
#zookeeper.connection.timeout=300
#zookeeper.session.timeout=60000
#zookeeper.connection.timeout=30000
#zookeeper.retry.base.sleep=100
#zookeeper.retry.max.sleep=30000
#zookeeper.retry.maxtime=5
#zookeeper.retry.maxtime=10

23
dockerfile/startup.sh

@ -25,7 +25,9 @@ DOLPHINSCHEDULER_LOGS=${DOLPHINSCHEDULER_HOME}/logs
# start postgresql
initPostgreSQL() {
echo "checking postgresql"
if [ -n "$(ifconfig | grep ${POSTGRESQL_HOST})" ]; then
if [[ "${POSTGRESQL_HOST}" = "127.0.0.1" || "${POSTGRESQL_HOST}" = "localhost" ]]; then
export PGPORT=${POSTGRESQL_PORT}
echo "start postgresql service"
rc-service postgresql restart
@ -47,10 +49,21 @@ initPostgreSQL() {
sudo -u postgres psql -tAc "grant all privileges on database dolphinscheduler to ${POSTGRESQL_USERNAME}"
fi
echo "test postgresql service"
while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
counter=$((counter+1))
if [ $counter == 30 ]; then
echo "Error: Couldn't connect to postgresql."
exit 1
fi
echo "Trying to connect to postgresql at ${POSTGRESQL_HOST}:${POSTGRESQL_PORT}. Attempt $counter."
sleep 5
done
echo "connect postgresql service"
v=$(sudo -u postgres PGPASSWORD=${POSTGRESQL_PASSWORD} psql -h ${POSTGRESQL_HOST} -U ${POSTGRESQL_USERNAME} -d dolphinscheduler -tAc "select 1")
v=$(sudo -u postgres PGPASSWORD=${POSTGRESQL_PASSWORD} psql -h ${POSTGRESQL_HOST} -p ${POSTGRESQL_PORT} -U ${POSTGRESQL_USERNAME} -d dolphinscheduler -tAc "select 1")
if [ "$(echo '${v}' | grep 'FATAL' | wc -l)" -eq 1 ]; then
echo "Can't connect to database...${v}"
echo "Error: Can't connect to database...${v}"
exit 1
fi
@ -70,10 +83,10 @@ initZK() {
while ! nc -z ${line%:*} ${line#*:}; do
counter=$((counter+1))
if [ $counter == 30 ]; then
log "Error: Couldn't connect to zookeeper."
echo "Error: Couldn't connect to zookeeper."
exit 1
fi
log "Trying to connect to zookeeper at ${line}. Attempt $counter."
echo "Trying to connect to zookeeper at ${line}. Attempt $counter."
sleep 5
done
done

30
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/AlertServer.java

@ -16,8 +16,11 @@
*/
package org.apache.dolphinscheduler.alert;
import org.apache.dolphinscheduler.alert.plugin.EmailAlertPlugin;
import org.apache.dolphinscheduler.alert.runner.AlertSender;
import org.apache.dolphinscheduler.alert.utils.Constants;
import org.apache.dolphinscheduler.alert.utils.PropertyUtils;
import org.apache.dolphinscheduler.common.plugin.FilePluginManager;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.dao.AlertDao;
import org.apache.dolphinscheduler.dao.DaoFactory;
@ -41,34 +44,47 @@ public class AlertServer {
private static AlertServer instance;
public AlertServer() {
private FilePluginManager alertPluginManager;
private static final String[] whitePrefixes = new String[]{"org.apache.dolphinscheduler.plugin.utils."};
private static final String[] excludePrefixes = new String[]{
"org.apache.dolphinscheduler.plugin.",
"ch.qos.logback.",
"org.slf4j."
};
public AlertServer() {
alertPluginManager =
new FilePluginManager(PropertyUtils.getString(Constants.PLUGIN_DIR), whitePrefixes, excludePrefixes);
// add default alert plugins
alertPluginManager.addPlugin(new EmailAlertPlugin());
}
public synchronized static AlertServer getInstance(){
public synchronized static AlertServer getInstance() {
if (null == instance) {
instance = new AlertServer();
}
return instance;
}
public void start(){
public void start() {
logger.info("alert server ready start ");
while (Stopper.isRunning()){
while (Stopper.isRunning()) {
try {
Thread.sleep(Constants.ALERT_SCAN_INTERVAL);
} catch (InterruptedException e) {
logger.error(e.getMessage(),e);
logger.error(e.getMessage(), e);
Thread.currentThread().interrupt();
}
List<Alert> alerts = alertDao.listWaitExecutionAlert();
alertSender = new AlertSender(alerts, alertDao);
alertSender = new AlertSender(alerts, alertDao, alertPluginManager);
alertSender.run();
}
}
public static void main(String[] args){
public static void main(String[] args) {
AlertServer alertServer = AlertServer.getInstance();
alertServer.start();
}
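Note for orientation: the server above now loads alert plugins through a FilePluginManager built from the plugin.dir property, with whitePrefixes/excludePrefixes restricting what the plugin classloader may resolve, and it registers EmailAlertPlugin as the built-in default. As a rough illustration of what a third-party plugin could look like, here is a minimal sketch that follows the same AlertPlugin contract exercised by EmailAlertPlugin further down (getId, getName, process). The LogOnlyAlertPlugin class and its id are hypothetical; if the AlertPlugin interface declares additional methods, they would need implementations as well.

import org.apache.dolphinscheduler.alert.utils.Constants;
import org.apache.dolphinscheduler.plugin.api.AlertPlugin;
import org.apache.dolphinscheduler.plugin.model.AlertInfo;
import org.apache.dolphinscheduler.plugin.model.PluginName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.HashMap;
import java.util.Map;

/**
 * Hypothetical plugin that only logs alerts; shown for illustration, not part of this change.
 */
public class LogOnlyAlertPlugin implements AlertPlugin {

    private static final Logger logger = LoggerFactory.getLogger(LogOnlyAlertPlugin.class);

    private final PluginName pluginName;

    public LogOnlyAlertPlugin() {
        this.pluginName = new PluginName();
        this.pluginName.setEnglish("log-only");
        this.pluginName.setChinese("仅记录日志");
    }

    @Override
    public String getId() {
        return "log-only";
    }

    @Override
    public PluginName getName() {
        return pluginName;
    }

    @Override
    public Map<String, Object> process(AlertInfo info) {
        Map<String, Object> retMaps = new HashMap<>();
        // write the alert to the log instead of sending it anywhere
        logger.info("alert received: {}", info.getAlertData().getTitle());
        retMaps.put(Constants.STATUS, "true");
        retMaps.put(Constants.MESSAGE, "alert logged only.");
        return retMaps;
    }
}

Such a plugin could then be registered beside the default one with alertPluginManager.addPlugin(new LogOnlyAlertPlugin()), or dropped into plugin.dir if FilePluginManager scans that directory (an assumption not verified by this diff).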

5
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/manager/EmailManager.java

@ -17,7 +17,6 @@
package org.apache.dolphinscheduler.alert.manager;
import org.apache.dolphinscheduler.alert.utils.MailUtils;
import org.apache.dolphinscheduler.common.enums.ShowType;
import java.util.List;
import java.util.Map;
@ -35,7 +34,7 @@ public class EmailManager {
* @param showType the showType
* @return the send result
*/
public Map<String,Object> send(List<String> receviersList,List<String> receviersCcList,String title,String content,ShowType showType){
public Map<String,Object> send(List<String> receviersList,List<String> receviersCcList,String title,String content,String showType){
return MailUtils.sendMails(receviersList, receviersCcList, title, content, showType);
}
@ -48,7 +47,7 @@ public class EmailManager {
* @param showType the showType
* @return the send result
*/
public Map<String,Object> send(List<String> receviersList,String title,String content,ShowType showType){
public Map<String,Object> send(List<String> receviersList,String title,String content,String showType){
return MailUtils.sendMails(receviersList,title, content, showType);
}

10
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/manager/EnterpriseWeChatManager.java

@ -18,7 +18,7 @@ package org.apache.dolphinscheduler.alert.manager;
import org.apache.dolphinscheduler.alert.utils.Constants;
import org.apache.dolphinscheduler.alert.utils.EnterpriseWeChatUtils;
import org.apache.dolphinscheduler.dao.entity.Alert;
import org.apache.dolphinscheduler.plugin.model.AlertInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -35,18 +35,18 @@ public class EnterpriseWeChatManager {
private static final Logger logger = LoggerFactory.getLogger(EnterpriseWeChatManager.class);
/**
* Enterprise WeChat send
* @param alert the alert
* @param alertInfo the alert info
* @param token the token
* @return the send result
*/
public Map<String,Object> send(Alert alert, String token){
public Map<String,Object> send(AlertInfo alertInfo, String token){
Map<String,Object> retMap = new HashMap<>();
retMap.put(Constants.STATUS, false);
String agentId = EnterpriseWeChatUtils.ENTERPRISE_WE_CHAT_AGENT_ID;
String users = EnterpriseWeChatUtils.ENTERPRISE_WE_CHAT_USERS;
List<String> userList = Arrays.asList(users.split(","));
logger.info("send message {}",alert);
String msg = EnterpriseWeChatUtils.makeUserSendMsg(userList, agentId,EnterpriseWeChatUtils.markdownByAlert(alert));
logger.info("send message {}", alertInfo.getAlertData().getTitle());
String msg = EnterpriseWeChatUtils.makeUserSendMsg(userList, agentId,EnterpriseWeChatUtils.markdownByAlert(alertInfo.getAlertData()));
try {
EnterpriseWeChatUtils.sendEnterpriseWeChat(Constants.UTF_8, msg, token);
} catch (IOException e) {

133
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/plugin/EmailAlertPlugin.java

@ -0,0 +1,133 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.alert.plugin;
import org.apache.dolphinscheduler.alert.manager.EmailManager;
import org.apache.dolphinscheduler.alert.manager.EnterpriseWeChatManager;
import org.apache.dolphinscheduler.alert.utils.Constants;
import org.apache.dolphinscheduler.alert.utils.EnterpriseWeChatUtils;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.plugin.api.AlertPlugin;
import org.apache.dolphinscheduler.plugin.model.AlertData;
import org.apache.dolphinscheduler.plugin.model.AlertInfo;
import org.apache.dolphinscheduler.plugin.model.PluginName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
/**
* EmailAlertPlugin
*
* This plugin is the default plugin; it combines email and Enterprise WeChat in order to stay compatible with the former alert behavior
*/
public class EmailAlertPlugin implements AlertPlugin {
private static final Logger logger = LoggerFactory.getLogger(EmailAlertPlugin.class);
private PluginName pluginName;
private static final EmailManager emailManager = new EmailManager();
private static final EnterpriseWeChatManager weChatManager = new EnterpriseWeChatManager();
public EmailAlertPlugin() {
this.pluginName = new PluginName();
this.pluginName.setEnglish(Constants.PLUGIN_DEFAULT_EMAIL_EN);
this.pluginName.setChinese(Constants.PLUGIN_DEFAULT_EMAIL_CH);
}
@Override
public String getId() {
return Constants.PLUGIN_DEFAULT_EMAIL;
}
@Override
public PluginName getName() {
return pluginName;
}
@Override
@SuppressWarnings("unchecked")
public Map<String, Object> process(AlertInfo info) {
Map<String, Object> retMaps = new HashMap<>();
AlertData alert = info.getAlertData();
List<String> receviersList = (List<String>) info.getProp(Constants.PLUGIN_DEFAULT_EMAIL_RECEIVERS);
// receiving group list
// custom receiver
String receivers = alert.getReceivers();
if (StringUtils.isNotEmpty(receivers)) {
String[] splits = receivers.split(",");
receviersList.addAll(Arrays.asList(splits));
}
List<String> receviersCcList = new ArrayList<>();
// custom CC recipients
String receiversCc = alert.getReceiversCc();
if (StringUtils.isNotEmpty(receiversCc)) {
String[] splits = receiversCc.split(",");
receviersCcList.addAll(Arrays.asList(splits));
}
if (CollectionUtils.isEmpty(receviersList) && CollectionUtils.isEmpty(receviersCcList)) {
logger.warn("alert send error : At least one receiver address required");
retMaps.put(Constants.STATUS, "false");
retMaps.put(Constants.MESSAGE, "execution failure,At least one receiver address required.");
return retMaps;
}
retMaps = emailManager.send(receviersList, receviersCcList, alert.getTitle(), alert.getContent(),
alert.getShowType());
//send flag
boolean flag = false;
if (retMaps == null) {
retMaps = new HashMap<>();
retMaps.put(Constants.MESSAGE, "alert send error.");
retMaps.put(Constants.STATUS, "false");
logger.info("alert send error : {}", retMaps.get(Constants.MESSAGE));
return retMaps;
}
flag = Boolean.parseBoolean(String.valueOf(retMaps.get(Constants.STATUS)));
if (flag) {
logger.info("alert send success");
retMaps.put(Constants.MESSAGE, "email send success.");
if (EnterpriseWeChatUtils.isEnable()) {
logger.info("Enterprise WeChat is enable!");
try {
String token = EnterpriseWeChatUtils.getToken();
weChatManager.send(info, token);
} catch (Exception e) {
logger.error(e.getMessage(), e);
}
}
} else {
retMaps.put(Constants.MESSAGE, "alert send error.");
logger.info("alert send error : {}", retMaps.get(Constants.MESSAGE));
}
return retMaps;
}
}
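For orientation, a small self-contained sketch of the calling side, mirroring what AlertSender (further down) and the new unit test do: build an AlertData/AlertInfo pair, attach the receiver list under the "receivers" prop, and hand it to the plugin. Values are placeholders, and without a configured mail server the returned status reports a failure, as the unit test also expects.

import org.apache.dolphinscheduler.alert.plugin.EmailAlertPlugin;
import org.apache.dolphinscheduler.alert.utils.Constants;
import org.apache.dolphinscheduler.common.enums.ShowType;
import org.apache.dolphinscheduler.plugin.model.AlertData;
import org.apache.dolphinscheduler.plugin.model.AlertInfo;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class EmailAlertPluginUsageSketch {

    public static void main(String[] args) {
        // alert payload; the fluent setters follow the style used in AlertSender below
        AlertData alertData = new AlertData();
        alertData.setId(1)
                .setTitle("test title")
                .setContent("test content")
                .setShowType(ShowType.TEXT.getDescp())
                .setReceivers("xx@xx.com");

        AlertInfo alertInfo = new AlertInfo();
        alertInfo.setAlertData(alertData);

        // the plugin reads its base receiver list from this prop and may append the
        // custom receivers set above, so the list must be mutable
        List<String> receivers = new ArrayList<>();
        receivers.add("xx@xx.com");
        alertInfo.addProp(Constants.PLUGIN_DEFAULT_EMAIL_RECEIVERS, receivers);

        Map<String, Object> ret = new EmailAlertPlugin().process(alertInfo);
        System.out.println(ret.get(Constants.STATUS) + " : " + ret.get(Constants.MESSAGE));
    }
}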

130
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/runner/AlertSender.java

@ -16,139 +16,85 @@
*/
package org.apache.dolphinscheduler.alert.runner;
import org.apache.dolphinscheduler.alert.manager.EmailManager;
import org.apache.dolphinscheduler.alert.manager.EnterpriseWeChatManager;
import org.apache.dolphinscheduler.alert.utils.Constants;
import org.apache.dolphinscheduler.alert.utils.EnterpriseWeChatUtils;
import org.apache.dolphinscheduler.common.enums.AlertStatus;
import org.apache.dolphinscheduler.common.enums.AlertType;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.common.plugin.PluginManager;
import org.apache.dolphinscheduler.dao.AlertDao;
import org.apache.dolphinscheduler.dao.entity.Alert;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.plugin.api.AlertPlugin;
import org.apache.dolphinscheduler.plugin.model.AlertData;
import org.apache.dolphinscheduler.plugin.model.AlertInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
/**
* alert sender
*/
public class AlertSender{
public class AlertSender {
private static final Logger logger = LoggerFactory.getLogger(AlertSender.class);
private static final EmailManager emailManager= new EmailManager();
private static final EnterpriseWeChatManager weChatManager= new EnterpriseWeChatManager();
private List<Alert> alertList;
private AlertDao alertDao;
private PluginManager pluginManager;
public AlertSender() {
}
public AlertSender(){}
public AlertSender(List<Alert> alertList, AlertDao alertDao){
public AlertSender(List<Alert> alertList, AlertDao alertDao, PluginManager pluginManager) {
super();
this.alertList = alertList;
this.alertDao = alertDao;
this.pluginManager = pluginManager;
}
public void run() {
List<User> users;
Map<String, Object> retMaps = null;
for(Alert alert:alertList){
for (Alert alert : alertList) {
users = alertDao.listUserByAlertgroupId(alert.getAlertGroupId());
// receiving group list
List<String> receviersList = new ArrayList<>();
for(User user:users){
for (User user : users) {
receviersList.add(user.getEmail());
}
// custom receiver
String receivers = alert.getReceivers();
if (StringUtils.isNotEmpty(receivers)){
String[] splits = receivers.split(",");
receviersList.addAll(Arrays.asList(splits));
}
// copy list
List<String> receviersCcList = new ArrayList<>();
// Custom Copier
String receiversCc = alert.getReceiversCc();
if (StringUtils.isNotEmpty(receiversCc)){
String[] splits = receiversCc.split(",");
receviersCcList.addAll(Arrays.asList(splits));
}
if (CollectionUtils.isEmpty(receviersList) && CollectionUtils.isEmpty(receviersCcList)) {
logger.warn("alert send error : At least one receiver address required");
alertDao.updateAlert(AlertStatus.EXECUTION_FAILURE, "execution failure,At least one receiver address required.", alert.getId());
continue;
}
if (alert.getAlertType() == AlertType.EMAIL){
retMaps = emailManager.send(receviersList,receviersCcList, alert.getTitle(), alert.getContent(),alert.getShowType());
alert.setInfo(retMaps);
}else if (alert.getAlertType() == AlertType.SMS){
retMaps = emailManager.send(getReciversForSMS(users), alert.getTitle(), alert.getContent(),alert.getShowType());
alert.setInfo(retMaps);
AlertData alertData = new AlertData();
alertData.setId(alert.getId())
.setAlertGroupId(alert.getAlertGroupId())
.setContent(alert.getContent())
.setLog(alert.getLog())
.setReceivers(alert.getReceivers())
.setReceiversCc(alert.getReceiversCc())
.setShowType(alert.getShowType().getDescp())
.setTitle(alert.getTitle());
AlertInfo alertInfo = new AlertInfo();
alertInfo.setAlertData(alertData);
alertInfo.addProp("receivers", receviersList);
AlertPlugin emailPlugin = pluginManager.findOne(Constants.PLUGIN_DEFAULT_EMAIL);
retMaps = emailPlugin.process(alertInfo);
if (retMaps == null) {
alertDao.updateAlert(AlertStatus.EXECUTION_FAILURE, "alert send error", alert.getId());
logger.info("alert send error : return value is null");
} else if (!Boolean.parseBoolean(String.valueOf(retMaps.get(Constants.STATUS)))) {
alertDao.updateAlert(AlertStatus.EXECUTION_FAILURE, String.valueOf(retMaps.get(Constants.MESSAGE)), alert.getId());
logger.info("alert send error : {}", retMaps.get(Constants.MESSAGE));
} else {
logger.error("AlertType is not defined. code: {}, descp: {}",
alert.getAlertType().getCode(),
alert.getAlertType().getDescp());
return;
}
//send flag
boolean flag = false;
if (null != retMaps) {
flag = Boolean.parseBoolean(String.valueOf(retMaps.get(Constants.STATUS)));
}
if (flag) {
alertDao.updateAlert(AlertStatus.EXECUTION_SUCCESS, "execution success", alert.getId());
alertDao.updateAlert(AlertStatus.EXECUTION_SUCCESS, (String) retMaps.get(Constants.MESSAGE), alert.getId());
logger.info("alert send success");
if (EnterpriseWeChatUtils.isEnable()) {
logger.info("Enterprise WeChat is enable!");
try {
String token = EnterpriseWeChatUtils.getToken();
weChatManager.send(alert, token);
} catch (Exception e) {
logger.error(e.getMessage(), e);
}
}
} else {
if (null != retMaps) {
alertDao.updateAlert(AlertStatus.EXECUTION_FAILURE, String.valueOf(retMaps.get(Constants.MESSAGE)), alert.getId());
logger.info("alert send error : {}", retMaps.get(Constants.MESSAGE));
}
}
}
}
/**
* get a list of SMS users
* @param users
* @return
*/
private List<String> getReciversForSMS(List<User> users){
List<String> list = new ArrayList<>();
for (User user : users){
list.add(user.getPhone());
}
return list;
}
}

17
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/Constants.java

@ -156,4 +156,21 @@ public class Constants {
public static final String ENTERPRISE_WECHAT_AGENT_ID = "enterprise.wechat.agent.id";
public static final String ENTERPRISE_WECHAT_USERS = "enterprise.wechat.users";
/**
* plugin config
*/
public static final String PLUGIN_DIR = "plugin.dir";
public static final String PLUGIN_DEFAULT_EMAIL = "email";
public static final String PLUGIN_DEFAULT_EMAIL_CH = "邮件";
public static final String PLUGIN_DEFAULT_EMAIL_EN = "email";
public static final String PLUGIN_DEFAULT_EMAIL_RECEIVERS = "receivers";
public static final String PLUGIN_DEFAULT_EMAIL_RECEIVERCCS = "receiverCcs";
public static final String RETMAP_MSG = "msg";
}

17
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/EnterpriseWeChatUtils.java

@ -18,10 +18,10 @@ package org.apache.dolphinscheduler.alert.utils;
import org.apache.dolphinscheduler.common.enums.ShowType;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.Alert;
import com.alibaba.fastjson.JSON;
import com.google.common.reflect.TypeToken;
import org.apache.dolphinscheduler.plugin.model.AlertData;
import org.apache.http.HttpEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
@ -66,15 +66,17 @@ public class EnterpriseWeChatUtils {
* check whether Enterprise WeChat is enabled
* @return isEnable
*/
public static Boolean isEnable(){
Boolean isEnable = false;
public static boolean isEnable(){
Boolean isEnable = null;
try {
isEnable = PropertyUtils.getBoolean(Constants.ENTERPRISE_WECHAT_ENABLE);
} catch (Exception e) {
logger.error(e.getMessage(),e);
}
if (isEnable == null) {
return false;
}
return isEnable;
}
/**
@ -253,14 +255,13 @@ public class EnterpriseWeChatUtils {
/**
* Determine the markdown style based on the show type of the alert
* @param alert the alert
* @return the markdown alert table/text
*/
public static String markdownByAlert(Alert alert){
public static String markdownByAlert(AlertData alert){
String result = "";
if (alert.getShowType() == ShowType.TABLE) {
if (alert.getShowType().equals(ShowType.TABLE.getDescp())) {
result = markdownTable(alert.getTitle(),alert.getContent());
}else if(alert.getShowType() == ShowType.TEXT){
}else if(alert.getShowType().equals(ShowType.TEXT.getDescp())){
result = markdownText(alert.getTitle(),alert.getContent());
}
return result;

16
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/MailUtils.java

@ -74,7 +74,7 @@ public class MailUtils {
* @param showType the show type
* @return the result map
*/
public static Map<String,Object> sendMails(Collection<String> receivers, String title, String content,ShowType showType) {
public static Map<String,Object> sendMails(Collection<String> receivers, String title, String content,String showType) {
return sendMails(receivers, null, title, content, showType);
}
@ -87,7 +87,7 @@ public class MailUtils {
* @param showType the show type
* @return the send result
*/
public static Map<String,Object> sendMails(Collection<String> receivers, Collection<String> receiversCc, String title, String content, ShowType showType) {
public static Map<String,Object> sendMails(Collection<String> receivers, Collection<String> receiversCc, String title, String content, String showType) {
Map<String,Object> retMap = new HashMap<>();
retMap.put(Constants.STATUS, false);
@ -98,7 +98,7 @@ public class MailUtils {
receivers.removeIf(StringUtils::isEmpty);
if (showType == ShowType.TABLE || showType == ShowType.TEXT){
if (showType.equals(ShowType.TABLE.getDescp()) || showType.equals(ShowType.TEXT.getDescp())) {
// send email
HtmlEmail email = new HtmlEmail();
@ -125,10 +125,10 @@ public class MailUtils {
} catch (Exception e) {
handleException(receivers, retMap, e);
}
}else if (showType == ShowType.ATTACHMENT || showType == ShowType.TABLEATTACHMENT){
}else if (showType.equals(ShowType.ATTACHMENT.getDescp()) || showType.equals(ShowType.TABLEATTACHMENT.getDescp())) {
try {
String partContent = (showType == ShowType.ATTACHMENT ? "Please see the attachment " + title + Constants.EXCEL_SUFFIX_XLS : htmlTable(content,false));
String partContent = (showType.equals(ShowType.ATTACHMENT.getDescp()) ? "Please see the attachment " + title + Constants.EXCEL_SUFFIX_XLS : htmlTable(content,false));
attachment(receivers,receiversCc,title,content,partContent);
@ -290,7 +290,7 @@ public class MailUtils {
* @return the result map
* @throws EmailException
*/
private static Map<String, Object> getStringObjectMap(String title, String content, ShowType showType, Map<String, Object> retMap, HtmlEmail email) throws EmailException {
private static Map<String, Object> getStringObjectMap(String title, String content, String showType, Map<String, Object> retMap, HtmlEmail email) throws EmailException {
/**
* the subject of the message to be sent
@ -299,9 +299,9 @@ public class MailUtils {
/**
* to send information, you can use HTML tags in mail content because of the use of HtmlEmail
*/
if (showType == ShowType.TABLE) {
if (showType.equals(ShowType.TABLE.getDescp())) {
email.setMsg(htmlTable(content));
} else if (showType == ShowType.TEXT) {
} else if (showType.equals(ShowType.TEXT.getDescp())) {
email.setMsg(htmlText(content));
}

2
dolphinscheduler-alert/src/main/resources/alert.properties

@ -45,5 +45,5 @@ enterprise.wechat.enable=false
#enterprise.wechat.team.send.msg={\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}
#enterprise.wechat.user.send.msg={\"touser\":\"$toUser\",\"agentid\":\"$agentId\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"$msg\"}}
plugin.dir=/Users/xx/your/path/to/plugin/dir

80
dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/plugin/EmailAlertPluginTest.java

@ -0,0 +1,80 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.alert.plugin;
import org.apache.dolphinscheduler.alert.utils.Constants;
import org.apache.dolphinscheduler.common.enums.ShowType;
import org.apache.dolphinscheduler.plugin.api.AlertPlugin;
import org.apache.dolphinscheduler.plugin.model.AlertData;
import org.apache.dolphinscheduler.plugin.model.AlertInfo;
import org.apache.dolphinscheduler.plugin.model.PluginName;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import static org.junit.Assert.*;
public class EmailAlertPluginTest {
private static final Logger logger = LoggerFactory.getLogger(EmailAlertPluginTest.class);
private AlertPlugin plugin;
@Before
public void before() {
plugin = new EmailAlertPlugin();
}
@Test
public void getId() {
String id = plugin.getId();
assertEquals(Constants.PLUGIN_DEFAULT_EMAIL, id);
}
@Test
public void getName() {
PluginName pluginName = plugin.getName();
assertEquals(Constants.PLUGIN_DEFAULT_EMAIL_CH, pluginName.getChinese());
assertEquals(Constants.PLUGIN_DEFAULT_EMAIL_EN, pluginName.getEnglish());
}
@Test
public void process() {
AlertInfo alertInfo = new AlertInfo();
AlertData alertData = new AlertData();
alertData.setId(1)
.setAlertGroupId(1)
.setContent("[\"alarm time:2018-02-05\", \"service name:MYSQL_ALTER\", \"alarm name:MYSQL_ALTER_DUMP\", " +
"\"get the alarm exception.!,interface error,exception information:timed out\", \"request address:http://blog.csdn.net/dreamInTheWorld/article/details/78539286\"]")
.setLog("test log")
.setReceivers("xx@xx.com")
.setReceiversCc("xx@xx.com")
.setShowType(ShowType.TEXT.getDescp())
.setTitle("test title");
alertInfo.setAlertData(alertData);
List<String> list = new ArrayList<String>(){{ add("xx@xx.com"); }};
alertInfo.addProp("receivers", list);
Map<String, Object> ret = plugin.process(alertInfo);
assertFalse(Boolean.parseBoolean(String.valueOf(ret.get(Constants.STATUS))));
}
}

27
dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/utils/EnterpriseWeChatUtilsTest.java

@ -20,7 +20,9 @@ import com.alibaba.fastjson.JSON;
import org.apache.dolphinscheduler.common.enums.AlertType;
import org.apache.dolphinscheduler.common.enums.ShowType;
import org.apache.dolphinscheduler.dao.entity.Alert;
import org.apache.dolphinscheduler.plugin.model.AlertData;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.runner.RunWith;
@ -54,11 +56,19 @@ public class EnterpriseWeChatUtilsTest {
private static final String enterpriseWechatUsers="LiGang,journey";
private static final String msg = "hello world";
private static final String enterpriseWechatTeamSendMsg = "{\\\"toparty\\\":\\\"$toParty\\\",\\\"agentid\\\":\\\"$agentId\\\",\\\"msgtype\\\":\\\"text\\\",\\\"text\\\":{\\\"content\\\":\\\"$msg\\\"},\\\"safe\\\":\\\"0\\\"}";
private static final String enterpriseWechatUserSendMsg = "{\\\"touser\\\":\\\"$toUser\\\",\\\"agentid\\\":\\\"$agentId\\\",\\\"msgtype\\\":\\\"markdown\\\",\\\"markdown\\\":{\\\"content\\\":\\\"$msg\\\"}}";
@Test
public void testIsEnable(){
@Before
public void init(){
PowerMockito.mockStatic(PropertyUtils.class);
Mockito.when(PropertyUtils.getBoolean(Constants.ENTERPRISE_WECHAT_ENABLE)).thenReturn(true);
Mockito.when(PropertyUtils.getString(Constants.ENTERPRISE_WECHAT_USER_SEND_MSG)).thenReturn(enterpriseWechatUserSendMsg);
Mockito.when(PropertyUtils.getString(Constants.ENTERPRISE_WECHAT_TEAM_SEND_MSG)).thenReturn(enterpriseWechatTeamSendMsg);
}
@Test
public void testIsEnable(){
Boolean weChartEnable = EnterpriseWeChatUtils.isEnable();
Assert.assertTrue(weChartEnable);
}
@ -88,6 +98,7 @@ public class EnterpriseWeChatUtilsTest {
@Test
public void tesMakeUserSendMsg1(){
String sendMsg = EnterpriseWeChatUtils.makeUserSendMsg(enterpriseWechatUsers, enterpriseWechatAgentId, msg);
Assert.assertTrue(sendMsg.contains(enterpriseWechatUsers));
Assert.assertTrue(sendMsg.contains(enterpriseWechatAgentId));
@ -110,14 +121,22 @@ public class EnterpriseWeChatUtilsTest {
@Test
public void testMarkdownByAlertForText(){
Alert alertForText = createAlertForText();
String result = EnterpriseWeChatUtils.markdownByAlert(alertForText);
AlertData alertData = new AlertData();
alertData.setTitle(alertForText.getTitle())
.setShowType(alertForText.getShowType().getDescp())
.setContent(alertForText.getContent());
String result = EnterpriseWeChatUtils.markdownByAlert(alertData);
Assert.assertNotNull(result);
}
@Test
public void testMarkdownByAlertForTable(){
Alert alertForText = createAlertForTable();
String result = EnterpriseWeChatUtils.markdownByAlert(alertForText);
AlertData alertData = new AlertData();
alertData.setTitle(alertForText.getTitle())
.setShowType(alertForText.getShowType().getDescp())
.setContent(alertForText.getContent());
String result = EnterpriseWeChatUtils.markdownByAlert(alertData);
Assert.assertNotNull(result);
}

10
dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/utils/MailUtilsTest.java

@ -58,7 +58,7 @@ public class MailUtilsTest {
alert.setAlertType(AlertType.EMAIL);
alert.setAlertGroupId(4);
MailUtils.sendMails(Arrays.asList(receivers),Arrays.asList(receiversCc),alert.getTitle(),alert.getContent(), ShowType.TEXT);
MailUtils.sendMails(Arrays.asList(receivers),Arrays.asList(receiversCc),alert.getTitle(),alert.getContent(), ShowType.TEXT.getDescp());
}
@ -70,7 +70,7 @@ public class MailUtilsTest {
String[] mails = new String[]{"xx@xx.com"};
for(Alert alert : alerts){
MailUtils.sendMails(Arrays.asList(mails),"gaojing", alert.getContent(), alert.getShowType());
MailUtils.sendMails(Arrays.asList(mails),"gaojing", alert.getContent(), ShowType.TABLE.getDescp());
}
}
@ -111,7 +111,7 @@ public class MailUtilsTest {
alert.setContent(content);
alert.setAlertType(AlertType.EMAIL);
alert.setAlertGroupId(1);
MailUtils.sendMails(Arrays.asList(mails),"gaojing", alert.getContent(), ShowType.TABLE);
MailUtils.sendMails(Arrays.asList(mails),"gaojing", alert.getContent(), ShowType.TABLE.getDescp());
}
/**
@ -170,7 +170,7 @@ public class MailUtilsTest {
alert.setContent(content);
alert.setAlertType(AlertType.EMAIL);
alert.setAlertGroupId(1);
MailUtils.sendMails(Arrays.asList(mails),"gaojing",alert.getContent(),ShowType.ATTACHMENT);
MailUtils.sendMails(Arrays.asList(mails),"gaojing",alert.getContent(),ShowType.ATTACHMENT.getDescp());
}
@Test
@ -183,7 +183,7 @@ public class MailUtilsTest {
alert.setContent(content);
alert.setAlertType(AlertType.EMAIL);
alert.setAlertGroupId(1);
MailUtils.sendMails(Arrays.asList(mails),"gaojing",alert.getContent(),ShowType.TABLEATTACHMENT);
MailUtils.sendMails(Arrays.asList(mails),"gaojing",alert.getContent(),ShowType.TABLEATTACHMENT.getDescp());
}
}

67
dolphinscheduler-alert/src/test/resources/alert.properties

@ -1,67 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For unit test
#alert type is EMAIL/SMS
alert.type=EMAIL
# mail server configuration
mail.protocol=SMTP
mail.server.host=xxx.xxx.test
mail.server.port=25
mail.sender=xxx@xxx.com
mail.user=xxx@xxx.com
mail.passwd=111111
# Test double
test.server.factor=3.0
# Test NumberFormat
test.server.testnumber=abc
# Test array
test.server.list=xxx.xxx.test1,xxx.xxx.test2,xxx.xxx.test3
# Test enum
test.server.enum1=MASTER
test.server.enum2=DEAD_SERVER
test.server.enum3=abc
# TLS
mail.smtp.starttls.enable=true
# SSL
mail.smtp.ssl.enable=false
mail.smtp.ssl.trust=xxx.xxx.com
#xls file path,need create if not exist
xls.file.path=/tmp/xls
# Enterprise WeChat configuration
enterprise.wechat.enable=false
enterprise.wechat.corp.id=xxxxxxx
enterprise.wechat.secret=xxxxxxx
enterprise.wechat.agent.id=xxxxxxx
enterprise.wechat.users=xxxxxxx
enterprise.wechat.token.url=https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret
enterprise.wechat.push.url=https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token
enterprise.wechat.team.send.msg={\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}
enterprise.wechat.user.send.msg={\"touser\":\"$toUser\",\"agentid\":\"$agentId\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"$msg\"}}

48
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionController.java

@ -94,6 +94,30 @@ public class ProcessDefinitionController extends BaseController {
return returnDataList(result);
}
/**
* copy process definition
*
* @param loginUser login user
* @param projectName project name
* @param processId process definition id
* @return copy result code
*/
@ApiOperation(value = "copyProcessDefinition", notes= "COPY_PROCESS_DEFINITION_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "processId", value = "PROCESS_DEFINITION_ID", required = true, dataType = "Int", example = "100")
})
@PostMapping(value = "/copy")
@ResponseStatus(HttpStatus.OK)
@ApiException(COPY_PROCESS_DEFINITION_ERROR)
public Result copyProcessDefinition(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@ApiParam(name = "projectName", value = "PROJECT_NAME", required = true) @PathVariable String projectName,
@RequestParam(value = "processId", required = true) int processId) throws JsonProcessingException {
logger.info("copy process definition, login user:{}, project name:{}, process definition id:{}",
loginUser.getUserName(), projectName, processId);
Map<String, Object> result = processDefinitionService.copyProcessDefinition(loginUser, projectName, processId);
return returnDataList(result);
}
/**
* verify process definition name unique
*
@ -425,30 +449,30 @@ public class ProcessDefinitionController extends BaseController {
}
/**
* export process definition by id
* batch export process definition by ids
*
* @param loginUser login user
* @param projectName project name
* @param processDefinitionId process definition id
* @param processDefinitionIds process definition ids
* @param response response
*/
@ApiOperation(value = "exportProcessDefinitionById", notes= "EXPORT_PROCESS_DEFINITION_BY_ID_NOTES")
@ApiOperation(value = "batchExportProcessDefinitionByIds", notes= "BATCH_EXPORT_PROCESS_DEFINITION_BY_IDS_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "processDefinitionId", value = "PROCESS_DEFINITION_ID", required = true, dataType = "Int", example = "100")
@ApiImplicitParam(name = "processDefinitionIds", value = "PROCESS_DEFINITION_ID", required = true, dataType = "String")
})
@GetMapping(value = "/export")
@ResponseBody
public void exportProcessDefinitionById(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@PathVariable String projectName,
@RequestParam("processDefinitionId") Integer processDefinitionId,
HttpServletResponse response) {
public void batchExportProcessDefinitionByIds(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@ApiParam(name = "projectName", value = "PROJECT_NAME", required = true) @PathVariable String projectName,
@RequestParam("processDefinitionIds") String processDefinitionIds,
HttpServletResponse response) {
try {
logger.info("export process definition by id, login user:{}, project name:{}, process definition id:{}",
loginUser.getUserName(), projectName, processDefinitionId);
processDefinitionService.exportProcessDefinitionById(loginUser, projectName, processDefinitionId, response);
logger.info("batch export process definition by ids, login user:{}, project name:{}, process definition ids:{}",
loginUser.getUserName(), projectName, processDefinitionIds);
processDefinitionService.batchExportProcessDefinitionByIds(loginUser, projectName, processDefinitionIds, response);
} catch (Exception e) {
logger.error(Status.EXPORT_PROCESS_DEFINE_BY_ID_ERROR.getMsg(), e);
logger.error(Status.BATCH_EXPORT_PROCESS_DEFINE_BY_IDS_ERROR.getMsg(), e);
}
}

21
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java

@ -168,15 +168,13 @@ public enum Status {
PREVIEW_SCHEDULE_ERROR(10139,"preview schedule error", "预览调度配置错误"),
PARSE_TO_CRON_EXPRESSION_ERROR(10140,"parse cron to cron expression error", "解析调度表达式错误"),
SCHEDULE_START_TIME_END_TIME_SAME(10141,"The start time must not be the same as the end", "开始时间不能和结束时间一样"),
DELETE_TENANT_BY_ID_FAIL(100142,"delete tenant by id fail, for there are {0} process instances in executing using it", "删除租户失败,有[{0}]个运行中的工作流实例正在使用"),
DELETE_TENANT_BY_ID_FAIL_DEFINES(100143,"delete tenant by id fail, for there are {0} process definitions using it", "删除租户失败,有[{0}]个工作流定义正在使用"),
DELETE_TENANT_BY_ID_FAIL_USERS(100144,"delete tenant by id fail, for there are {0} users using it", "删除租户失败,有[{0}]个用户正在使用"),
DELETE_WORKER_GROUP_BY_ID_FAIL(100145,"delete worker group by id fail, for there are {0} process instances in executing using it", "删除Worker分组失败,有[{0}]个运行中的工作流实例正在使用"),
QUERY_WORKER_GROUP_FAIL(100146,"query worker group fail ", "查询worker分组失败"),
DELETE_WORKER_GROUP_FAIL(100147,"delete worker group fail ", "删除worker分组失败"),
DELETE_TENANT_BY_ID_FAIL(10142,"delete tenant by id fail, for there are {0} process instances in executing using it", "删除租户失败,有[{0}]个运行中的工作流实例正在使用"),
DELETE_TENANT_BY_ID_FAIL_DEFINES(10143,"delete tenant by id fail, for there are {0} process definitions using it", "删除租户失败,有[{0}]个工作流定义正在使用"),
DELETE_TENANT_BY_ID_FAIL_USERS(10144,"delete tenant by id fail, for there are {0} users using it", "删除租户失败,有[{0}]个用户正在使用"),
DELETE_WORKER_GROUP_BY_ID_FAIL(10145,"delete worker group by id fail, for there are {0} process instances in executing using it", "删除Worker分组失败,有[{0}]个运行中的工作流实例正在使用"),
QUERY_WORKER_GROUP_FAIL(10146,"query worker group fail ", "查询worker分组失败"),
DELETE_WORKER_GROUP_FAIL(10147,"delete worker group fail ", "删除worker分组失败"),
COPY_PROCESS_DEFINITION_ERROR(10148,"copy process definition error", "复制工作流错误"),
UDF_FUNCTION_NOT_EXIST(20001, "UDF function not found", "UDF函数不存在"),
UDF_FUNCTION_EXISTS(20002, "UDF function already exists", "UDF函数已存在"),
@ -216,8 +214,8 @@ public enum Status {
EXECUTE_PROCESS_INSTANCE_ERROR(50015,"execute process instance error", "操作工作流实例错误"),
CHECK_PROCESS_DEFINITION_ERROR(50016,"check process definition error", "检查工作流实例错误"),
QUERY_RECIPIENTS_AND_COPYERS_BY_PROCESS_DEFINITION_ERROR(50017,"query recipients and copyers by process definition error", "查询收件人和抄送人错误"),
DATA_IS_NOT_VALID(50017,"data %s not valid", "数据[%s]无效"),
DATA_IS_NULL(50018,"data %s is null", "数据[%s]不能为空"),
DATA_IS_NOT_VALID(50017,"data {0} not valid", "数据[{0}]无效"),
DATA_IS_NULL(50018,"data {0} is null", "数据[{0}]不能为空"),
PROCESS_NODE_HAS_CYCLE(50019,"process node has cycle", "流程节点间存在循环依赖"),
PROCESS_NODE_S_PARAMETER_INVALID(50020,"process node %s parameter invalid", "流程节点[%s]参数无效"),
PROCESS_DEFINE_STATE_ONLINE(50021, "process definition {0} is already on line", "工作流定义[{0}]已上线"),
@ -228,6 +226,7 @@ public enum Status {
BATCH_DELETE_PROCESS_DEFINE_BY_IDS_ERROR(50026,"batch delete process definition by ids {0} error", "批量删除工作流定义[{0}]错误"),
TENANT_NOT_SUITABLE(50027,"there is not any tenant suitable, please choose a tenant available.", "没有合适的租户,请选择可用的租户"),
EXPORT_PROCESS_DEFINE_BY_ID_ERROR(50028,"export process definition by id error", "导出工作流定义错误"),
BATCH_EXPORT_PROCESS_DEFINE_BY_IDS_ERROR(50028,"batch export process definition by ids error", "批量导出工作流定义错误"),
IMPORT_PROCESS_DEFINE_ERROR(50029,"import process definition error", "导入工作流定义错误"),
HDFS_NOT_STARTUP(60001,"hdfs not startup", "hdfs未启用"),
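One behavioural note on the hunk above: DATA_IS_NOT_VALID and DATA_IS_NULL switch from %s to {0} placeholders, matching the other entries in this enum. Assuming these messages are expanded with java.text.MessageFormat (an inference from the {0}/{1} style, not shown in the diff), the substitution behaves as in this sketch:

import java.text.MessageFormat;

public class StatusMessageSketch {
    public static void main(String[] args) {
        // MessageFormat fills {0} with the first argument; a %s placeholder would be left untouched
        String msg = MessageFormat.format("data {0} is null", "processDefinitionJson");
        System.out.println(msg); // prints: data processDefinitionJson is null
    }
}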

56
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/exceptions/ServiceException.java

@ -0,0 +1,56 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.exceptions;
import org.apache.dolphinscheduler.api.enums.Status;
/**
* service exception
*/
public class ServiceException extends RuntimeException {
/**
* code
*/
private Integer code;
public ServiceException() {
}
public ServiceException(Status status) {
super(status.getMsg());
this.code = status.getCode();
}
public ServiceException(Integer code,String message) {
super(message);
this.code = code;
}
public ServiceException(String message) {
super(message);
}
public Integer getCode() {
return this.code;
}
public void setCode(Integer code) {
this.code = code;
}
}
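A brief usage sketch for the new exception type; the failing operation is hypothetical and only the constructors and accessors defined above are used:

import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ServiceException;

public class ServiceExceptionSketch {
    public static void main(String[] args) {
        try {
            // hypothetical failure path: wrap a Status so callers see both code and message
            throw new ServiceException(Status.HDFS_NOT_STARTUP);
        } catch (ServiceException e) {
            System.out.println(e.getCode() + " : " + e.getMessage());
        }
    }
}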

33
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ExecutorService.java

@ -21,6 +21,7 @@ import org.apache.dolphinscheduler.api.enums.ExecuteType;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.*;
import org.apache.dolphinscheduler.common.model.Server;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
@ -59,7 +60,7 @@ public class ExecutorService extends BaseService{
private ProcessDefinitionMapper processDefinitionMapper;
@Autowired
private ProcessDefinitionService processDefinitionService;
private MonitorService monitorService;
@Autowired
@ -123,6 +124,12 @@ public class ExecutorService extends BaseService{
return result;
}
// check master exists
if (!checkMasterExists(result)) {
return result;
}
/**
* create command
*/
@ -143,6 +150,22 @@ public class ExecutorService extends BaseService{
return result;
}
/**
* check whether master exists
* @param result result
* @return true if a master server exists, otherwise false
*/
private boolean checkMasterExists(Map<String, Object> result) {
// check master server exists
List<Server> masterServers = monitorService.getServerListFromZK(true);
// no master
if (masterServers.size() == 0) {
putMsg(result, Status.MASTER_NOT_EXISTS);
return false;
}
return true;
}
/**
@ -186,6 +209,12 @@ public class ExecutorService extends BaseService{
return checkResult;
}
// check master exists
if (!checkMasterExists(result)) {
return result;
}
ProcessInstance processInstance = processService.findProcessInstanceDetailById(processInstanceId);
if (processInstance == null) {
putMsg(result, Status.PROCESS_INSTANCE_NOT_EXIST, processInstanceId);
@ -499,7 +528,7 @@ public class ExecutorService extends BaseService{
// determine whether to complement
if(commandType == CommandType.COMPLEMENT_DATA){
runMode = (runMode == null) ? RunMode.RUN_MODE_SERIAL : runMode;
if(null != start && null != end && start.before(end)){
if(null != start && null != end && !start.after(end)){
if(runMode == RunMode.RUN_MODE_SERIAL){
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, DateUtils.dateToString(start));
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, DateUtils.dateToString(end));
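The complement-data guard above changes from start.before(end) to !start.after(end); the practical effect is that a range whose start and end fall on the same instant (a one-day complement) is now accepted instead of silently skipped. A minimal sketch of the boundary case, with placeholder dates:

import java.util.Date;

public class ComplementRangeBoundarySketch {
    public static void main(String[] args) {
        Date start = new Date(1577836800000L); // 2020-01-01 00:00:00 UTC
        Date end = new Date(start.getTime());  // same instant: a single-day complement

        System.out.println(start.before(end));  // false -> the old check rejected this range
        System.out.println(!start.after(end));  // true  -> the new check accepts start == end
    }
}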

33
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/LoggerService.java

@ -65,25 +65,24 @@ public class LoggerService {
TaskInstance taskInstance = processService.findTaskInstanceById(taskInstId);
if (taskInstance == null){
return new Result(Status.TASK_INSTANCE_NOT_FOUND.getCode(), Status.TASK_INSTANCE_NOT_FOUND.getMsg());
}
String host = Host.of(taskInstance.getHost()).getIp();
if(StringUtils.isEmpty(host)){
if (taskInstance == null || StringUtils.isBlank(taskInstance.getHost())){
return new Result(Status.TASK_INSTANCE_NOT_FOUND.getCode(), Status.TASK_INSTANCE_NOT_FOUND.getMsg());
}
String host = getHost(taskInstance.getHost());
Result result = new Result(Status.SUCCESS.getCode(), Status.SUCCESS.getMsg());
logger.info("log host : {} , logPath : {} , logServer port : {}",host,taskInstance.getLogPath(),Constants.RPC_PORT);
String log = logClient.rollViewLog(host, Constants.RPC_PORT, taskInstance.getLogPath(),skipLineNum,limit);
result.setData(log);
logger.info(log);
return result;
}
/**
* get log bytes of the task instance
*
@ -92,10 +91,24 @@ public class LoggerService {
*/
public byte[] getLogBytes(int taskInstId) {
TaskInstance taskInstance = processService.findTaskInstanceById(taskInstId);
if (taskInstance == null){
throw new RuntimeException("task instance is null");
if (taskInstance == null || StringUtils.isBlank(taskInstance.getHost())){
throw new RuntimeException("task instance is null or host is null");
}
String host = Host.of(taskInstance.getHost()).getIp();
String host = getHost(taskInstance.getHost());
return logClient.getLogBytes(host, Constants.RPC_PORT, taskInstance.getLogPath());
}
/**
* get host
* @param address address
* @return the host ip, or the address itself if it is in the old-version format
*/
private String getHost(String address){
if (Host.isOldVersion(address)){
return address;
}
return Host.of(address).getIp();
}
}

307
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java

@ -112,8 +112,13 @@ public class ProcessDefinitionService extends BaseDAGService {
* @return create result code
* @throws JsonProcessingException JsonProcessingException
*/
public Map<String, Object> createProcessDefinition(User loginUser, String projectName, String name,
String processDefinitionJson, String desc, String locations, String connects) throws JsonProcessingException {
public Map<String, Object> createProcessDefinition(User loginUser,
String projectName,
String name,
String processDefinitionJson,
String desc,
String locations,
String connects) throws JsonProcessingException {
Map<String, Object> result = new HashMap<>(5);
Project project = projectMapper.queryByName(projectName);
@ -281,6 +286,41 @@ public class ProcessDefinitionService extends BaseDAGService {
return result;
}
/**
* copy process definition
*
* @param loginUser login user
* @param projectName project name
* @param processId process definition id
* @return copy result code
*/
public Map<String, Object> copyProcessDefinition(User loginUser, String projectName, Integer processId) throws JsonProcessingException{
Map<String, Object> result = new HashMap<>(5);
Project project = projectMapper.queryByName(projectName);
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultStatus = (Status) checkResult.get(Constants.STATUS);
if (resultStatus != Status.SUCCESS) {
return checkResult;
}
ProcessDefinition processDefinition = processDefineMapper.selectById(processId);
if (processDefinition == null) {
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, processId);
return result;
} else {
return createProcessDefinition(
loginUser,
projectName,
processDefinition.getName()+"_copy_"+System.currentTimeMillis(),
processDefinition.getProcessDefinitionJson(),
processDefinition.getDescription(),
processDefinition.getLocations(),
processDefinition.getConnects());
}
}
/**
* update process definition
*
@ -523,14 +563,18 @@ public class ProcessDefinitionService extends BaseDAGService {
}
/**
* export process definition by id
*
* @param loginUser login user
* @param projectName project name
* @param processDefinitionId process definition id
* @param response response
* batch export process definition by ids
* @param loginUser
* @param projectName
* @param processDefinitionIds
* @param response
*/
public void exportProcessDefinitionById(User loginUser, String projectName, Integer processDefinitionId, HttpServletResponse response) {
public void batchExportProcessDefinitionByIds(User loginUser, String projectName, String processDefinitionIds, HttpServletResponse response){
if(StringUtils.isEmpty(processDefinitionIds)){
return;
}
//export project info
Project project = projectMapper.queryByName(projectName);
@ -538,39 +582,68 @@ public class ProcessDefinitionService extends BaseDAGService {
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultStatus = (Status) checkResult.get(Constants.STATUS);
if (resultStatus == Status.SUCCESS) {
if(resultStatus != Status.SUCCESS){
return;
}
List<ProcessMeta> processDefinitionList =
getProcessDefinitionList(processDefinitionIds);
if(CollectionUtils.isNotEmpty(processDefinitionList)){
downloadProcessDefinitionFile(response, processDefinitionList);
}
}
/**
* get process definition list by ids
* @param processDefinitionIds
* @return
*/
private List<ProcessMeta> getProcessDefinitionList(String processDefinitionIds){
List<ProcessMeta> processDefinitionList = new ArrayList<>();
String[] processDefinitionIdArray = processDefinitionIds.split(",");
for (String strProcessDefinitionId : processDefinitionIdArray) {
//get workflow info
int processDefinitionId = Integer.parseInt(strProcessDefinitionId);
ProcessDefinition processDefinition = processDefineMapper.queryByDefineId(processDefinitionId);
if (null != processDefinition) {
String exportProcessJson = exportProcessMetaDataStr(processDefinitionId, processDefinition);
response.setContentType(MediaType.APPLICATION_JSON_UTF8_VALUE);
response.setHeader("Content-Disposition", "attachment;filename="+processDefinition.getName()+".json");
BufferedOutputStream buff = null;
ServletOutputStream out = null;
processDefinitionList.add(exportProcessMetaData(processDefinitionId, processDefinition));
}
}
return processDefinitionList;
}
/**
* download the process definition file
* @param response
* @param processDefinitionList
*/
private void downloadProcessDefinitionFile(HttpServletResponse response, List<ProcessMeta> processDefinitionList) {
response.setContentType(MediaType.APPLICATION_JSON_UTF8_VALUE);
BufferedOutputStream buff = null;
ServletOutputStream out = null;
try {
out = response.getOutputStream();
buff = new BufferedOutputStream(out);
buff.write(JSON.toJSONString(processDefinitionList).getBytes(StandardCharsets.UTF_8));
buff.flush();
buff.close();
} catch (IOException e) {
logger.warn("export process fail", e);
}finally {
if (null != buff) {
try {
out = response.getOutputStream();
buff = new BufferedOutputStream(out);
buff.write(exportProcessJson.getBytes(StandardCharsets.UTF_8));
buff.flush();
buff.close();
} catch (IOException e) {
logger.warn("export process fail", e);
}finally {
if (null != buff) {
try {
buff.close();
} catch (Exception e) {
logger.warn("export process buffer not close", e);
}
}
if (null != out) {
try {
out.close();
} catch (Exception e) {
logger.warn("export process output stream not close", e);
}
}
} catch (Exception e) {
logger.warn("export process buffer not close", e);
}
}
if (null != out) {
try {
out.close();
} catch (Exception e) {
logger.warn("export process output stream not close", e);
}
}
}
@ -583,6 +656,17 @@ public class ProcessDefinitionService extends BaseDAGService {
* @return export process metadata string
*/
public String exportProcessMetaDataStr(Integer processDefinitionId, ProcessDefinition processDefinition) {
//create workflow json file
return JSONUtils.toJsonString(exportProcessMetaData(processDefinitionId,processDefinition));
}
/**
* get export process metadata string
* @param processDefinitionId process definition id
* @param processDefinition process definition
* @return export process metadata string
*/
public ProcessMeta exportProcessMetaData(Integer processDefinitionId, ProcessDefinition processDefinition) {
//correct task param which has data source or dependent param
String correctProcessDefinitionJson = addExportTaskNodeSpecialParam(processDefinition.getProcessDefinitionJson());
processDefinition.setProcessDefinitionJson(correctProcessDefinitionJson);
@ -599,14 +683,6 @@ public class ProcessDefinitionService extends BaseDAGService {
List<Schedule> schedules = scheduleMapper.queryByProcessDefinitionId(processDefinitionId);
if (!schedules.isEmpty()) {
Schedule schedule = schedules.get(0);
/*WorkerGroup workerGroup = workerGroupMapper.selectById(schedule.getWorkerGroupId());
if (null == workerGroup && schedule.getWorkerGroupId() == -1) {
workerGroup = new WorkerGroup();
workerGroup.setId(-1);
workerGroup.setName("");
}*/
exportProcessMeta.setScheduleWarningType(schedule.getWarningType().toString());
exportProcessMeta.setScheduleWarningGroupId(schedule.getWarningGroupId());
exportProcessMeta.setScheduleStartTime(DateUtils.dateToString(schedule.getStartTime()));
@@ -618,7 +694,7 @@ public class ProcessDefinitionService extends BaseDAGService {
exportProcessMeta.setScheduleWorkerGroupName(schedule.getWorkerGroup());
}
//create workflow json file
return exportProcessMeta;
}
/**
@@ -665,24 +741,36 @@ public class ProcessDefinitionService extends BaseDAGService {
public Map<String, Object> importProcessDefinition(User loginUser, MultipartFile file, String currentProjectName) {
Map<String, Object> result = new HashMap<>(5);
String processMetaJson = FileUtils.file2String(file);
List<ProcessMeta> processMetaList = JSON.parseArray(processMetaJson, ProcessMeta.class);
//check file content
if (CollectionUtils.isEmpty(processMetaList)) {
putMsg(result, Status.DATA_IS_NULL, "fileContent");
return result;
}
for (ProcessMeta processMeta : processMetaList) {
if (!checkAndImportProcessDefinition(loginUser, currentProjectName, result, processMeta)) {
return result;
}
}
return result;
}
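Since the import now calls JSON.parseArray, the uploaded file is expected to contain a JSON array of process meta objects (the shape the batch export above produces), and each element is handed to checkAndImportProcessDefinition with an early return on the first failure. Below is a small standalone sketch of that payload shape; it uses plain fastjson JSONObject so it runs without the ProcessMeta class, and the field names simply mirror the getters checked in checkImportanceParams.

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import java.util.List;

public class ImportPayloadDemo {
    public static void main(String[] args) {
        // two exported definitions wrapped in a JSON array - the shape the new import path expects
        String processMetaJson = "["
                + "{\"projectName\":\"demo\",\"processDefinitionName\":\"wf_a\",\"processDefinitionJson\":\"{}\"},"
                + "{\"projectName\":\"demo\",\"processDefinitionName\":\"wf_b\",\"processDefinitionJson\":\"{}\"}"
                + "]";

        // the service itself does JSON.parseArray(processMetaJson, ProcessMeta.class)
        List<JSONObject> processMetaList = JSON.parseArray(processMetaJson, JSONObject.class);
        System.out.println(processMetaList.size());                                    // 2
        System.out.println(processMetaList.get(0).getString("processDefinitionName")); // wf_a
    }
}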
/**
* check and import process definition
* @param loginUser login user
* @param currentProjectName current project name
* @param result result map
* @param processMeta process meta
* @return true if the process definition is imported successfully
*/
private boolean checkAndImportProcessDefinition(User loginUser, String currentProjectName, Map<String, Object> result, ProcessMeta processMeta) {
if(!checkImportanceParams(processMeta,result)){
return false;
}
//deal with process name
@@ -694,31 +782,84 @@ public class ProcessDefinitionService extends BaseDAGService {
processDefinitionName, 1);
}
// get create process result
Map<String, Object> createProcessResult =
getCreateProcessResult(loginUser,
currentProjectName,
result,
processMeta,
processDefinitionName,
addImportTaskNodeParam(loginUser, processMeta.getProcessDefinitionJson(), targetProject));
if(createProcessResult == null){
return false;
}
//create process definition
Integer processDefinitionId =
Objects.isNull(createProcessResult.get("processDefinitionId"))?
null:Integer.parseInt(createProcessResult.get("processDefinitionId").toString());
//scheduler param
return getImportProcessScheduleResult(loginUser,
currentProjectName,
result,
processMeta,
processDefinitionName,
processDefinitionId);
}
/**
* get create process result
* @param loginUser login user
* @param currentProjectName current project name
* @param result result map
* @param processMeta process meta
* @param processDefinitionName process definition name
* @param importProcessParam import process param json
* @return create process result map, or null if the definition could not be created
*/
private Map<String, Object> getCreateProcessResult(User loginUser,
String currentProjectName,
Map<String, Object> result,
ProcessMeta processMeta,
String processDefinitionName,
String importProcessParam){
Map<String, Object> createProcessResult = null;
try {
createProcessResult = createProcessDefinition(loginUser
,currentProjectName,
processDefinitionName+"_import_"+System.currentTimeMillis(),
importProcessParam,
processMeta.getProcessDefinitionDescription(),
processMeta.getProcessDefinitionLocations(),
processMeta.getProcessDefinitionConnects());
putMsg(result, Status.SUCCESS);
} catch (JsonProcessingException e) {
logger.error("import process meta json data: {}", e.getMessage(), e);
putMsg(result, Status.IMPORT_PROCESS_DEFINE_ERROR);
}
return createProcessResult;
}
/**
* get import process schedule result
* @param loginUser login user
* @param currentProjectName current project name
* @param result result map
* @param processMeta process meta
* @param processDefinitionName process definition name
* @param processDefinitionId process definition id
* @return true if the schedule is imported successfully or no schedule is defined
*/
private boolean getImportProcessScheduleResult(User loginUser,
String currentProjectName,
Map<String, Object> result,
ProcessMeta processMeta,
String processDefinitionName,
Integer processDefinitionId) {
if (null != processMeta.getScheduleCrontab() && null != processDefinitionId) {
int scheduleInsert = importProcessSchedule(loginUser,
currentProjectName,
@@ -728,11 +869,33 @@ public class ProcessDefinitionService extends BaseDAGService {
if (0 == scheduleInsert) {
putMsg(result, Status.IMPORT_PROCESS_DEFINE_ERROR);
return false;
}
}
return true;
}
/**
* check importance params
* @param processMeta process meta
* @param result result map
* @return true if the required params are present
*/
private boolean checkImportanceParams(ProcessMeta processMeta,Map<String, Object> result){
if (StringUtils.isEmpty(processMeta.getProjectName())) {
putMsg(result, Status.DATA_IS_NULL, "projectName");
return false;
}
if (StringUtils.isEmpty(processMeta.getProcessDefinitionName())) {
putMsg(result, Status.DATA_IS_NULL, "processDefinitionName");
return false;
}
if (StringUtils.isEmpty(processMeta.getProcessDefinitionJson())) {
putMsg(result, Status.DATA_IS_NULL, "processDefinitionJson");
return false;
}
return true;
}
/**

93
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java

@@ -26,6 +26,7 @@ import org.apache.dolphinscheduler.api.dto.resources.filter.ResourceFilter;
import org.apache.dolphinscheduler.api.dto.resources.visitor.ResourceTreeVisitor;
import org.apache.dolphinscheduler.api.dto.resources.visitor.Visitor;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ServiceException;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
@@ -44,8 +45,10 @@ import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.web.multipart.MultipartFile;
import java.io.IOException;
import java.text.MessageFormat;
import java.util.*;
import java.util.regex.Matcher;
import java.util.stream.Collectors;
import static org.apache.dolphinscheduler.common.Constants.*;
@@ -234,9 +237,6 @@ public class ResourcesService extends BaseService {
}
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,false,desc,file.getOriginalFilename(),loginUser.getId(),type,file.getSize(),now,now);
try {
@@ -317,7 +317,6 @@ public class ResourcesService extends BaseService {
return result;
}
if (name.equals(resource.getAlias()) && desc.equals(resource.getDescription())) {
putMsg(result, Status.SUCCESS);
return result;
@@ -325,9 +324,10 @@ public class ResourcesService extends BaseService {
//check whether the resource already exists
String originFullName = resource.getFullName();
String originResourceName = resource.getAlias();
String fullName = String.format("%s%s",originFullName.substring(0,originFullName.lastIndexOf("/")+1),name);
if (!originResourceName.equals(name) && checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource {} already exists, can't recreate", name);
putMsg(result, Status.RESOURCE_EXIST);
return result;
@@ -338,11 +338,24 @@ public class ResourcesService extends BaseService {
if (StringUtils.isEmpty(tenantCode)){
return result;
}
// verify whether the resource exists in storage
// get the path of origin file in storage
String originHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,originFullName);
try {
if (!HadoopUtils.getInstance().exists(originHdfsFileName)) {
logger.error("{} not exist", originHdfsFileName);
putMsg(result,Status.RESOURCE_NOT_EXIST);
return result;
}
} catch (IOException e) {
logger.error(e.getMessage(),e);
throw new ServiceException(Status.HDFS_OPERATION_ERROR);
}
String nameWithSuffix = name;
if (!resource.isDirectory()) {
//get the file suffix
String suffix = originResourceName.substring(originResourceName.lastIndexOf("."));
//if the name has no suffix then add it, else use the origin name
@@ -352,7 +365,7 @@ public class ResourcesService extends BaseService {
}
// updateResource data
List<Integer> childrenResource = listAllChildren(resource,false);
String oldFullName = resource.getFullName();
Date now = new Date();
@@ -364,10 +377,11 @@ public class ResourcesService extends BaseService {
try {
resourcesMapper.updateById(resource);
if (resource.isDirectory() && CollectionUtils.isNotEmpty(childrenResource)) {
String matcherFullName = Matcher.quoteReplacement(fullName);
List<Resource> childResourceList = new ArrayList<>();
List<Resource> resourceList = resourcesMapper.listResourceByIds(childrenResource.toArray(new Integer[childrenResource.size()]));
childResourceList = resourceList.stream().map(t -> {
t.setFullName(t.getFullName().replaceFirst(oldFullName, matcherFullName));
t.setUpdateTime(now);
return t;
}).collect(Collectors.toList());
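The Matcher.quoteReplacement wrapper matters because String.replaceFirst treats '$' and '\' in the replacement string as group references and escapes; renaming a directory to a name containing such a character would otherwise abort the child-path rewrite. A standalone illustration with made-up paths:

import java.util.regex.Matcher;

public class QuoteReplacementDemo {
    public static void main(String[] args) {
        String oldFullName = "/dir1/";          // original directory prefix (made up)
        String fullName = "/price$report/";     // new directory name containing '$' (made up)
        String childFullName = "/dir1/sub/a.jar";

        // childFullName.replaceFirst(oldFullName, fullName) would throw
        // IllegalArgumentException: Illegal group reference, because "$r" is parsed
        // as a back-reference in the replacement string.
        String safe = childFullName.replaceFirst(oldFullName, Matcher.quoteReplacement(fullName));
        System.out.println(safe);               // /price$report/sub/a.jar
    }
}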
@@ -385,29 +399,24 @@ public class ResourcesService extends BaseService {
result.setData(resultMap);
} catch (Exception e) {
logger.error(Status.UPDATE_RESOURCE_ERROR.getMsg(), e);
throw new ServiceException(Status.UPDATE_RESOURCE_ERROR);
}
// if name unchanged, return directly without moving on HDFS
if (originResourceName.equals(name)) {
return result;
}
// get the path of dest file in hdfs
String destHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,fullName);
try {
logger.info("start hdfs copy {} -> {}", originHdfsFileName, destHdfsFileName);
HadoopUtils.getInstance().copy(originHdfsFileName, destHdfsFileName, true, true);
} catch (Exception e) {
logger.error(MessageFormat.format("hdfs copy {0} -> {1} fail", originHdfsFileName, destHdfsFileName), e);
throw new ServiceException(Status.HDFS_COPY_FAIL);
}
return result;
@@ -542,34 +551,6 @@ public class ResourcesService extends BaseService {
return result;
}
/**
* get all resources
* @param loginUser login user
* @return all resource set
*/
/*private Set<Resource> getAllResources(User loginUser, ResourceType type) {
int userId = loginUser.getId();
boolean listChildren = true;
if(isAdmin(loginUser)){
userId = 0;
listChildren = false;
}
List<Resource> resourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal());
Set<Resource> allResourceList = new HashSet<>(resourceList);
if (listChildren) {
Set<Integer> authorizedIds = new HashSet<>();
List<Resource> authorizedDirecoty = resourceList.stream().filter(t->t.getUserId() != loginUser.getId() && t.isDirectory()).collect(Collectors.toList());
if (CollectionUtils.isNotEmpty(authorizedDirecoty)) {
for(Resource resource : authorizedDirecoty){
authorizedIds.addAll(listAllChildren(resource));
}
List<Resource> childrenResources = resourcesMapper.listResourceByIds(authorizedIds.toArray(new Integer[authorizedIds.size()]));
allResourceList.addAll(childrenResources);
}
}
return allResourceList;
}*/
/**
* query resource list
*
@@ -580,8 +561,11 @@ public class ResourcesService extends BaseService {
public Map<String, Object> queryResourceJarList(User loginUser, ResourceType type) {
Map<String, Object> result = new HashMap<>(5);
int userId = loginUser.getId();
if(isAdmin(loginUser)){
userId = 0;
}
List<Resource> allResourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal(),0);
List<Resource> resources = new ResourceFilter(".jar",new ArrayList<>(allResourceList)).filter();
Visitor resourceTreeVisitor = new ResourceTreeVisitor(resources);
result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
@@ -631,7 +615,7 @@ public class ResourcesService extends BaseService {
Map<Integer, Set<Integer>> resourceProcessMap = ResourceProcessDefinitionUtils.getResourceProcessDefinitionMap(list);
Set<Integer> resourceIdSet = resourceProcessMap.keySet();
// get all children of the resource
List<Integer> allChildren = listAllChildren(resource,true);
Integer[] needDeleteResourceIdArray = allChildren.toArray(new Integer[allChildren.size()]);
//if resource type is UDF, need to check whether it is bound by a UDF function
@@ -1193,12 +1177,13 @@ public class ResourcesService extends BaseService {
/**
* list all children id
* @param resource resource
* @param containSelf whether to include the resource itself in the children list
* @return all children id
*/
List<Integer> listAllChildren(Resource resource,boolean containSelf){
List<Integer> childList = new ArrayList<>();
if (resource.getId() != -1 && containSelf) {
childList.add(resource.getId());
}
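The new containSelf flag only decides whether the resource's own id is part of the returned list: updateResource passes false because the renamed node is updated separately and only its descendants need the fullName rewrite, while delete passes true so the node is removed together with its children. A self-contained illustration of that semantics follows; the in-memory tree stands in for the mapper-backed lookup and is not the real implementation.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ListChildrenDemo {

    // hypothetical parent-id -> child-ids mapping standing in for the resource table
    private static final Map<Integer, List<Integer>> TREE = new HashMap<>();
    static {
        TREE.put(1, Arrays.asList(2, 3));   // directory 1 contains 2 and 3
        TREE.put(2, Arrays.asList(4));      // directory 2 contains file 4
    }

    static List<Integer> listAllChildren(int resourceId, boolean containSelf) {
        List<Integer> childList = new ArrayList<>();
        if (resourceId != -1 && containSelf) {
            childList.add(resourceId);
        }
        for (int child : TREE.getOrDefault(resourceId, Collections.<Integer>emptyList())) {
            // descendants always include themselves once the recursion starts
            childList.addAll(listAllChildren(child, true));
        }
        return childList;
    }

    public static void main(String[] args) {
        System.out.println(listAllChildren(1, false)); // [2, 4, 3]    - rename case: descendants only
        System.out.println(listAllChildren(1, true));  // [1, 2, 4, 3] - delete case: include the node itself
    }
}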

1
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/SchedulerService.java

@@ -365,6 +365,7 @@ public class SchedulerService extends BaseService {
if (masterServers.size() == 0) {
putMsg(result, Status.MASTER_NOT_EXISTS);
return result;
}
// set status

14
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/utils/ZooKeeperState.java

@@ -33,7 +33,7 @@ public class ZooKeeperState {
private final String host;
private final int port;
private float minLatency = -1, avgLatency = -1, maxLatency = -1;
private long received = -1;
private long sent = -1;
private int outStanding = -1;
@@ -60,9 +60,9 @@ public class ZooKeeperState {
String line = scannerForStat.nextLine();
if (line.startsWith("Latency min/avg/max:")) {
String[] latencys = getStringValueFromLine(line).split("/");
minLatency = Float.parseFloat(latencys[0]);
avgLatency = Float.parseFloat(latencys[1]);
maxLatency = Float.parseFloat(latencys[2]);
} else if (line.startsWith("Received:")) {
received = Long.parseLong(getStringValueFromLine(line));
} else if (line.startsWith("Sent:")) {
@@ -165,15 +165,15 @@ public class ZooKeeperState {
return port;
}
public float getMinLatency() {
return minLatency;
}
public float getAvgLatency() {
return avgLatency;
}
public float getMaxLatency() {
return maxLatency;
}
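The widening from int to float in ZooKeeperState (and in ZookeeperMonitor below) is driven by the Latency line of ZooKeeper's stat/srvr output, where the average can carry a decimal point on some ZooKeeper versions; Integer.parseInt rejects such a value. A standalone sketch of the difference, with made-up sample values in that format:

public class LatencyParseDemo {
    public static void main(String[] args) {
        // sample line in the format of ZooKeeper's 'stat'/'srvr' four-letter-word output
        String line = "Latency min/avg/max: 0/0.52/13";
        String[] latencys = line.substring(line.indexOf(':') + 1).trim().split("/");

        // Integer.parseInt(latencys[1]) would throw NumberFormatException on "0.52",
        // which is why the fields and getters were widened to float.
        float minLatency = Float.parseFloat(latencys[0]);
        float avgLatency = Float.parseFloat(latencys[1]);
        float maxLatency = Float.parseFloat(latencys[2]);

        System.out.println(minLatency + "/" + avgLatency + "/" + maxLatency); // 0.0/0.52/13.0
    }
}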

6
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/utils/ZookeeperMonitor.java

@@ -88,9 +88,9 @@ public class ZookeeperMonitor extends AbstractZKClient {
long sent = state.getSent();
long received = state.getReceived();
String mode = state.getMode();
float minLatency = state.getMinLatency();
float avgLatency = state.getAvgLatency();
float maxLatency = state.getMaxLatency();
int nodeCount = state.getNodeCount();
int status = ok ? 1 : 0;
Date date = new Date();

Some files were not shown because too many files have changed in this diff.