
Merge remote-tracking branch 'upstream/dev' into dev

# Conflicts:
#	dolphinscheduler-server/src/main/resources/config/install_config.conf
#	dolphinscheduler-server/src/main/resources/worker.properties
#	script/scp-hosts.sh
pull/3/MERGE
dailidong committed 4 years ago, commit 6f36c6cc7e
  1. 6
      .github/ISSUE_TEMPLATE/bug_report.md
  2. 9
      .github/ISSUE_TEMPLATE/feature_request.md
  3. 24
      .github/ISSUE_TEMPLATE/improvement_suggestion.md
  4. 10
      .github/ISSUE_TEMPLATE/question.md
  5. 24
      .github/ISSUE_TEMPLATE/test.md
  6. 6
      .github/workflows/ci_e2e.yml
  7. 12
      .github/workflows/ci_ut.yml
  8. 17
      LICENSE
  9. 84
      NOTICE
  10. 17
      README.md
  11. 17
      README_zh_CN.md
  12. 106
      ambari_plugin/README.md
  13. 4
      ambari_plugin/common-services/DOLPHIN/1.2.1/package/scripts/params.py
  14. 2
      ambari_plugin/common-services/DOLPHIN/1.2.1/package/templates/dolphin-daemon.j2
  15. 62
      ambari_plugin/common-services/DOLPHIN/1.3.0/alerts.json
  16. 6
      ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-alert.xml
  17. 261
      ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-datasource.xml
  18. 9
      ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-worker.xml
  19. 8
      ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-zookeeper.xml
  20. 4
      ambari_plugin/common-services/DOLPHIN/1.3.0/metainfo.xml
  21. 3
      ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_alert_service.py
  22. 3
      ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_api_service.py
  23. 4
      ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_logger_service.py
  24. 3
      ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_master_service.py
  25. 3
      ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_worker_service.py
  26. 15
      ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/params.py
  27. 2
      ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/dolphin-daemon.sh.j2
  28. 2
      docker/build/Dockerfile
  29. 66
      docker/build/README.md
  30. 66
      docker/build/README_zh_CN.md
  31. 14
      docker/build/conf/dolphinscheduler/alert.properties.tpl
  32. 12
      docker/build/conf/dolphinscheduler/common.properties.tpl
  33. 15
      docker/build/conf/dolphinscheduler/datasource.properties.tpl
  34. 2
      docker/build/conf/dolphinscheduler/logback/logback-alert.xml
  35. 2
      docker/build/conf/dolphinscheduler/zookeeper.properties.tpl
  36. 23
      docker/build/startup-init-conf.sh
  37. 40
      docker/build/startup.sh
  38. 53
      docker/docker-swarm/docker-compose.yml
  39. 52
      docker/docker-swarm/docker-stack.yml
  40. 2
      docker/kubernetes/dolphinscheduler/Chart.yaml
  41. 42
      docker/kubernetes/dolphinscheduler/README.md
  42. 25
      docker/kubernetes/dolphinscheduler/requirements.yaml
  43. 13
      docker/kubernetes/dolphinscheduler/templates/NOTES.txt
  44. 8
      docker/kubernetes/dolphinscheduler/templates/_helpers.tpl
  45. 1
      docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-alert.yaml
  46. 35
      docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-common.yaml
  47. 98
      docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml
  48. 93
      docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml
  49. 17
      docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-frontend.yaml
  50. 93
      docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml
  51. 163
      docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml
  52. 87
      docker/kubernetes/dolphinscheduler/values.yaml
  53. 7
      dolphinscheduler-alert/pom.xml
  54. 15
      dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/manager/EmailManager.java
  55. 16
      dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/plugin/EmailAlertPlugin.java
  56. 12
      dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/runner/AlertSender.java
  57. 36
      dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/template/impl/DefaultHTMLTemplate.java
  58. 6
      dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/Constants.java
  59. 4
      dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/DingTalkUtils.java
  60. 107
      dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/EnterpriseWeChatUtils.java
  61. 1
      dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/ExcelUtils.java
  62. 69
      dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/JSONUtils.java
  63. 5
      dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/MailUtils.java
  64. 8
      dolphinscheduler-alert/src/main/resources/alert.properties
  65. 2
      dolphinscheduler-alert/src/main/resources/logback-alert.xml
  66. 2
      dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/plugin/EmailAlertPluginTest.java
  67. 45
      dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/template/impl/DefaultHTMLTemplateTest.java
  68. 5
      dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/utils/DingTalkUtilsTest.java
  69. 15
      dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/utils/EnterpriseWeChatUtilsTest.java
  70. 112
      dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/utils/JSONUtilsTest.java
  71. 1
      dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/utils/MailUtilsTest.java
  72. 7
      dolphinscheduler-api/pom.xml
  73. 3
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/configuration/AppConfiguration.java
  74. 2
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/DataSourceController.java
  75. 2
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java
  76. 31
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/UsersController.java
  77. 4
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/ScheduleParam.java
  78. 5
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/gantt/Task.java
  79. 4
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/Directory.java
  80. 4
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/FileLeaf.java
  81. 28
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/ResourceComponent.java
  82. 9
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/visitor/Visitor.java
  83. 4
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/treeview/Instance.java
  84. 6
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java
  85. 18
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/interceptor/LoginHandlerInterceptor.java
  86. 2
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/BaseDAGService.java
  87. 91
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/DataSourceService.java
  88. 12
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ExecutorService.java
  89. 48
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/LoggerService.java
  90. 54
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/MonitorService.java
  91. 417
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java
  92. 33
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessInstanceService.java
  93. 6
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProjectService.java
  94. 75
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java
  95. 28
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/SchedulerService.java
  96. 6
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/TenantService.java
  97. 98
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/UsersService.java
  98. 7
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/WorkerGroupService.java
  99. 4
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/utils/CheckUtils.java
  100. 5
      dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/utils/ZookeeperMonitor.java
  101. Some files were not shown because too many files have changed in this diff.

6
.github/ISSUE_TEMPLATE/bug_report.md

@@ -1,7 +1,7 @@
---
name: Bug report
about: Create a report to help us improve
title: "[BUG] bug title "
title: "[Bug][Module Name] Bug title "
labels: bug
assignees: ''
@@ -9,6 +9,8 @@ assignees: ''
*For better global communication, please give priority to using an English description, thanks!*
*Please review https://dolphinscheduler.apache.org/en-us/docs/development/issue.html when describing an issue.*
**Describe the bug**
A clear and concise description of what the bug is.
@@ -32,5 +34,5 @@ If applicable, add screenshots to help explain your problem.
**Additional context**
Add any other context about the problem here.
**Requirement or improvement
**Requirement or improvement**
- Please describe your requirements or improvement suggestions.

9
.github/ISSUE_TEMPLATE/feature_request.md

@@ -1,12 +1,19 @@
---
name: Feature request
about: Suggest an idea for this project
title: "[Feature]"
title: "[Feature][Module Name] Feature title"
labels: new feature
assignees: ''
---
*For better global communication, please give priority to using an English description, thanks!*
*Please review https://dolphinscheduler.apache.org/en-us/docs/development/issue.html when describing an issue.*
**Describe the feature**
A clear and concise description of what the feature is.
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

24
.github/ISSUE_TEMPLATE/improvement_suggestion.md

@@ -0,0 +1,24 @@
---
name: Improvement suggestion
about: Improvement suggestion for this project
title: "[Improvement][Module Name] Improvement title"
labels: improvement
assignees: ''
---
*For better global communication, please give priority to using an English description, thanks!*
*Please review https://dolphinscheduler.apache.org/en-us/docs/development/issue.html when describing an issue.*
**Describe the question**
A clear and concise description of what the improvement is.
**What are the current deficiencies and the benefits of improvement**
- A clear and concise description of the current deficiencies and the benefits of this improvement.
**Which version of DolphinScheduler:**
- [1.1.0-preview]
**Describe alternatives you've considered**
A clear and concise description of any alternative improvement solutions you've considered.

10
.github/ISSUE_TEMPLATE/question.md

@@ -1,7 +1,7 @@
---
name: question
about: have a question wanted to be help
title: "[QUESTION] question title"
name: Question
about: Have a question you want help with
title: "[Question] Question title"
labels: question
assignees: ''
@@ -9,6 +9,8 @@ assignees: ''
*For better global communication, please give priority to using an English description, thanks!*
*Please review https://dolphinscheduler.apache.org/en-us/docs/development/issue.html when describing an issue.*
**Describe the question**
A clear and concise description of what the question is.
@@ -19,5 +21,5 @@ A clear and concise description of what the question is.
**Additional context**
Add any other context about the problem here.
**Requirement or improvement
**Requirement or improvement**
- Please describe your requirements or improvement suggestions.

24
.github/ISSUE_TEMPLATE/test.md

@@ -0,0 +1,24 @@
---
name: Test
about: Test to enhance the robustness of this project
title: "[Test][Module Name] Test title"
labels: test
assignees: ''
---
*For better global communication, please give priority to using an English description, thanks!*
*Please review https://dolphinscheduler.apache.org/en-us/docs/development/issue.html when describing an issue.*
**Describe the question**
A clear and concise description of the part to be tested.
**What are the current deficiencies and the benefits of changing or adding this test**
- A clear and concise description of the current deficiencies, the benefits of changing or adding this test, and the scope involved.
**Which version of DolphinScheduler:**
- [1.1.0-preview]
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions you've considered.

6
.github/workflows/ci_e2e.yml

@@ -58,7 +58,7 @@ jobs:
wget https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb
sudo dpkg -i google-chrome*.deb
sudo apt-get install -f -y
wget -N https://chromedriver.storage.googleapis.com/80.0.3987.106/chromedriver_linux64.zip
wget -N https://chromedriver.storage.googleapis.com/83.0.4103.39/chromedriver_linux64.zip
unzip chromedriver_linux64.zip
sudo mv -f chromedriver /usr/local/share/chromedriver
sudo ln -s /usr/local/share/chromedriver /usr/local/bin/chromedriver
@@ -66,9 +66,9 @@
run: cd ./e2e && mvn -B clean test
- name: Collect logs
if: failure()
uses: actions/upload-artifact@v1
uses: actions/upload-artifact@v2
with:
name: dslogs
path: /var/lib/docker/volumes/docker-swarm_dolphinscheduler-logs/_data
path: ${{ github.workspace }}/docker/docker-swarm/dolphinscheduler-logs

12
.github/workflows/ci_ut.yml

@@ -62,15 +62,19 @@ jobs:
git fetch origin
- name: Compile
run: |
export MAVEN_OPTS='-Dmaven.repo.local=.m2/repository -XX:+TieredCompilation -XX:TieredStopAtLevel=1 -XX:+CMSClassUnloadingEnabled -XX:+UseConcMarkSweepGC -XX:-UseGCOverheadLimit -Xmx3g'
export MAVEN_OPTS='-Dmaven.repo.local=.m2/repository -XX:+TieredCompilation -XX:TieredStopAtLevel=1 -XX:+CMSClassUnloadingEnabled -XX:+UseConcMarkSweepGC -XX:-UseGCOverheadLimit -Xmx5g'
mvn test -B -Dmaven.test.skip=false
- name: Upload coverage report to codecov
run: |
CODECOV_TOKEN="09c2663f-b091-4258-8a47-c981827eb29a" bash <(curl -s https://codecov.io/bash)
# Set up JDK 11 for SonarCloud.
- name: Set up JDK 1.11
uses: actions/setup-java@v1
with:
java-version: 1.11
- name: Run SonarCloud Analysis
run: >
mvn verify --batch-mode
org.sonarsource.scanner.maven:sonar-maven-plugin:3.6.1.1688:sonar
mvn --batch-mode verify sonar:sonar
-Dsonar.coverage.jacoco.xmlReportPaths=target/site/jacoco/jacoco.xml
-Dmaven.test.skip=true
-Dsonar.host.url=https://sonarcloud.io
@@ -78,7 +82,7 @@ jobs:
-Dsonar.core.codeCoveragePlugin=jacoco
-Dsonar.projectKey=apache-dolphinscheduler
-Dsonar.login=e4058004bc6be89decf558ac819aa1ecbee57682
-Dsonar.exclusions=dolphinscheduler-ui/src/**/i18n/locale/*.js
-Dsonar.exclusions=dolphinscheduler-ui/src/**/i18n/locale/*.js,dolphinscheduler-microbench/src/**/*
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}

17
LICENSE

@@ -199,3 +199,20 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=======================================================================
Apache DolphinScheduler (incubating) Subcomponents:
The Apache DolphinScheduler (incubating) project contains subcomponents with separate copyright
notices and license terms. Your use of the source code for these
subcomponents is subject to the terms and conditions of the following
licenses.
========================================================================
Apache 2.0 licenses
========================================================================
The following components are provided under the Apache License. See project link for details.
The text of each license is the standard Apache 2.0 license.
ScriptRunner from https://github.com/mybatis/mybatis-3 Apache 2.0
mvnw files from https://github.com/takari/maven-wrapper Apache 2.0
PropertyPlaceholderHelper from https://github.com/spring-projects/spring-framework Apache 2.0

84
NOTICE

@@ -1,5 +1,87 @@
Apache DolphinScheduler (incubating)
Copyright 2019 The Apache Software Foundation
Copyright 2019-2020 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).
mybatis-3
iBATIS
This product includes software developed by
The Apache Software Foundation (http://www.apache.org/).
Copyright 2010 The Apache Software Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
OGNL
//--------------------------------------------------------------------------
// Copyright (c) 2004, Drew Davidson and Luke Blanshard
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// Neither the name of the Drew Davidson nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
// OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
// AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
//--------------------------------------------------------------------------
Refactored SqlBuilder class (SQL, AbstractSQL)
This product includes software developed by
Adam Gent (https://gist.github.com/3650165)
Copyright 2010 Adam Gent
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Spring Framework ${version}
Copyright (c) 2002-${copyright} Pivotal, Inc.
This product is licensed to you under the Apache License, Version 2.0
(the "License"). You may not use this product except in compliance with
the License.
This product may include a number of subcomponents with separate
copyright notices and license terms. Your use of the source code for
these subcomponents is subject to the terms and conditions of the
subcomponent's license, as noted in the license.txt file.

17
README.md

@@ -55,20 +55,10 @@ Overload processing: Task queue mechanism, the number of schedulable tasks on a
![monitor](https://user-images.githubusercontent.com/59273635/75625839-c698a480-5bfc-11ea-8bbe-895b561b337f.png)
![security](https://user-images.githubusercontent.com/15833811/75236441-bfd2f180-57f8-11ea-88bd-f24311e01b7e.png)
![treeview](https://user-images.githubusercontent.com/15833811/75217191-3fe56100-57d1-11ea-8856-f19180d9a879.png)
### Document
- <a href="https://dolphinscheduler.apache.org/en-us/docs/1.2.0/user_doc/backend-deployment.html" target="_blank">Backend deployment documentation</a>
- <a href="https://dolphinscheduler.apache.org/en-us/docs/1.2.0/user_doc/frontend-deployment.html" target="_blank">Front-end deployment documentation</a>
- [**User manual**](https://dolphinscheduler.apache.org/en-us/docs/1.2.0/user_doc/system-manual.html?_blank "System manual")
- [**Upgrade document**](https://dolphinscheduler.apache.org/en-us/docs/1.2.0/user_doc/upgrade.html?_blank "Upgrade document")
### Online Demo
- <a href="http://106.75.43.194:8888" target="_blank">Online Demo</a>
For more documentation, please refer to the <a href="https://dolphinscheduler.apache.org/en-us/docs/1.2.0/user_doc/quick-start.html" target="_blank">[DolphinScheduler online documentation]</a>
### Recent R&D plan
Work plan of Dolphin Scheduler: [R&D plan](https://github.com/apache/incubator-dolphinscheduler/projects/1). Under the `In Develop` card is what is currently being developed; the TODO card lists what is to be done (including feature ideas)
@@ -86,9 +76,8 @@ Welcome to participate in contributing, please refer to the process of submittin
Artifact:
```
dolphinscheduler-dist/dolphinscheduler-backend/target/apache-dolphinscheduler-incubating-${latest.release.version}-dolphinscheduler-backend-bin.tar.gz: Binary package of DolphinScheduler-Backend
dolphinscheduler-dist/dolphinscheduler-front/target/apache-dolphinscheduler-incubating-${latest.release.version}-dolphinscheduler-front-bin.tar.gz: Binary package of DolphinScheduler-UI
dolphinscheduler-dist/dolphinscheduler-src/target/apache-dolphinscheduler-incubating-${latest.release.version}-src.zip: Source code package of DolphinScheduler
dolphinscheduler-dist/target/apache-dolphinscheduler-incubating-${latest.release.version}-dolphinscheduler-bin.tar.gz: Binary package of DolphinScheduler
dolphinscheduler-dist/target/apache-dolphinscheduler-incubating-${latest.release.version}-src.zip: Source code package of DolphinScheduler
```
### Thanks

17
README_zh_CN.md

@@ -50,20 +50,10 @@ Dolphin Scheduler Official Website
![security](https://user-images.githubusercontent.com/15833811/75209633-baa28200-57b9-11ea-9def-94bef2e212a7.jpg)
### Documentation
- <a href="https://dolphinscheduler.apache.org/zh-cn/docs/1.2.0/user_doc/backend-deployment.html" target="_blank">Backend deployment documentation</a>
- <a href="https://dolphinscheduler.apache.org/zh-cn/docs/1.2.0/user_doc/frontend-deployment.html" target="_blank">Front-end deployment documentation</a>
- [**User manual**](https://dolphinscheduler.apache.org/zh-cn/docs/1.2.0/user_doc/system-manual.html?_blank "User manual")
- [**Upgrade documentation**](https://dolphinscheduler.apache.org/zh-cn/docs/1.2.0/user_doc/upgrade.html?_blank "Upgrade documentation")
### Online Demo
- <a href="http://106.75.43.194:8888" target="_blank">Online Demo</a>
For more documentation, please refer to the <a href="https://dolphinscheduler.apache.org/zh-cn/docs/1.2.0/user_doc/quick-start.html" target="_blank">DolphinScheduler online documentation (Chinese)</a>
### Recent R&D plan
@@ -83,9 +73,8 @@ DolphinScheduler's work plan: <a href="https://github.com/apache/incubator-d
Artifact:
```
dolphinscheduler-dist/dolphinscheduler-backend/target/apache-dolphinscheduler-incubating-${latest.release.version}-dolphinscheduler-backend-bin.tar.gz: Binary package of DolphinScheduler-Backend
dolphinscheduler-dist/dolphinscheduler-front/target/apache-dolphinscheduler-incubating-${latest.release.version}-dolphinscheduler-front-bin.tar.gz: Binary package of DolphinScheduler-UI
dolphinscheduler-dist/dolphinscheduler-src/target/apache-dolphinscheduler-incubating-${latest.release.version}-src.zip: Source code package of DolphinScheduler
dolphinscheduler-dist/target/apache-dolphinscheduler-incubating-${latest.release.version}-dolphinscheduler-bin.tar.gz: Binary package of DolphinScheduler
dolphinscheduler-dist/target/apache-dolphinscheduler-incubating-${latest.release.version}-src.zip: Source code package of DolphinScheduler
```
### Thanks

106
ambari_plugin/README.md

@@ -1,27 +1,46 @@
### Usage instructions for Dolphin Scheduler's Ambari plug-in
### Instructions for using the Dolphin Scheduler's Ambari plug-in
##### Note
#### Note
1. This document is intended for users with a basic understanding of Ambari
2. This document describes adding the Dolphin Scheduler (version 1.3.0) service to an installed Ambari service
1. This document is intended for users with a basic understanding of Ambari
2. This document is a description of adding the Dolphin Scheduler service to the installed Ambari service
3. This document is based on version 2.5.2 of Ambari
##### 1. Installation preparation
#### Installation preparation
1. Prepare the RPM packages
1. Prepare the RPM packages
- It is generated by executing the command ```mvn -U clean install rpm:attached-rpm -Prpmbuild -Dmaven.test.skip=true -X``` in the source dolphinscheduler-dist directory (in the directory dolphinscheduler-dist/target/rpm/apache-dolphinscheduler-incubating/RPMS/noarch)
- It is generated by executing the command ```mvn -U clean install -Prpmbuild -Dmaven.test.skip=true -X``` in the project root directory (In the directory: dolphinscheduler-dist/target/rpm/apache-dolphinscheduler-incubating/RPMS/noarch )
2. Create the installation user for DS -- permissions
2. Create an installation user for DS, who has read and write access to the installation directory (/opt/soft)
3. Initialize the database information
3. Install with rpm package
- Manual installation (recommended):
- Copy the prepared RPM packages to each node of the cluster.
- Execute with DS installation user: ```rpm -ivh apache-dolphinscheduler-incubating-xxx.noarch.rpm```
- The mysql-connector-java jar packaged with the default POM file is not included; if you use MySQL as the database, you need to add it manually.
- The RPM package is built in the project with the installation path /opt/soft.
- Automatic installation with ambari
- Each node of the cluster needs to configure the local yum source
- Copy the prepared RPM packages to each node local yum source
4. Copy plug-in directory
- copy directory ambari_plugin/common-services/DOLPHIN to ambari-server/resources/common-services/
- copy directory ambari_plugin/statcks/DOLPHIN to ambari-server/resources/stacks/HDP/2.6/services/ (the stack version is selected based on the actual situation)
5. Initialize the database information
```
-- Create the database for the Dolphin Scheduler: dolphinscheduler
-- Create the database for the Dolphin Scheduler:dolphinscheduler
CREATE DATABASE dolphinscheduler DEFAULT CHARACTER SET utf8 DEFAULT COLLATE
utf8_general_ci;
-- Initialize the user and password for the dolphinscheduler database and assign permissions
-- Replace the {user} in the SQL statements below with the user of the dolphinscheduler database
-- Initialize the user and password for the dolphinscheduler database and assign permissions
-- Replace the {user} in the SQL statement below with the user of the dolphinscheduler database
GRANT ALL PRIVILEGES ON dolphinscheduler.* TO '{user}'@'%' IDENTIFIED BY '{password}';
GRANT ALL PRIVILEGES ON dolphinscheduler.* TO '{user}'@'localhost' IDENTIFIED BY
'{password}';
@@ -30,39 +49,84 @@
##### 2. Install Dolphin Scheduler with Ambari
#### Ambari Install Dolphin Scheduler
- **NOTE: You have to install zookeeper first**
1. Install Dolphin Scheduler from the Ambari web interface
1. Install Dolphin Scheduler on ambari web interface
![](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_001.png)
2. Select the nodes for the Dolphin Scheduler's Master installation
2. Select the nodes for the Dolphin Scheduler's Master installation
![](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_002.png)
3. Configure the nodes for the Dolphin Scheduler's Worker, Api, Logger, and Alert installation
3. Configure the Dolphin Scheduler's nodes for Worker, Api, Logger, Alert installation
![](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_003.png)
4. Set the installation user of the Dolphin Scheduler service (**created in step 1**) and the user group it belongs to
4. Set the installation users of the Dolphin Scheduler service (created in step 1) and the user groups they belong to
![](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_004.png)
5. Configure the database information (consistent with the database initialized in step 1)
5. System Env Optimization will export some system environment config. Modify according to actual situation
![](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_020.png)
6. Configure the database information (same as in the initialization database in step 1)
![](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_005.png)
6. Configure other information, if needed
7. Configure additional information if needed
![](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_006.png)
![](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_007.png)
7. Perform the next steps as normal
8. Perform the next steps as normal
![](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_008.png)
8. The interface after successful installation
9. The interface after successful installation
![](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_009.png)
------
#### Add components to the node through Ambari -- for example, add a DS Worker
***NOTE***: In Dolphin's Ambari installation, DS Logger is a required companion component of DS Worker (it needs to be added and installed first; otherwise the job logs on the corresponding Worker cannot be viewed)
1. Locate the component node to add -- for example, node ark3
![DS2_AMBARI_011](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_011.png)
2. Add components -- the drop-down list is all addable
![DS2_AMBARI_012](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_012.png)
3. Confirm component addition
![DS2_AMBARI_013](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_013.png)
4. After adding DS Worker and DS Logger components
![DS2_AMBARI_015](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_015.png)
5. Start the component
![DS2_AMBARI_016](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_016.png)
#### Remove the component from the node with Ambari
1. Stop the component in the corresponding node
![DS2_AMBARI_018](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_018.png)
2. Remove components
![DS2_AMBARI_019](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_019.png)

4
ambari_plugin/common-services/DOLPHIN/1.2.1/package/scripts/params.py

@@ -76,8 +76,8 @@ else:
dolphin_alert_map = {}
wechat_push_url = 'https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token'
wechat_token_url = 'https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret'
wechat_team_send_msg = '{\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}'
wechat_user_send_msg = '{\"touser\":\"$toUser\",\"agentid\":\"$agentId\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"$msg\"}}'
wechat_team_send_msg = '{\"toparty\":\"{toParty}\",\"agentid\":\"{agentId}\",\"msgtype\":\"text\",\"text\":{\"content\":\"{msg}\"},\"safe\":\"0\"}'
wechat_user_send_msg = '{\"touser\":\"{toUser}\",\"agentid\":\"{agentId}\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"{msg}\"}}'
dolphin_alert_map['enterprise.wechat.push.ur'] = wechat_push_url
dolphin_alert_map['enterprise.wechat.token.url'] = wechat_token_url
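The 1.2.1 and 1.3.0 plug-ins both switch the WeChat message templates from Velocity-style `$var` tokens to brace-delimited `{var}` tokens. A minimal sketch of how such tokens might be filled in at send time; `fill_template` and the sample values are hypothetical, not the project's actual substitution code (which lives on the Java side, in classes such as `EnterpriseWeChatUtils`):

```python
def fill_template(template, **values):
    # Plain str.replace rather than str.format, because the templates
    # contain literal JSON braces that would confuse format().
    for key, value in values.items():
        template = template.replace('{' + key + '}', str(value))
    return template

# Example with the team-message template from the diff above:
msg = fill_template(
    '{"toparty":"{toParty}","agentid":"{agentId}",'
    '"msgtype":"text","text":{"content":"{msg}"},"safe":"0"}',
    toParty='PART1', agentId='1000002', msg='task failed',  # assumed sample values
)
print(msg)
```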

2
ambari_plugin/common-services/DOLPHIN/1.2.1/package/templates/dolphin-daemon.j2

@@ -39,7 +39,7 @@ export HOSTNAME=`hostname`
DOLPHINSCHEDULER_LIB_JARS={{dolphin_lib_jars}}
DOLPHINSCHEDULER_OPTS="-server -Xmx16g -Xms4g -Xss512k -XX:+DisableExplicitGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:LargePageSizeInBytes=128m -XX:+UseFastAccessorMethods -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70"
DOLPHINSCHEDULER_OPTS="-server -Xmx16g -Xms1g -Xss512k -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:LargePageSizeInBytes=10m -XX:+UseFastAccessorMethods -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70"
STOP_TIMEOUT=5
log={{dolphin_log_dir}}/dolphinscheduler-$command-$HOSTNAME.out

62
ambari_plugin/common-services/DOLPHIN/1.3.0/alerts.json

@@ -28,6 +28,33 @@
}
}
],
"DOLPHIN_LOGGER": [
{
"name": "dolphin_logger_port_check",
"label": "dolphin_logger_port_check",
"description": "dolphin_logger_port_check.",
"interval": 10,
"scope": "ANY",
"source": {
"type": "PORT",
"uri": "{{dolphin-common/loggerserver.rpc.port}}",
"default_port": 50051,
"reporting": {
"ok": {
"text": "TCP OK - {0:.3f}s response on port {1}"
},
"warning": {
"text": "TCP OK - {0:.3f}s response on port {1}",
"value": 1.5
},
"critical": {
"text": "Connection failed: {0} to {1}:{2}",
"value": 5.0
}
}
}
}
],
"DOLPHIN_MASTER": [
{
"name": "DOLPHIN_MASTER_CHECK",
@@ -126,39 +153,6 @@
]
}
}
],
"DOLPHIN_ALERT": [
{
"name": "DOLPHIN_DOLPHIN_LOGGER_CHECK",
"label": "check dolphin scheduler alert status",
"description": "",
"interval":10,
"scope": "HOST",
"enabled": true,
"source": {
"type": "SCRIPT",
"path": "DOLPHIN/1.3.0/package/alerts/alert_dolphin_scheduler_status.py",
"parameters": [
{
"name": "connection.timeout",
"display_name": "Connection Timeout",
"value": 5.0,
"type": "NUMERIC",
"description": "The maximum time before this alert is considered to be CRITICAL",
"units": "seconds",
"threshold": "CRITICAL"
},
{
"name": "alertName",
"display_name": "alertName",
"value": "DOLPHIN_LOGGER",
"type": "STRING",
"description": "alert name"
}
]
}
}
]
}
}
}

6
ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-alert.xml

@@ -21,12 +21,6 @@
<description>alert type is EMAIL/SMS</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>alert.template</name>
<value>html</value>
<description>alter msg template, default is html template</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mail.protocol</name>
<value>SMTP</value>

261
ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-datasource.xml

@@ -203,265 +203,4 @@
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.mapper-locations</name>
<value>classpath*:/org.apache.dolphinscheduler.dao.mapper/*.xml</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.typeEnumsPackage</name>
<value>org.apache.dolphinscheduler.*.enums</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.typeAliasesPackage</name>
<value>org.apache.dolphinscheduler.dao.entity</value>
<description>
Entity scan, where multiple packages are separated by a comma or semicolon
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.global-config.db-config.id-type</name>
<value>AUTO</value>
<value-attributes>
<type>value-list</type>
<entries>
<entry>
<value>AUTO</value>
<label>AUTO</label>
</entry>
<entry>
<value>INPUT</value>
<label>INPUT</label>
</entry>
<entry>
<value>ID_WORKER</value>
<label>ID_WORKER</label>
</entry>
<entry>
<value>UUID</value>
<label>UUID</label>
</entry>
</entries>
<selection-cardinality>1</selection-cardinality>
</value-attributes>
<description>
Primary key type AUTO:" database ID AUTO ",
INPUT:" user INPUT ID",
ID_WORKER:" global unique ID (numeric type unique ID)",
UUID:" global unique ID UUID";
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.global-config.db-config.field-strategy</name>
<value>NOT_NULL</value>
<value-attributes>
<type>value-list</type>
<entries>
<entry>
<value>IGNORED</value>
<label>IGNORED</label>
</entry>
<entry>
<value>NOT_NULL</value>
<label>NOT_NULL</label>
</entry>
<entry>
<value>NOT_EMPTY</value>
<label>NOT_EMPTY</label>
</entry>
</entries>
<selection-cardinality>1</selection-cardinality>
</value-attributes>
<description>
Field policy IGNORED:" ignore judgment ",
NOT_NULL:" not NULL judgment "),
NOT_EMPTY:" not NULL judgment"
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.global-config.db-config.column-underline</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.global-config.db-config.logic-delete-value</name>
<value>1</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.global-config.db-config.logic-not-delete-value</name>
<value>0</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.global-config.db-config.banner</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.configuration.map-underscore-to-camel-case</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.configuration.cache-enabled</name>
<value>false</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.configuration.call-setters-on-nulls</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.configuration.jdbc-type-for-null</name>
<value>null</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.exec.threads</name>
<value>100</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.exec.task.num</name>
<value>20</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.heartbeat.interval</name>
<value>10</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.task.commit.retryTimes</name>
<value>5</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.task.commit.interval</name>
<value>1000</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.max.cpuload.avg</name>
<value>100</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.reserved.memory</name>
<value>0.1</value>
<value-attributes>
<type>float</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>worker.exec.threads</name>
<value>100</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>worker.heartbeat.interval</name>
<value>10</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>worker.fetch.task.num</name>
<value>3</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>worker.max.cpuload.avg</name>
<value>100</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>worker.reserved.memory</name>
<value>0.1</value>
<value-attributes>
<type>float</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
</configuration>

9
ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-worker.xml

@@ -33,15 +33,6 @@
<description>worker heartbeat interval</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>worker.fetch.task.num</name>
<value>3</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description>submit the number of tasks at a time</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>worker.max.cpuload.avg</name>
<value>100</value>

8
ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-zookeeper.xml

@@ -15,14 +15,6 @@
~ limitations under the License.
-->
<configuration>
<property>
<name>dolphinscheduler.queue.impl</name>
<value>zookeeper</value>
<description>
Task queue implementation, default "zookeeper"
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>zookeeper.dolphinscheduler.root</name>
<value>/dolphinscheduler</value>

4
ambari_plugin/common-services/DOLPHIN/1.3.0/metainfo.xml

@@ -103,7 +103,7 @@
<osFamily>any</osFamily>
<packages>
<package>
<name>apache-dolphinscheduler-incubating-1.3.0*</name>
<name>apache-dolphinscheduler-incubating*</name>
</package>
</packages>
</osSpecific>
@@ -134,4 +134,4 @@
</quickLinksConfigurations>
</service>
</services>
</metainfo>
</metainfo>

3
ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_alert_service.py

@@ -26,7 +26,8 @@ class DolphinAlertService(Script):
import params
env.set_params(params)
self.install_packages(env)
Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True)
Execute(('chmod', '-R', '777', params.dolphin_home))
Execute(('chown', '-R', params.dolphin_user + ":" + params.dolphin_group, params.dolphin_home))
def configure(self, env):
import params

3
ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_api_service.py

@@ -26,7 +26,8 @@ class DolphinApiService(Script):
import params
env.set_params(params)
self.install_packages(env)
Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True)
Execute(('chmod', '-R', '777', params.dolphin_home))
Execute(('chown', '-R', params.dolphin_user + ":" + params.dolphin_group, params.dolphin_home))
def configure(self, env):
import params

4
ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_logger_service.py

@@ -26,8 +26,8 @@ class DolphinLoggerService(Script):
import params
env.set_params(params)
self.install_packages(env)
Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True)
Execute(('chmod', '-R', '777', params.dolphin_home))
Execute(('chown', '-R', params.dolphin_user + ":" + params.dolphin_group, params.dolphin_home))
def configure(self, env):
import params
params.pika_slave = True

3
ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_master_service.py

@@ -27,7 +27,8 @@ class DolphinMasterService(Script):
import params
env.set_params(params)
self.install_packages(env)
Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True)
Execute(('chmod', '-R', '777', params.dolphin_home))
Execute(('chown', '-R', params.dolphin_user + ":" + params.dolphin_group, params.dolphin_home))
def configure(self, env):
import params

3
ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_worker_service.py

@@ -26,7 +26,8 @@ class DolphinWorkerService(Script):
import params
env.set_params(params)
self.install_packages(env)
Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True)
Execute(('chmod', '-R', '777', params.dolphin_home))
Execute(('chown', '-R', params.dolphin_user + ":" + params.dolphin_group, params.dolphin_home))
def configure(self, env):
import params
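The same two-step pattern replaces the single sudo `chmod` in all five service scripts above: first make the install tree world-writable, then hand ownership to the DS user. A plain-Python equivalent of the two `Execute` resource calls, using `subprocess` and made-up values, just to make the effect concrete:

```python
import subprocess

dolphin_home = '/opt/soft/dolphinscheduler'  # assumed install dir
dolphin_user = 'dolphin'                     # assumed service user
dolphin_group = 'dolphin'                    # assumed service group

# Equivalent of Execute(('chmod', '-R', '777', params.dolphin_home))
subprocess.check_call(['chmod', '-R', '777', dolphin_home])
# Equivalent of Execute(('chown', '-R', user + ":" + group, params.dolphin_home))
subprocess.check_call(['chown', '-R', dolphin_user + ':' + dolphin_group, dolphin_home])
```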

15
ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/params.py

@@ -77,8 +77,8 @@ else:
dolphin_alert_map = {}
wechat_push_url = 'https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token'
wechat_token_url = 'https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret'
wechat_team_send_msg = '{\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}'
wechat_user_send_msg = '{\"touser\":\"$toUser\",\"agentid\":\"$agentId\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"$msg\"}}'
wechat_team_send_msg = '{\"toparty\":\"{toParty}\",\"agentid\":\"{agentId}\",\"msgtype\":\"text\",\"text\":{\"content\":\"{msg}\"},\"safe\":\"0\"}'
wechat_user_send_msg = '{\"touser\":\"{toUser}\",\"agentid\":\"{agentId}\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"{msg}\"}}'
dolphin_alert_config_map = config['configurations']['dolphin-alert']
@@ -114,10 +114,6 @@ else:
dolphin_common_map_tmp = config['configurations']['dolphin-common']
data_basedir_path = dolphin_common_map_tmp['data.basedir.path']
process_exec_basepath = data_basedir_path + '/exec'
data_download_basedir_path = data_basedir_path + '/download'
dolphin_common_map['process.exec.basepath'] = process_exec_basepath
dolphin_common_map['data.download.basedir.path'] = data_download_basedir_path
dolphin_common_map['dolphinscheduler.env.path'] = dolphin_env_path
dolphin_common_map.update(config['configurations']['dolphin-common'])
@@ -149,6 +145,11 @@ if len(zookeeperHosts) > 0 and "clientPort" in config['configurations']['zoo.cfg
zookeeperPort = ":" + clientPort + ","
dolphin_zookeeper_map['zookeeper.quorum'] = zookeeperPort.join(zookeeperHosts) + ":" + clientPort
dolphin_zookeeper_map.update(config['configurations']['dolphin-zookeeper'])
if 'spring.servlet.multipart.max-file-size' in dolphin_app_api_map:
    file_size = dolphin_app_api_map['spring.servlet.multipart.max-file-size']
    dolphin_app_api_map['spring.servlet.multipart.max-file-size'] = file_size + "MB"
if 'spring.servlet.multipart.max-request-size' in dolphin_app_api_map:
    request_size = dolphin_app_api_map['spring.servlet.multipart.max-request-size']
    dolphin_app_api_map['spring.servlet.multipart.max-request-size'] = request_size + "MB"
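The context lines of this last hunk show how the ZooKeeper quorum string is assembled (every host joined with a `:port,` separator, plus a trailing port), while the added lines append an `MB` unit to bare multipart size values. A standalone recreation of the quorum join, with made-up host names:

```python
# Standalone recreation of the quorum-join logic shown above.
zookeeperHosts = ['zk1.example.com', 'zk2.example.com']  # assumed host list
clientPort = '2181'
zookeeperPort = ':' + clientPort + ','
quorum = zookeeperPort.join(zookeeperHosts) + ':' + clientPort
print(quorum)  # zk1.example.com:2181,zk2.example.com:2181
```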

2
ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/dolphin-daemon.sh.j2

@@ -39,7 +39,7 @@ export HOSTNAME=`hostname`
DOLPHINSCHEDULER_LIB_JARS={{dolphin_lib_jars}}
DOLPHINSCHEDULER_OPTS="-server -Xmx16g -Xms4g -Xss512k -XX:+DisableExplicitGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:LargePageSizeInBytes=128m -XX:+UseFastAccessorMethods -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70"
DOLPHINSCHEDULER_OPTS="-server -Xmx16g -Xms1g -Xss512k -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:LargePageSizeInBytes=10m -XX:+UseFastAccessorMethods -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70"
STOP_TIMEOUT=5
log={{dolphin_log_dir}}/dolphinscheduler-$command-$HOSTNAME.out

2
docker/build/Dockerfile

@@ -42,7 +42,7 @@ ADD ./apache-dolphinscheduler-incubating-${VERSION}-dolphinscheduler-bin.tar.gz
RUN mv /opt/apache-dolphinscheduler-incubating-${VERSION}-dolphinscheduler-bin/ /opt/dolphinscheduler/
ENV DOLPHINSCHEDULER_HOME /opt/dolphinscheduler
#4. install pg
#4. install database; if you use mysql as your backend database, the `mysql-client` package should be installed
RUN apk add postgresql postgresql-contrib
#5. modify nginx
RUN echo "daemon off;" >> /etc/nginx/nginx.conf && \

66
docker/build/README.md

@@ -16,7 +16,7 @@ Official Website: https://dolphinscheduler.apache.org
#### You can start a dolphinscheduler instance
```
$ docker run -dit --name dolphinscheduler \
-e POSTGRESQL_USERNAME=test -e POSTGRESQL_PASSWORD=test -e POSTGRESQL_DATABASE=dolphinscheduler \
-e DATABASE_USERNAME=test -e DATABASE_PASSWORD=test -e DATABASE_DATABASE=dolphinscheduler \
-p 8888:8888 \
dolphinscheduler all
```
@@ -25,14 +25,14 @@ The default postgres user `root`, postgres password `root` and database `dolphin
The default zookeeper is created in the `startup.sh`.
#### Or via Environment Variables **`POSTGRESQL_HOST`** **`POSTGRESQL_PORT`** **`POSTGRESQL_DATABASE`** **`ZOOKEEPER_QUORUM`**
#### Or via Environment Variables **`DATABASE_HOST`** **`DATABASE_PORT`** **`DATABASE_DATABASE`** **`ZOOKEEPER_QUORUM`**
You can specify an **existing postgres service**. Example:
```
$ docker run -dit --name dolphinscheduler \
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
-e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
-p 8888:8888 \
dolphinscheduler all
```
@@ -42,7 +42,7 @@ You can specify an **existing zookeeper service**. Example:
```
$ docker run -dit --name dolphinscheduler \
-e ZOOKEEPER_QUORUM="192.168.x.x:2181" \
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" -e POSTGRESQL_DATABASE="dolphinscheduler" \
-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" -e DATABASE_DATABASE="dolphinscheduler" \
-p 8888:8888 \
dolphinscheduler all
```
@@ -56,8 +56,8 @@ You can start a standalone dolphinscheduler server.
```
$ docker run -dit --name dolphinscheduler \
-e ZOOKEEPER_QUORUM="192.168.x.x:2181" \
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
-e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
dolphinscheduler master-server
```
@@ -66,8 +66,8 @@ dolphinscheduler master-server
```
$ docker run -dit --name dolphinscheduler \
-e ZOOKEEPER_QUORUM="192.168.x.x:2181" \
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
-e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
dolphinscheduler worker-server
```
@@ -75,8 +75,8 @@ dolphinscheduler worker-server
```
$ docker run -dit --name dolphinscheduler \
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
-e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
-p 12345:12345 \
dolphinscheduler api-server
```
@@ -85,8 +85,8 @@ dolphinscheduler api-server
```
$ docker run -dit --name dolphinscheduler \
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
-e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
dolphinscheduler alert-server
```
@@ -99,7 +99,7 @@ $ docker run -dit --name dolphinscheduler \
dolphinscheduler frontend
```
**Note**: You must specify `POSTGRESQL_HOST` `POSTGRESQL_PORT` `POSTGRESQL_DATABASE` `POSTGRESQL_USERNAME` `POSTGRESQL_PASSWORD` `ZOOKEEPER_QUORUM` when starting a standalone dolphinscheduler server.
**Note**: You must specify `DATABASE_HOST` `DATABASE_PORT` `DATABASE_DATABASE` `DATABASE_USERNAME` `DATABASE_PASSWORD` `ZOOKEEPER_QUORUM` when starting a standalone dolphinscheduler server.
## How to build a docker image
@@ -124,33 +124,51 @@ Please read `./docker/build/hooks/build` `./docker/build/hooks/build.bat` script
The Dolphin Scheduler image uses several environment variables which are easy to miss. While none of the variables are required, they may significantly aid you in using the image.
**`POSTGRESQL_HOST`**
**`DATABASE_TYPE`**
This environment variable sets the host for PostgreSQL. The default value is `127.0.0.1`.
This environment variable sets the type of the database. The default value is `postgresql`.
**Note**: You must specify it when starting a standalone dolphinscheduler server, such as `master-server`, `worker-server`, `api-server`, or `alert-server`.
**`POSTGRESQL_PORT`**
**`DATABASE_DRIVER`**
This environment variable sets the port for PostgreSQL. The default value is `5432`.
This environment variable sets the driver for the database. The default value is `org.postgresql.Driver`.
**Note**: You must specify it when starting a standalone dolphinscheduler server, such as `master-server`, `worker-server`, `api-server`, or `alert-server`.
**`DATABASE_HOST`**
This environment variable sets the host for the database. The default value is `127.0.0.1`.
**Note**: You must specify it when starting a standalone dolphinscheduler server, such as `master-server`, `worker-server`, `api-server`, or `alert-server`.
**`DATABASE_PORT`**
This environment variable sets the port for the database. The default value is `5432`.
**Note**: You must specify it when starting a standalone dolphinscheduler server, such as `master-server`, `worker-server`, `api-server`, or `alert-server`.
**`POSTGRESQL_USERNAME`**
**`DATABASE_USERNAME`**
This environment variable sets the username for the database. The default value is `root`.
**Note**: You must specify it when starting a standalone dolphinscheduler server, such as `master-server`, `worker-server`, `api-server`, or `alert-server`.
**`DATABASE_PASSWORD`**
This environment variable sets the username for PostgreSQL. The default value is `root`.
This environment variable sets the password for the database. The default value is `root`.
**Note**: You must specify it when starting a standalone dolphinscheduler server, such as `master-server`, `worker-server`, `api-server`, or `alert-server`.
**`POSTGRESQL_PASSWORD`**
**`DATABASE_DATABASE`**
This environment variable sets the password for PostgreSQL. The default value is `root`.
This environment variable sets the database name. The default value is `dolphinscheduler`.
**Note**: You must specify it when starting a standalone dolphinscheduler server, such as `master-server`, `worker-server`, `api-server`, or `alert-server`.
**`POSTGRESQL_DATABASE`**
**`DATABASE_PARAMS`**
This environment variable sets the database for PostgreSQL. The default value is `dolphinscheduler`.
This environment variable sets extra connection parameters for the database. The default value is `characterEncoding=utf8`.
**Note**: You must specify it when starting a standalone dolphinscheduler server, such as `master-server`, `worker-server`, `api-server`, or `alert-server`.
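Taken together, these `DATABASE_*` variables presumably get assembled into a JDBC-style datasource URL at container startup; a rough sketch of that composition, where the variable names and defaults come from this README but the exact URL layout is an assumption rather than a quote from `startup-init-conf.sh`:

```python
import os

# Rough sketch: compose a datasource URL from the DATABASE_* variables
# documented above, falling back to the documented defaults.
db_type = os.environ.get('DATABASE_TYPE', 'postgresql')
host = os.environ.get('DATABASE_HOST', '127.0.0.1')
port = os.environ.get('DATABASE_PORT', '5432')
name = os.environ.get('DATABASE_DATABASE', 'dolphinscheduler')
params = os.environ.get('DATABASE_PARAMS', 'characterEncoding=utf8')

url = 'jdbc:{}://{}:{}/{}?{}'.format(db_type, host, port, name, params)
print(url)  # jdbc:postgresql://127.0.0.1:5432/dolphinscheduler?characterEncoding=utf8
```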

66
docker/build/README_zh_CN.md

@@ -16,7 +16,7 @@ Official Website: https://dolphinscheduler.apache.org
#### You can run a dolphinscheduler instance
```
$ docker run -dit --name dolphinscheduler \
-e POSTGRESQL_USERNAME=test -e POSTGRESQL_PASSWORD=test -e POSTGRESQL_DATABASE=dolphinscheduler \
-e DATABASE_USERNAME=test -e DATABASE_PASSWORD=test -e DATABASE_DATABASE=dolphinscheduler \
-p 8888:8888 \
dolphinscheduler all
```
@@ -25,14 +25,14 @@ dolphinscheduler all
Meanwhile, the default `Zookeeper` is also created in the `startup.sh` script.
#### Or use existing services via the environment variables **`POSTGRESQL_HOST`** **`POSTGRESQL_PORT`** **`ZOOKEEPER_QUORUM`**
#### Or use existing services via the environment variables **`DATABASE_HOST`** **`DATABASE_PORT`** **`ZOOKEEPER_QUORUM`**
You can specify an existing **`Postgres`** service, as follows:
```
$ docker run -dit --name dolphinscheduler \
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
-e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
-p 8888:8888 \
dolphinscheduler all
```
@@ -42,7 +42,7 @@ dolphinscheduler all
```
$ docker run -dit --name dolphinscheduler \
-e ZOOKEEPER_QUORUM="192.168.x.x:2181" \
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" -e POSTGRESQL_DATABASE="dolphinscheduler" \
-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" -e DATABASE_DATABASE="dolphinscheduler" \
-p 8888:8888 \
dolphinscheduler all
```
@ -56,8 +56,8 @@ dolphinscheduler all
```
$ docker run -dit --name dolphinscheduler \
-e ZOOKEEPER_QUORUM="192.168.x.x:2181" \
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
-e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
dolphinscheduler master-server
```
@ -66,8 +66,8 @@ dolphinscheduler master-server
```
$ docker run -dit --name dolphinscheduler \
-e ZOOKEEPER_QUORUM="192.168.x.x:2181" \
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
-e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
dolphinscheduler worker-server
```
@ -75,8 +75,8 @@ dolphinscheduler worker-server
```
$ docker run -dit --name dolphinscheduler \
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
-e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
-p 12345:12345 \
dolphinscheduler api-server
```
@ -85,8 +85,8 @@ dolphinscheduler api-server
```
$ docker run -dit --name dolphinscheduler \
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
-e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
dolphinscheduler alert-server
```
@ -99,7 +99,7 @@ $ docker run -dit --name dolphinscheduler \
dolphinscheduler frontend
```
**Note**: When you run only some of the dolphinscheduler services, you must specify these environment variables: `POSTGRESQL_HOST` `POSTGRESQL_PORT` `POSTGRESQL_DATABASE` `POSTGRESQL_USERNAME` `POSTGRESQL_PASSWORD` `ZOOKEEPER_QUORUM`
**Note**: When you run only some of the dolphinscheduler services, you must specify these environment variables: `DATABASE_HOST` `DATABASE_PORT` `DATABASE_DATABASE` `DATABASE_USERNAME` `DATABASE_PASSWORD` `ZOOKEEPER_QUORUM`
## How to build a docker image
@ -124,33 +124,51 @@ c:\incubator-dolphinscheduler>.\docker\build\hooks\build.bat
The Dolphin Scheduler image uses several environment variables that are easy to overlook. Although these variables are not required, they can help you configure the image more easily and define the corresponding service configuration according to your needs.
**`POSTGRESQL_HOST`**
**`DATABASE_TYPE`**
Configures the `HOST` of `PostgreSQL`, default value `127.0.0.1`
Configures the `TYPE` of the `database`, default value `postgresql`
**Note**: This environment variable must be specified when running the `master-server`, `worker-server`, `api-server`, or `alert-server` services of `dolphinscheduler`, so that you can better set up distributed services.
**`POSTGRESQL_PORT`**
**`DATABASE_DRIVER`**
Configures the `PORT` of `PostgreSQL`, default value `5432`
Configures the `DRIVER` of the `database`, default value `org.postgresql.Driver`
**Note**: This environment variable must be specified when running the `master-server`, `worker-server`, `api-server`, or `alert-server` services of `dolphinscheduler`, so that you can better set up distributed services.
**`POSTGRESQL_USERNAME`**
**`DATABASE_HOST`**
Configures the `USERNAME` of `PostgreSQL`, default value `root`
Configures the `HOST` of the `database`, default value `127.0.0.1`
**Note**: This environment variable must be specified when running the `master-server`, `worker-server`, `api-server`, or `alert-server` services of `dolphinscheduler`, so that you can better set up distributed services.
**`POSTGRESQL_PASSWORD`**
**`DATABASE_PORT`**
Configures the `PASSWORD` of `PostgreSQL`, default value `root`
Configures the `PORT` of the `database`, default value `5432`
**Note**: This environment variable must be specified when running the `master-server`, `worker-server`, `api-server`, or `alert-server` services of `dolphinscheduler`, so that you can better set up distributed services.
**`POSTGRESQL_DATABASE`**
**`DATABASE_USERNAME`**
Configures the `DATABASE` of `PostgreSQL`, default value `dolphinscheduler`
Configures the `USERNAME` of the `database`, default value `root`
**Note**: This environment variable must be specified when running the `master-server`, `worker-server`, `api-server`, or `alert-server` services of `dolphinscheduler`, so that you can better set up distributed services.
**`DATABASE_PASSWORD`**
Configures the `PASSWORD` of the `database`, default value `root`
**Note**: This environment variable must be specified when running the `master-server`, `worker-server`, `api-server`, or `alert-server` services of `dolphinscheduler`, so that you can better set up distributed services.
**`DATABASE_DATABASE`**
Configures the `DATABASE` of the `database`, default value `dolphinscheduler`
**Note**: This environment variable must be specified when running the `master-server`, `worker-server`, `api-server`, or `alert-server` services of `dolphinscheduler`, so that you can better set up distributed services.
**`DATABASE_PARAMS`**
Configures the `PARAMS` of the `database`, default value `characterEncoding=utf8`
**Note**: This environment variable must be specified when running the `master-server`, `worker-server`, `api-server`, or `alert-server` services of `dolphinscheduler`, so that you can better set up distributed services.

14
docker/build/conf/dolphinscheduler/alert.properties.tpl

@ -35,16 +35,22 @@ mail.smtp.ssl.trust=${MAIL_SMTP_SSL_TRUST}
# xls file path, needs to be created if it does not exist
xls.file.path=${XLS_FILE_PATH}
# plugins dir
plugin.dir=${ALERT_PLUGIN_DIR}
# Enterprise WeChat configuration
enterprise.wechat.enable=${ENTERPRISE_WECHAT_ENABLE}
enterprise.wechat.corp.id=${ENTERPRISE_WECHAT_CORP_ID}
enterprise.wechat.secret=${ENTERPRISE_WECHAT_SECRET}
enterprise.wechat.agent.id=${ENTERPRISE_WECHAT_AGENT_ID}
enterprise.wechat.users=${ENTERPRISE_WECHAT_USERS}
enterprise.wechat.token.url=https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret
enterprise.wechat.push.url=https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token
enterprise.wechat.team.send.msg={\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}
enterprise.wechat.user.send.msg={\"touser\":\"$toUser\",\"agentid\":\"$agentId\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"$msg\"}}
enterprise.wechat.token.url=https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid={corpId}&corpsecret={secret}
enterprise.wechat.push.url=https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token={token}
enterprise.wechat.team.send.msg={\"toparty\":\"{toParty}\",\"agentid\":\"{agentId}\",\"msgtype\":\"text\",\"text\":{\"content\":\"{msg}\"},\"safe\":\"0\"}
enterprise.wechat.user.send.msg={\"touser\":\"{toUser}\",\"agentid\":\"{agentId}\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"{msg}\"}}
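The `${...}` placeholders in these `.tpl` files are expanded from environment variables when the container starts; a minimal sketch of such a rendering pass is shown below (the conf path and the `eval`/heredoc approach are assumptions, not the image's exact code):
```
# expand ${VAR} references in every *.tpl against the current environment
# (a sketch; the real startup scripts may differ)
for tpl in /opt/dolphinscheduler/conf/*.tpl; do
    eval "cat << EOF
$(cat ${tpl})
EOF" > ${tpl%.tpl}
done
```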

12
docker/build/conf/dolphinscheduler/common.properties.tpl

@ -25,13 +25,13 @@ dolphinscheduler.env.path=${DOLPHINSCHEDULER_ENV_PATH}
data.basedir.path=${DOLPHINSCHEDULER_DATA_BASEDIR_PATH}
# resource upload startup type : HDFS,S3,NONE
resource.storage.type=NONE
resource.storage.type=${RESOURCE_STORAGE_TYPE}
#============================================================================
# HDFS
#============================================================================
# resource storage path on HDFS/S3; resource files will be stored under this hadoop hdfs path. Configure it yourself and please make sure the directory exists on hdfs and has read/write permissions. "/dolphinscheduler" is recommended
#resource.upload.path=/dolphinscheduler
resource.upload.path=${RESOURCE_UPLOAD_PATH}
# whether kerberos starts
#hadoop.security.authentication.startup.state=false
@ -58,16 +58,16 @@ kerberos.expire.time=7
# S3
#============================================================================
# if resource.storage.type=S3, the value is like: s3a://dolphinscheduler; if resource.storage.type=HDFS and namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml to the conf dir
fs.defaultFS=hdfs://mycluster:8020
fs.defaultFS=${FS_DEFAULT_FS}
# if resource.storage.type=S3,s3 endpoint
#fs.s3a.endpoint=http://192.168.199.91:9010
fs.s3a.endpoint=${FS_S3A_ENDPOINT}
# if resource.storage.type=S3,s3 access key
#fs.s3a.access.key=A3DXS30FO22544RE
fs.s3a.access.key=${FS_S3A_ACCESS_KEY}
# if resource.storage.type=S3,s3 secret key
#fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK
fs.s3a.secret.key=${FS_S3A_SECRET_KEY}
# if you do not use the hadoop resourcemanager, please keep the default value; if resourcemanager HA is enabled, please type the HA ips; if there is a single resourcemanager, make this value empty TODO
yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx

15
docker/build/conf/dolphinscheduler/datasource.properties.tpl

@ -15,16 +15,11 @@
# limitations under the License.
#
# mysql
#spring.datasource.driver-class-name=com.mysql.jdbc.Driver
#spring.datasource.url=jdbc:mysql://192.168.xx.xx:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8
# postgre
spring.datasource.driver-class-name=org.postgresql.Driver
spring.datasource.url=jdbc:postgresql://${POSTGRESQL_HOST}:${POSTGRESQL_PORT}/${POSTGRESQL_DATABASE}?characterEncoding=utf8
spring.datasource.username=${POSTGRESQL_USERNAME}
spring.datasource.password=${POSTGRESQL_PASSWORD}
# db
spring.datasource.driver-class-name=${DATABASE_DRIVER}
spring.datasource.url=jdbc:${DATABASE_TYPE}://${DATABASE_HOST}:${DATABASE_PORT}/${DATABASE_DATABASE}?${DATABASE_PARAMS}
spring.datasource.username=${DATABASE_USERNAME}
spring.datasource.password=${DATABASE_PASSWORD}
## base spring data source configuration todo need to remove
#spring.datasource.type=com.alibaba.druid.pool.DruidDataSource
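With the default values (see `startup-init-conf.sh` below), the rendered URL resolves to a local PostgreSQL; a quick way to preview the substitution:
```
$ export DATABASE_TYPE="postgresql" DATABASE_HOST="127.0.0.1" DATABASE_PORT="5432"
$ export DATABASE_DATABASE="dolphinscheduler" DATABASE_PARAMS="characterEncoding=utf8"
$ echo "jdbc:${DATABASE_TYPE}://${DATABASE_HOST}:${DATABASE_PORT}/${DATABASE_DATABASE}?${DATABASE_PARAMS}"
jdbc:postgresql://127.0.0.1:5432/dolphinscheduler?characterEncoding=utf8
```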

2
docker/build/conf/dolphinscheduler/logback/logback-alert.xml

@ -46,7 +46,7 @@
<root level="INFO">
<appender-ref ref="STDOUT"/>
<appender-ref ref="APILOGFILE"/>
<appender-ref ref="ALERTLOGFILE"/>
</root>
</configuration>

2
docker/build/conf/dolphinscheduler/zookeeper.properties.tpl

@ -19,7 +19,7 @@
zookeeper.quorum=${ZOOKEEPER_QUORUM}
# dolphinscheduler root directory
#zookeeper.dolphinscheduler.root=/dolphinscheduler
zookeeper.dolphinscheduler.root=${ZOOKEEPER_ROOT}
# dolphinscheduler failover directory
#zookeeper.session.timeout=60000

23
docker/build/startup-init-conf.sh

@ -24,22 +24,33 @@ echo "init env variables"
#============================================================================
# Database Source
#============================================================================
export POSTGRESQL_HOST=${POSTGRESQL_HOST:-"127.0.0.1"}
export POSTGRESQL_PORT=${POSTGRESQL_PORT:-"5432"}
export POSTGRESQL_USERNAME=${POSTGRESQL_USERNAME:-"root"}
export POSTGRESQL_PASSWORD=${POSTGRESQL_PASSWORD:-"root"}
export POSTGRESQL_DATABASE=${POSTGRESQL_DATABASE:-"dolphinscheduler"}
export DATABASE_HOST=${DATABASE_HOST:-"127.0.0.1"}
export DATABASE_PORT=${DATABASE_PORT:-"5432"}
export DATABASE_USERNAME=${DATABASE_USERNAME:-"root"}
export DATABASE_PASSWORD=${DATABASE_PASSWORD:-"root"}
export DATABASE_DATABASE=${DATABASE_DATABASE:-"dolphinscheduler"}
export DATABASE_TYPE=${DATABASE_TYPE:-"postgresql"}
export DATABASE_DRIVER=${DATABASE_DRIVER:-"org.postgresql.Driver"}
export DATABASE_PARAMS=${DATABASE_PARAMS:-"characterEncoding=utf8"}
#============================================================================
# System
#============================================================================
export DOLPHINSCHEDULER_ENV_PATH=${DOLPHINSCHEDULER_ENV_PATH:-"/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh"}
export DOLPHINSCHEDULER_DATA_BASEDIR_PATH=${DOLPHINSCHEDULER_DATA_BASEDIR_PATH:-"/tmp/dolphinscheduler"}
export DOLPHINSCHEDULER_OPTS=${DOLPHINSCHEDULER_OPTS:-""}
export RESOURCE_STORAGE_TYPE=${RESOURCE_STORAGE_TYPE:-"NONE"}
export RESOURCE_UPLOAD_PATH=${RESOURCE_UPLOAD_PATH:-"/ds"}
export FS_DEFAULT_FS=${FS_DEFAULT_FS:-"s3a://xxxx"}
export FS_S3A_ENDPOINT=${FS_S3A_ENDPOINT:-"s3.xxx.amazonaws.com"}
export FS_S3A_ACCESS_KEY=${FS_S3A_ACCESS_KEY:-"xxxxxxx"}
export FS_S3A_SECRET_KEY=${FS_S3A_SECRET_KEY:-"xxxxxxx"}
#============================================================================
# Zookeeper
#============================================================================
export ZOOKEEPER_QUORUM=${ZOOKEEPER_QUORUM:-"127.0.0.1:2181"}
export ZOOKEEPER_ROOT=${ZOOKEEPER_ROOT:-"/dolphinscheduler"}
#============================================================================
# Master Server
@ -67,6 +78,8 @@ export WORKER_GROUP=${WORKER_GROUP:-"default"}
#============================================================================
# Alert Server
#============================================================================
# alert plugin dir
export ALERT_PLUGIN_DIR=${ALERT_PLUGIN_DIR:-"/opt/dolphinscheduler"}
# XLS FILE
export XLS_FILE_PATH=${XLS_FILE_PATH:-"/tmp/xls"}
# mail
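Every variable above falls back to a default through the `${VAR:-...}` expansion, so enabling S3 resource storage only requires overriding the relevant ones at `docker run` time; a sketch (bucket, endpoint, and keys are placeholders):
```
$ docker run -dit --name dolphinscheduler \
-e RESOURCE_STORAGE_TYPE="S3" -e RESOURCE_UPLOAD_PATH="/dolphinscheduler" \
-e FS_DEFAULT_FS="s3a://mybucket" -e FS_S3A_ENDPOINT="s3.xxx.amazonaws.com" \
-e FS_S3A_ACCESS_KEY="xxxxxxx" -e FS_S3A_SECRET_KEY="xxxxxxx" \
-p 8888:8888 \
dolphinscheduler all
```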

40
docker/build/startup.sh

@ -22,24 +22,32 @@ DOLPHINSCHEDULER_BIN=${DOLPHINSCHEDULER_HOME}/bin
DOLPHINSCHEDULER_SCRIPT=${DOLPHINSCHEDULER_HOME}/script
DOLPHINSCHEDULER_LOGS=${DOLPHINSCHEDULER_HOME}/logs
# start postgresql
initPostgreSQL() {
echo "test postgresql service"
while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
# start database
initDatabase() {
echo "test ${DATABASE_TYPE} service"
while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do
counter=$((counter+1))
if [ $counter == 30 ]; then
echo "Error: Couldn't connect to postgresql."
echo "Error: Couldn't connect to ${DATABASE_TYPE}."
exit 1
fi
echo "Trying to connect to postgresql at ${POSTGRESQL_HOST}:${POSTGRESQL_PORT}. Attempt $counter."
echo "Trying to connect to ${DATABASE_TYPE} at ${DATABASE_HOST}:${DATABASE_PORT}. Attempt $counter."
sleep 5
done
echo "connect postgresql service"
v=$(sudo -u postgres PGPASSWORD=${POSTGRESQL_PASSWORD} psql -h ${POSTGRESQL_HOST} -p ${POSTGRESQL_PORT} -U ${POSTGRESQL_USERNAME} -d dolphinscheduler -tAc "select 1")
if [ "$(echo '${v}' | grep 'FATAL' | wc -l)" -eq 1 ]; then
echo "Error: Can't connect to database...${v}"
exit 1
echo "connect ${DATABASE_TYPE} service"
if [ ${DATABASE_TYPE} = "mysql" ]; then
v=$(mysql -h${DATABASE_HOST} -P${DATABASE_PORT} -u${DATABASE_USERNAME} --password=${DATABASE_PASSWORD} -D ${DATABASE_DATABASE} -e "select 1" 2>&1)
if [ "$(echo '${v}' | grep 'ERROR' | wc -l)" -eq 1 ]; then
echo "Error: Can't connect to database...${v}"
exit 1
fi
else
v=$(sudo -u postgres PGPASSWORD=${DATABASE_PASSWORD} psql -h ${DATABASE_HOST} -p ${DATABASE_PORT} -U ${DATABASE_USERNAME} -d ${DATABASE_DATABASE} -tAc "select 1")
if [ "$(echo '${v}' | grep 'FATAL' | wc -l)" -eq 1 ]; then
echo "Error: Can't connect to database...${v}"
exit 1
fi
fi
echo "import sql data"
@ -123,7 +131,7 @@ LOGFILE=/var/log/nginx/access.log
case "$1" in
(all)
initZK
initPostgreSQL
initDatabase
initMasterServer
initWorkerServer
initApiServer
@ -134,25 +142,25 @@ case "$1" in
;;
(master-server)
initZK
initPostgreSQL
initDatabase
initMasterServer
LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-master.log
;;
(worker-server)
initZK
initPostgreSQL
initDatabase
initWorkerServer
initLoggerServer
LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-worker.log
;;
(api-server)
initZK
initPostgreSQL
initDatabase
initApiServer
LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-api-server.log
;;
(alert-server)
initPostgreSQL
initDatabase
initAlertServer
LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-alert.log
;;
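For `DATABASE_TYPE=mysql`, the connectivity probe in `initDatabase` boils down to a single `select 1`; you can reproduce it by hand to debug a failing startup (host and credentials are placeholders):
```
$ mysql -h192.168.x.x -P3306 -utest --password=test -D dolphinscheduler -e "select 1"
```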

53
docker/docker-swarm/docker-compose.yml

@ -56,11 +56,11 @@ services:
- 12345:12345
environment:
TZ: Asia/Shanghai
POSTGRESQL_HOST: dolphinscheduler-postgresql
POSTGRESQL_PORT: 5432
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
DATABASE_HOST: dolphinscheduler-postgresql
DATABASE_PORT: 5432
DATABASE_USERNAME: root
DATABASE_PASSWORD: root
DATABASE_DATABASE: dolphinscheduler
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "ApiApplicationServer"]
@ -72,7 +72,7 @@ services:
- dolphinscheduler-postgresql
- dolphinscheduler-zookeeper
volumes:
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
- ./dolphinscheduler-logs:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler
@ -95,7 +95,7 @@ services:
depends_on:
- dolphinscheduler-api
volumes:
- dolphinscheduler-logs:/var/log/nginx
- ./dolphinscheduler-logs:/var/log/nginx
networks:
- dolphinscheduler
@ -119,11 +119,11 @@ services:
ENTERPRISE_WECHAT_SECRET: ""
ENTERPRISE_WECHAT_AGENT_ID: ""
ENTERPRISE_WECHAT_USERS: ""
POSTGRESQL_HOST: dolphinscheduler-postgresql
POSTGRESQL_PORT: 5432
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
DATABASE_HOST: dolphinscheduler-postgresql
DATABASE_PORT: 5432
DATABASE_USERNAME: root
DATABASE_PASSWORD: root
DATABASE_DATABASE: dolphinscheduler
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "AlertServer"]
interval: 30s
@ -133,7 +133,7 @@ services:
depends_on:
- dolphinscheduler-postgresql
volumes:
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
- ./dolphinscheduler-logs:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler
@ -152,11 +152,11 @@ services:
MASTER_TASK_COMMIT_INTERVAL: "1000"
MASTER_MAX_CPULOAD_AVG: "100"
MASTER_RESERVED_MEMORY: "0.1"
POSTGRESQL_HOST: dolphinscheduler-postgresql
POSTGRESQL_PORT: 5432
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
DATABASE_HOST: dolphinscheduler-postgresql
DATABASE_PORT: 5432
DATABASE_USERNAME: root
DATABASE_PASSWORD: root
DATABASE_DATABASE: dolphinscheduler
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "MasterServer"]
@ -168,7 +168,7 @@ services:
- dolphinscheduler-postgresql
- dolphinscheduler-zookeeper
volumes:
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
- ./dolphinscheduler-logs:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler
@ -188,11 +188,11 @@ services:
WORKER_RESERVED_MEMORY: "0.1"
WORKER_GROUP: "default"
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
POSTGRESQL_HOST: dolphinscheduler-postgresql
POSTGRESQL_PORT: 5432
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
DATABASE_HOST: dolphinscheduler-postgresql
DATABASE_PORT: 5432
DATABASE_USERNAME: root
DATABASE_PASSWORD: root
DATABASE_DATABASE: dolphinscheduler
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "WorkerServer"]
@ -210,9 +210,7 @@ services:
- type: volume
source: dolphinscheduler-worker-data
target: /tmp/dolphinscheduler
- type: volume
source: dolphinscheduler-logs
target: /opt/dolphinscheduler/logs
- ./dolphinscheduler-logs:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler
@ -225,7 +223,6 @@ volumes:
dolphinscheduler-postgresql-initdb:
dolphinscheduler-zookeeper:
dolphinscheduler-worker-data:
dolphinscheduler-logs:
configs:
dolphinscheduler-worker-task-env:
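Note that the compose file now bind-mounts a `./dolphinscheduler-logs` directory instead of the removed `dolphinscheduler-logs` named volume, so it is safest to create the host directory before bringing the stack up:
```
$ mkdir -p ./dolphinscheduler-logs
$ docker-compose up -d
```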

52
docker/docker-swarm/docker-stack.yml

@ -20,13 +20,13 @@ services:
dolphinscheduler-postgresql:
image: bitnami/postgresql:latest
ports:
- 5432:5432
environment:
TZ: Asia/Shanghai
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
ports:
- 5432:5432
volumes:
- dolphinscheduler-postgresql:/bitnami/postgresql
networks:
@ -37,12 +37,12 @@ services:
dolphinscheduler-zookeeper:
image: bitnami/zookeeper:latest
ports:
- 2181:2181
environment:
TZ: Asia/Shanghai
ALLOW_ANONYMOUS_LOGIN: "yes"
ZOO_4LW_COMMANDS_WHITELIST: srvr,ruok,wchs,cons
ports:
- 2181:2181
volumes:
- dolphinscheduler-zookeeper:/bitnami/zookeeper
networks:
@ -54,16 +54,16 @@ services:
dolphinscheduler-api:
image: apache/dolphinscheduler:latest
command: ["api-server"]
ports:
- 12345:12345
environment:
TZ: Asia/Shanghai
POSTGRESQL_HOST: dolphinscheduler-postgresql
POSTGRESQL_PORT: 5432
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
DATABASE_HOST: dolphinscheduler-postgresql
DATABASE_PORT: 5432
DATABASE_USERNAME: root
DATABASE_PASSWORD: root
DATABASE_DATABASE: dolphinscheduler
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
ports:
- 12345:12345
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "ApiApplicationServer"]
interval: 30
@ -120,11 +120,11 @@ services:
ENTERPRISE_WECHAT_SECRET: ""
ENTERPRISE_WECHAT_AGENT_ID: ""
ENTERPRISE_WECHAT_USERS: ""
POSTGRESQL_HOST: dolphinscheduler-postgresql
POSTGRESQL_PORT: 5432
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
DATABASE_HOST: dolphinscheduler-postgresql
DATABASE_PORT: 5432
DATABASE_USERNAME: root
DATABASE_PASSWORD: root
DATABASE_DATABASE: dolphinscheduler
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "AlertServer"]
interval: 30
@ -153,11 +153,11 @@ services:
MASTER_TASK_COMMIT_INTERVAL: "1000"
MASTER_MAX_CPULOAD_AVG: "100"
MASTER_RESERVED_MEMORY: "0.1"
POSTGRESQL_HOST: dolphinscheduler-postgresql
POSTGRESQL_PORT: 5432
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
DATABASE_HOST: dolphinscheduler-postgresql
DATABASE_PORT: 5432
DATABASE_USERNAME: root
DATABASE_PASSWORD: root
DATABASE_DATABASE: dolphinscheduler
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "MasterServer"]
@ -188,11 +188,11 @@ services:
WORKER_RESERVED_MEMORY: "0.1"
WORKER_GROUP: "default"
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
POSTGRESQL_HOST: dolphinscheduler-postgresql
POSTGRESQL_PORT: 5432
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
DATABASE_HOST: dolphinscheduler-postgresql
DATABASE_PORT: 5432
DATABASE_USERNAME: root
DATABASE_PASSWORD: root
DATABASE_DATABASE: dolphinscheduler
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "WorkerServer"]

2
docker/kubernetes/dolphinscheduler/Chart.yaml

@ -49,4 +49,4 @@ dependencies:
- name: zookeeper
version: 5.x.x
repository: https://charts.bitnami.com/bitnami
condition: redis.enabled
condition: zookeeper.enabled

42
docker/kubernetes/dolphinscheduler/README.md

@ -46,8 +46,8 @@ The following tables lists the configurable parameters of the Dolphins Scheduler
| `image.repository` | Docker image repository for the Dolphins Scheduler | `dolphinscheduler` |
| `image.tag` | Docker image version for the Dolphins Scheduler | `1.2.1` |
| `image.imagePullPolicy` | Image pull policy. One of Always, Never, IfNotPresent | `IfNotPresent` |
| `imagePullSecrets` | ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images | `[]` |
| | | |
| `image.pullSecrets` | PullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images | `[]` |
| | | |
| `postgresql.enabled` | If no external PostgreSQL exists, by default the Dolphins Scheduler will use an internal PostgreSQL | `true` |
| `postgresql.postgresqlUsername` | The username for internal PostgreSQL | `root` |
| `postgresql.postgresqlPassword` | The password for internal PostgreSQL | `root` |
@ -55,12 +55,15 @@ The following tables lists the configurable parameters of the Dolphins Scheduler
| `postgresql.persistence.enabled` | Set `postgresql.persistence.enabled` to `true` to mount a new volume for internal PostgreSQL | `false` |
| `postgresql.persistence.size` | `PersistentVolumeClaim` Size | `20Gi` |
| `postgresql.persistence.storageClass` | PostgreSQL data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `externalDatabase.host` | If an external PostgreSQL exists and `postgresql.enabled` is set to `false`, Dolphins Scheduler's database host will use it. | `localhost` |
| `externalDatabase.type` | If an external database exists and `postgresql.enabled` is set to `false`, Dolphins Scheduler's database type will use it. | `postgresql` |
| `externalDatabase.driver` | If an external database exists and `postgresql.enabled` is set to `false`, Dolphins Scheduler's database driver will use it. | `org.postgresql.Driver` |
| `externalDatabase.host` | If an external database exists and `postgresql.enabled` is set to `false`, Dolphins Scheduler's database host will use it. | `localhost` |
| `externalDatabase.port` | If an external database exists and `postgresql.enabled` is set to `false`, Dolphins Scheduler's database port will use it. | `5432` |
| `externalDatabase.username` | If an external database exists and `postgresql.enabled` is set to `false`, Dolphins Scheduler's database username will use it. | `root` |
| `externalDatabase.password` | If an external database exists and `postgresql.enabled` is set to `false`, Dolphins Scheduler's database password will use it. | `root` |
| `externalDatabase.database` | If an external database exists and `postgresql.enabled` is set to `false`, Dolphins Scheduler's database name will use it. | `dolphinscheduler` |
| | | |
| `externalDatabase.params` | If an external database exists and `postgresql.enabled` is set to `false`, Dolphins Scheduler's database params will use it. | `characterEncoding=utf8` |
| | | |
| `zookeeper.enabled` | If no external Zookeeper exists, by default the Dolphin Scheduler will use an internal Zookeeper | `true` |
| `zookeeper.taskQueue` | Specify task queue for `master` and `worker` | `zookeeper` |
| `zookeeper.persistence.enabled` | Set `zookeeper.persistence.enabled` to `true` to mount a new volume for internal Zookeeper | `false` |
@ -68,12 +71,25 @@ The following tables lists the configurable parameters of the Dolphins Scheduler
| `zookeeper.persistence.storageClass` | Zookeeper data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `externalZookeeper.taskQueue` | If an external Zookeeper exists and `zookeeper.enabled` is set to `false`, specify the task queue for `master` and `worker` | `zookeeper` |
| `externalZookeeper.zookeeperQuorum` | If an external Zookeeper exists and `zookeeper.enabled` is set to `false`, specify the Zookeeper quorum | `127.0.0.1:2181` |
| | | |
| `externalZookeeper.zookeeperRoot` | If an external Zookeeper exists and `zookeeper.enabled` is set to `false`, specify the Zookeeper root path for `master` and `worker` | `dolphinscheduler` |
| | | |
| `common.configmap.DOLPHINSCHEDULER_ENV_PATH` | Extra env file path. | `/tmp/dolphinscheduler/env` |
| `common.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH` | File upload path of DS. | `/tmp/dolphinscheduler/files` |
| `common.configmap.RESOURCE_STORAGE_TYPE` | Resource storage type, supported types are: S3, HDFS, NONE. | `NONE` |
| `common.configmap.RESOURCE_UPLOAD_PATH` | The base path of resource. | `/ds` |
| `common.configmap.FS_DEFAULT_FS` | The default fs of resource; for s3 it is the `s3a` prefix plus the bucket name. | `s3a://xxxx` |
| `common.configmap.FS_S3A_ENDPOINT` | If the resource type is `S3`, you should fill in this field with the endpoint of s3. | `s3.xxx.amazonaws.com` |
| `common.configmap.FS_S3A_ACCESS_KEY` | The access key for your s3 bucket. | `xxxxxxx` |
| `common.configmap.FS_S3A_SECRET_KEY` | The secret key for your s3 bucket. | `xxxxxxx` |
| `master.podManagementPolicy` | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down | `Parallel` |
| | | |
| `master.replicas` | Replicas is the desired number of replicas of the given Template | `3` |
| `master.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `master.tolerations` | If specified, the pod's tolerations | `{}` |
| `master.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `master.jvmOptions` | The JVM options for master server. | `""` |
| `master.resources` | The `resource` limit and request config for master server. | `{}` |
| `master.annotations` | The `annotations` for master server. | `{}` |
| `master.configmap.MASTER_EXEC_THREADS` | Master execute thread num | `100` |
| `master.configmap.MASTER_EXEC_TASK_NUM` | Master execute task number in parallel | `20` |
| `master.configmap.MASTER_HEARTBEAT_INTERVAL` | Master heartbeat interval | `10` |
@ -97,12 +113,15 @@ The following tables lists the configurable parameters of the Dolphins Scheduler
| `master.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `master.persistentVolumeClaim.storageClassName` | `Master` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `master.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| | | |
| `worker.podManagementPolicy` | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down | `Parallel` |
| `worker.replicas` | Replicas is the desired number of replicas of the given Template | `3` |
| `worker.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `worker.tolerations` | If specified, the pod's tolerations | `{}` |
| `worker.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `worker.jvmOptions` | The JVM options for worker server. | `""` |
| `worker.resources` | The `resource` limit and request config for worker server. | `{}` |
| `worker.annotations` | The `annotations` for worker server. | `{}` |
| `worker.configmap.WORKER_EXEC_THREADS` | Worker execute thread num | `100` |
| `worker.configmap.WORKER_HEARTBEAT_INTERVAL` | Worker heartbeat interval | `10` |
| `worker.configmap.WORKER_FETCH_TASK_NUM` | Submit the number of tasks at a time | `3` |
@ -131,7 +150,7 @@ The following tables lists the configurable parameters of the Dolphins Scheduler
| `worker.persistentVolumeClaim.logsPersistentVolume.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `worker.persistentVolumeClaim.logsPersistentVolume.storageClassName` | `Worker` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `worker.persistentVolumeClaim.logsPersistentVolume.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| | | |
| `alert.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
| `alert.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
| `alert.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
@ -139,6 +158,10 @@ The following tables lists the configurable parameters of the Dolphins Scheduler
| `alert.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `alert.tolerations` | If specified, the pod's tolerations | `{}` |
| `alert.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `alert.jvmOptions` | The JVM options for alert server. | `""` |
| `alert.resources` | The `resource` limit and request config for alert server. | `{}` |
| `alert.annotations` | The `annotations` for alert server. | `{}` |
| `alert.configmap.ALERT_PLUGIN_DIR` | Alert plugin path. | `/opt/dolphinscheduler/alert/plugin` |
| `alert.configmap.XLS_FILE_PATH` | XLS file path | `/tmp/xls` |
| `alert.configmap.MAIL_SERVER_HOST` | Mail `SERVER HOST` | `nil` |
| `alert.configmap.MAIL_SERVER_PORT` | Mail `SERVER PORT` | `nil` |
@ -177,6 +200,9 @@ The following tables lists the configurable parameters of the Dolphins Scheduler
| `api.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `api.tolerations` | If specified, the pod's tolerations | `{}` |
| `api.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `api.jvmOptions` | The JVM options for api server. | `""` |
| `api.resources` | The `resource` limit and request config for api server. | `{}` |
| `api.annotations` | The `annotations` for api server. | `{}` |
| `api.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `api.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `api.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
@ -201,6 +227,8 @@ The following tables lists the configurable parameters of the Dolphins Scheduler
| `frontend.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `frontend.tolerations` | If specified, the pod's tolerations | `{}` |
| `frontend.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `frontend.resources` | The `resource` limit and request config for frontend server. | `{}` |
| `frontend.annotations` | The `annotations` for frontend server. | `{}` |
| `frontend.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `frontend.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `frontend.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
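Putting the new `externalDatabase.*` parameters together, installing the chart against an external MySQL might look like the sketch below (Helm 3 syntax; the release name and connection values are placeholders):
```
$ helm install dolphinscheduler . \
  --set postgresql.enabled=false \
  --set externalDatabase.type=mysql \
  --set externalDatabase.driver=com.mysql.jdbc.Driver \
  --set externalDatabase.host=192.168.x.x \
  --set externalDatabase.port=3306 \
  --set externalDatabase.username=test \
  --set externalDatabase.password=test \
  --set externalDatabase.database=dolphinscheduler \
  --set externalDatabase.params=characterEncoding=utf8
```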

25
docker/kubernetes/dolphinscheduler/requirements.yaml

@ -1,25 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
dependencies:
- name: postgresql
version: 8.x.x
repository: https://charts.bitnami.com/bitnami
condition: postgresql.enabled
- name: zookeeper
version: 5.x.x
repository: https://charts.bitnami.com/bitnami
condition: redis.enabled

13
docker/kubernetes/dolphinscheduler/templates/NOTES.txt

@ -29,16 +29,3 @@
kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "dolphinscheduler.fullname" . }}-frontend 8888:8888
{{- end }}
2. Get the Dolphinscheduler URL by running:
{{- if .Values.ingress.enabled }}
export HOSTNAME=$(kubectl get ingress --namespace {{ .Release.Namespace }} {{ template "dolphinscheduler.fullname" . }} -o jsonpath='{.spec.rules[0].host}')
echo "Dolphinscheduler URL: http://$HOSTNAME/"
{{- else }}
kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "dolphinscheduler.fullname" . }}-frontend 8888:8888
{{- end }}

8
docker/kubernetes/dolphinscheduler/templates/_helpers.tpl

@ -96,6 +96,14 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
{{- printf "%s/%s:%s" (include "dolphinscheduler.image.registry" .) .Values.image.repository .Values.image.tag -}}
{{- end -}}
{{/*
Create default image pull secrets.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dolphinscheduler.image.pullSecrets" -}}
{{- default nil .Values.image.pullSecrets -}}
{{- end -}}
{{/*
Create a default fully qualified postgresql name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).

1
docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-alert.yaml

@ -24,6 +24,7 @@ metadata:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
data:
ALERT_PLUGIN_DIR: {{ .Values.alert.configmap.ALERT_PLUGIN_DIR | quote }}
XLS_FILE_PATH: {{ .Values.alert.configmap.XLS_FILE_PATH | quote }}
MAIL_SERVER_HOST: {{ .Values.alert.configmap.MAIL_SERVER_HOST | quote }}
MAIL_SERVER_PORT: {{ .Values.alert.configmap.MAIL_SERVER_PORT | quote }}

35
docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-common.yaml

@ -0,0 +1,35 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{{- if .Values.common.configmap }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-common
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-common
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
data:
DOLPHINSCHEDULER_ENV_PATH: {{ .Values.common.configmap.DOLPHINSCHEDULER_ENV_PATH | quote }}
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: {{ .Values.common.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH | quote }}
RESOURCE_STORAGE_TYPE: {{ .Values.common.configmap.RESOURCE_STORAGE_TYPE | quote }}
RESOURCE_UPLOAD_PATH: {{ .Values.common.configmap.RESOURCE_UPLOAD_PATH | quote }}
FS_DEFAULT_FS: {{ .Values.common.configmap.FS_DEFAULT_FS | quote }}
FS_S3A_ENDPOINT: {{ .Values.common.configmap.FS_S3A_ENDPOINT | quote }}
FS_S3A_ACCESS_KEY: {{ .Values.common.configmap.FS_S3A_ACCESS_KEY | quote }}
FS_S3A_SECRET_KEY: {{ .Values.common.configmap.FS_S3A_SECRET_KEY | quote }}
{{- end }}

98
docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml

@ -43,6 +43,10 @@ spec:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: alert
{{- if .Values.alert.annotations }}
annotations:
{{- toYaml .Values.alert.annotations | nindent 8 }}
{{- end }}
spec:
{{- if .Values.alert.affinity }}
affinity: {{- toYaml .Values.alert.affinity | nindent 8 }}
@ -54,34 +58,38 @@ spec:
tolerations: {{- toYaml . | nindent 8 }}
{{- end }}
initContainers:
- name: init-postgresql
- name: init-database
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do
counter=$((counter+1))
if [ $counter == 5 ]; then
echo "Error: Couldn't connect to postgresql."
echo "Error: Couldn't connect to database."
exit 1
fi
echo "Trying to connect to postgresql at ${POSTGRESQL_HOST}:${POSTGRESQL_PORT}. Attempt $counter."
echo "Trying to connect to database at ${DATABASE_HOST}:${DATABASE_PORT}. Attempt $counter."
sleep 60
done
env:
- name: POSTGRESQL_HOST
- name: DATABASE_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
- name: DATABASE_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
- name: {{ include "dolphinscheduler.image.pullSecrets" . }}
{{- end }}
containers:
- name: {{ include "dolphinscheduler.fullname" . }}-alert
image: {{ include "dolphinscheduler.image.repository" . | quote }}
@ -89,8 +97,15 @@ spec:
- "alert-server"
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: DOLPHINSCHEDULER_OPTS
value: {{ default "" .Values.alert.jvmOptions }}
- name: TZ
value: {{ .Values.timezone }}
- name: ALERT_PLUGIN_DIR
valueFrom:
configMapKeyRef:
key: ALERT_PLUGIN_DIR
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: XLS_FILE_PATH
valueFrom:
configMapKeyRef:
@ -161,25 +176,37 @@ spec:
configMapKeyRef:
key: ENTERPRISE_WECHAT_USERS
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: POSTGRESQL_HOST
- name: DATABASE_TYPE
{{- if .Values.postgresql.enabled }}
value: "postgresql"
{{- else }}
value: {{ .Values.externalDatabase.type | quote }}
{{- end }}
- name: DATABASE_DRIVER
{{- if .Values.postgresql.enabled }}
value: "org.postgresql.Driver"
{{- else }}
value: {{ .Values.externalDatabase.driver | quote }}
{{- end }}
- name: DATABASE_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
- name: DATABASE_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
- name: POSTGRESQL_USERNAME
- name: DATABASE_USERNAME
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlUsername }}
{{- else }}
value: {{ .Values.externalDatabase.username | quote }}
{{- end }}
- name: POSTGRESQL_PASSWORD
- name: DATABASE_PASSWORD
valueFrom:
secretKeyRef:
{{- if .Values.postgresql.enabled }}
@ -189,12 +216,57 @@ spec:
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
key: db-password
{{- end }}
- name: POSTGRESQL_DATABASE
- name: DATABASE_DATABASE
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlDatabase }}
{{- else }}
value: {{ .Values.externalDatabase.database | quote }}
{{- end }}
- name: DATABASE_PARAMS
{{- if .Values.postgresql.enabled }}
value: "characterEncoding=utf8"
{{- else }}
value: {{ .Values.externalDatabase.params | quote }}
{{- end }}
- name: RESOURCE_STORAGE_TYPE
valueFrom:
configMapKeyRef:
key: RESOURCE_STORAGE_TYPE
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: RESOURCE_UPLOAD_PATH
valueFrom:
configMapKeyRef:
key: RESOURCE_UPLOAD_PATH
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_DEFAULT_FS
valueFrom:
configMapKeyRef:
key: FS_DEFAULT_FS
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_ENDPOINT
valueFrom:
configMapKeyRef:
key: FS_S3A_ENDPOINT
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_ACCESS_KEY
valueFrom:
configMapKeyRef:
key: FS_S3A_ACCESS_KEY
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_SECRET_KEY
valueFrom:
configMapKeyRef:
key: FS_S3A_SECRET_KEY
name: {{ include "dolphinscheduler.fullname" . }}-common
{{- if .Values.alert.resources }}
resources:
limits:
memory: {{ .Values.alert.resources.limits.memory }}
cpu: {{ .Values.alert.resources.limits.cpu }}
requests:
memory: {{ .Values.alert.resources.requests.memory }}
cpu: {{ .Values.alert.resources.requests.cpu }}
{{- end }}
{{- if .Values.alert.livenessProbe.enabled }}
livenessProbe:
exec:

93
docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml

@ -43,6 +43,10 @@ spec:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: api
{{- if .Values.api.annotations }}
annotations:
{{- toYaml .Values.api.annotations | nindent 8 }}
{{- end }}
spec:
{{- if .Values.api.affinity }}
affinity: {{- toYaml .Values.api.affinity | nindent 8 }}
@ -54,34 +58,38 @@ spec:
tolerations: {{- toYaml . | nindent 8 }}
{{- end }}
initContainers:
- name: init-postgresql
- name: init-database
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do
counter=$((counter+1))
if [ $counter == 5 ]; then
echo "Error: Couldn't connect to postgresql."
echo "Error: Couldn't connect to database."
exit 1
fi
echo "Trying to connect to postgresql at ${POSTGRESQL_HOST}:${POSTGRESQL_PORT}. Attempt $counter."
echo "Trying to connect to database at ${DATABASE_HOST}:${DATABASE_PORT}. Attempt $counter."
sleep 60
done
env:
- name: POSTGRESQL_HOST
- name: DATABASE_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
- name: DATABASE_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
- name: {{ include "dolphinscheduler.image.pullSecrets" . }}
{{- end }}
containers:
- name: {{ include "dolphinscheduler.fullname" . }}-api
image: {{ include "dolphinscheduler.image.repository" . | quote }}
@ -92,27 +100,41 @@ spec:
name: tcp-port
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: DOLPHINSCHEDULER_OPTS
value: {{ default "" .Values.api.jvmOptions }}
- name: TZ
value: {{ .Values.timezone }}
- name: POSTGRESQL_HOST
- name: DATABASE_TYPE
{{- if .Values.postgresql.enabled }}
value: "postgresql"
{{- else }}
value: {{ .Values.externalDatabase.type | quote }}
{{- end }}
- name: DATABASE_DRIVER
{{- if .Values.postgresql.enabled }}
value: "org.postgresql.Driver"
{{- else }}
value: {{ .Values.externalDatabase.driver | quote }}
{{- end }}
- name: DATABASE_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
- name: DATABASE_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
- name: POSTGRESQL_USERNAME
- name: DATABASE_USERNAME
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlUsername }}
{{- else }}
value: {{ .Values.externalDatabase.username | quote }}
{{- end }}
- name: POSTGRESQL_PASSWORD
- name: DATABASE_PASSWORD
valueFrom:
secretKeyRef:
{{- if .Values.postgresql.enabled }}
@ -122,18 +144,63 @@ spec:
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
key: db-password
{{- end }}
- name: POSTGRESQL_DATABASE
- name: DATABASE_DATABASE
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlDatabase }}
{{- else }}
value: {{ .Values.externalDatabase.database | quote }}
{{- end }}
- name: DATABASE_PARAMS
{{- if .Values.postgresql.enabled }}
value: "characterEncoding=utf8"
{{- else }}
value: {{ .Values.externalDatabase.params | quote }}
{{- end }}
- name: ZOOKEEPER_QUORUM
{{- if .Values.zookeeper.enabled }}
value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"
{{- else }}
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
{{- end }}
- name: RESOURCE_STORAGE_TYPE
valueFrom:
configMapKeyRef:
key: RESOURCE_STORAGE_TYPE
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: RESOURCE_UPLOAD_PATH
valueFrom:
configMapKeyRef:
key: RESOURCE_UPLOAD_PATH
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_DEFAULT_FS
valueFrom:
configMapKeyRef:
key: FS_DEFAULT_FS
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_ENDPOINT
valueFrom:
configMapKeyRef:
key: FS_S3A_ENDPOINT
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_ACCESS_KEY
valueFrom:
configMapKeyRef:
key: FS_S3A_ACCESS_KEY
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_SECRET_KEY
valueFrom:
configMapKeyRef:
key: FS_S3A_SECRET_KEY
name: {{ include "dolphinscheduler.fullname" . }}-common
{{- if .Values.api.resources }}
resources:
limits:
memory: {{ .Values.api.resources.limits.memory | quote }}
cpu: {{ .Values.api.resources.limits.cpu | quote }}
requests:
memory: {{ .Values.api.resources.requests.memory | quote }}
cpu: {{ .Values.api.resources.requests.cpu | quote }}
{{- end }}
{{- if .Values.api.livenessProbe.enabled }}
livenessProbe:
tcpSocket:

17
docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-frontend.yaml

@ -43,6 +43,10 @@ spec:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: frontend
{{- if .Values.frontend.annotations }}
annotations:
{{- toYaml .Values.frontend.annotations | nindent 8 }}
{{- end }}
spec:
{{- if .Values.frontend.affinity }}
affinity: {{- toYaml .Values.frontend.affinity | nindent 8 }}
@ -53,6 +57,10 @@ spec:
{{- if .Values.frontend.tolerations }}
tolerations: {{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
- name: {{ include "dolphinscheduler.image.pullSecrets" . }}
{{- end }}
containers:
- name: {{ include "dolphinscheduler.fullname" . }}-frontend
image: {{ include "dolphinscheduler.image.repository" . | quote }}
@ -69,6 +77,15 @@ spec:
value: '{{ include "dolphinscheduler.fullname" . }}-api'
- name: FRONTEND_API_SERVER_PORT
value: "12345"
{{- if .Values.frontend.resources }}
resources:
limits:
memory: {{ .Values.frontend.resources.limits.memory | quote }}
cpu: {{ .Values.frontend.resources.limits.cpu | quote }}
requests:
memory: {{ .Values.frontend.resources.requests.memory | quote }}
cpu: {{ .Values.frontend.resources.requests.cpu | quote }}
{{- end }}
{{- if .Values.frontend.livenessProbe.enabled }}
livenessProbe:
tcpSocket:

93
docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml

@ -40,6 +40,10 @@ spec:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: master
{{- if .Values.master.annotations }}
annotations:
{{- toYaml .Values.master.annotations | nindent 8 }}
{{- end }}
spec:
{{- if .Values.master.affinity }}
affinity: {{- toYaml .Values.master.affinity | nindent 8 }}
@ -75,34 +79,38 @@ spec:
{{- else }}
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
{{- end }}
- name: init-postgresql
- name: init-database
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do
counter=$((counter+1))
if [ $counter == 5 ]; then
echo "Error: Couldn't connect to postgresql."
echo "Error: Couldn't connect to database."
exit 1
fi
echo "Trying to connect to postgresql at ${POSTGRESQL_HOST}:${POSTGRESQL_PORT}. Attempt $counter."
echo "Trying to connect to database at ${DATABASE_HOST}:${DATABASE_PORT}. Attempt $counter."
sleep 60
done
env:
- name: POSTGRESQL_HOST
- name: DATABASE_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
- name: DATABASE_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
- name: {{ include "dolphinscheduler.image.pullSecrets" . }}
{{- end }}
containers:
- name: {{ include "dolphinscheduler.fullname" . }}-master
image: {{ include "dolphinscheduler.image.repository" . | quote }}
@ -113,6 +121,8 @@ spec:
name: "master-port"
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: DOLPHINSCHEDULER_OPTS
value: {{ default "" .Values.master.jvmOptions }}
- name: TZ
value: {{ .Values.timezone }}
- name: MASTER_EXEC_THREADS
@ -160,25 +170,37 @@ spec:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-master
key: DOLPHINSCHEDULER_DATA_BASEDIR_PATH
- name: POSTGRESQL_HOST
- name: DATABASE_TYPE
{{- if .Values.postgresql.enabled }}
value: "postgresql"
{{- else }}
value: {{ .Values.externalDatabase.type | quote }}
{{- end }}
- name: DATABASE_DRIVER
{{- if .Values.postgresql.enabled }}
value: "org.postgresql.Driver"
{{- else }}
value: {{ .Values.externalDatabase.driver | quote }}
{{- end }}
- name: DATABASE_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
- name: DATABASE_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
- name: POSTGRESQL_USERNAME
- name: DATABASE_USERNAME
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlUsername }}
{{- else }}
value: {{ .Values.externalDatabase.username | quote }}
{{- end }}
- name: POSTGRESQL_PASSWORD
- name: DATABASE_PASSWORD
valueFrom:
secretKeyRef:
{{- if .Values.postgresql.enabled }}
@ -188,18 +210,63 @@ spec:
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
key: db-password
{{- end }}
- name: POSTGRESQL_DATABASE
- name: DATABASE_DATABASE
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlDatabase }}
{{- else }}
value: {{ .Values.externalDatabase.database | quote }}
{{- end }}
- name: DATABASE_PARAMS
{{- if .Values.postgresql.enabled }}
value: "characterEncoding=utf8"
{{- else }}
value: {{ .Values.externalDatabase.params | quote }}
{{- end }}
- name: ZOOKEEPER_QUORUM
{{- if .Values.zookeeper.enabled }}
value: {{ template "dolphinscheduler.zookeeper.quorum" . }}
{{- else }}
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
{{- end }}
- name: RESOURCE_STORAGE_TYPE
valueFrom:
configMapKeyRef:
key: RESOURCE_STORAGE_TYPE
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: RESOURCE_UPLOAD_PATH
valueFrom:
configMapKeyRef:
key: RESOURCE_UPLOAD_PATH
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_DEFAULT_FS
valueFrom:
configMapKeyRef:
key: FS_DEFAULT_FS
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_ENDPOINT
valueFrom:
configMapKeyRef:
key: FS_S3A_ENDPOINT
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_ACCESS_KEY
valueFrom:
configMapKeyRef:
key: FS_S3A_ACCESS_KEY
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_SECRET_KEY
valueFrom:
configMapKeyRef:
key: FS_S3A_SECRET_KEY
name: {{ include "dolphinscheduler.fullname" . }}-common
{{- if .Values.master.resources }}
resources:
limits:
memory: {{ .Values.master.resources.limits.memory | quote }}
cpu: {{ .Values.master.resources.limits.cpu | quote }}
requests:
memory: {{ .Values.master.resources.requests.memory | quote }}
cpu: {{ .Values.master.resources.requests.cpu | quote }}
{{- end }}
{{- if .Values.master.livenessProbe.enabled }}
livenessProbe:
exec:

163
docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml

@ -40,6 +40,10 @@ spec:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: worker
{{- if .Values.worker.annotations }}
annotations:
{{- toYaml .Values.worker.annotations | nindent 8 }}
{{- end }}
spec:
{{- if .Values.worker.affinity }}
affinity: {{- toYaml .Values.worker.affinity | nindent 8 }}
@ -75,34 +79,38 @@ spec:
{{- else }}
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
{{- end }}
- name: init-postgresql
- name: init-database
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do
counter=$((counter+1))
if [ $counter == 5 ]; then
echo "Error: Couldn't connect to postgresql."
echo "Error: Couldn't connect to database."
exit 1
fi
echo "Trying to connect to postgresql at ${POSTGRESQL_HOST}:${POSTGRESQL_PORT}. Attempt $counter."
echo "Trying to connect to database at ${DATABASE_HOST}:${DATABASE_PORT}. Attempt $counter."
sleep 60
done
env:
- name: POSTGRESQL_HOST
- name: DATABASE_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
- name: DATABASE_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
- name: {{ include "dolphinscheduler.image.pullSecrets" . }}
{{- end }}
containers:
- name: {{ include "dolphinscheduler.fullname" . }}-worker
image: {{ include "dolphinscheduler.image.repository" . | quote }}
@ -115,6 +123,8 @@ spec:
name: "logs-port"
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: DOLPHINSCHEDULER_OPTS
value: {{ default "" .Values.worker.jvmOptions }}
- name: TZ
value: {{ .Values.timezone }}
- name: WORKER_EXEC_THREADS
@ -157,25 +167,37 @@ spec:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-master
key: DOLPHINSCHEDULER_DATA_BASEDIR_PATH
- name: POSTGRESQL_HOST
- name: DATABASE_TYPE
{{- if .Values.postgresql.enabled }}
value: "postgresql"
{{- else }}
value: {{ .Values.externalDatabase.type | quote }}
{{- end }}
- name: DATABASE_DRIVER
{{- if .Values.postgresql.enabled }}
value: "org.postgresql.Driver"
{{- else }}
value: {{ .Values.externalDatabase.driver | quote }}
{{- end }}
- name: DATABASE_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
- name: DATABASE_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
- name: POSTGRESQL_USERNAME
- name: DATABASE_USERNAME
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlUsername }}
{{- else }}
value: {{ .Values.externalDatabase.username | quote }}
{{- end }}
- name: POSTGRESQL_PASSWORD
- name: DATABASE_PASSWORD
valueFrom:
secretKeyRef:
{{- if .Values.postgresql.enabled }}
@ -185,18 +207,133 @@ spec:
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
key: db-password
{{- end }}
- name: POSTGRESQL_DATABASE
- name: DATABASE_DATABASE
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlDatabase }}
{{- else }}
value: {{ .Values.externalDatabase.database | quote }}
{{- end }}
- name: DATABASE_PARAMS
{{- if .Values.postgresql.enabled }}
value: "characterEncoding=utf8"
{{- else }}
value: {{ .Values.externalDatabase.params | quote }}
{{- end }}
- name: ZOOKEEPER_QUORUM
{{- if .Values.zookeeper.enabled }}
value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"
{{- else }}
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
{{- end }}
- name: RESOURCE_STORAGE_TYPE
valueFrom:
configMapKeyRef:
key: RESOURCE_STORAGE_TYPE
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: RESOURCE_UPLOAD_PATH
valueFrom:
configMapKeyRef:
key: RESOURCE_UPLOAD_PATH
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_DEFAULT_FS
valueFrom:
configMapKeyRef:
key: FS_DEFAULT_FS
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_ENDPOINT
valueFrom:
configMapKeyRef:
key: FS_S3A_ENDPOINT
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_ACCESS_KEY
valueFrom:
configMapKeyRef:
key: FS_S3A_ACCESS_KEY
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_SECRET_KEY
valueFrom:
configMapKeyRef:
key: FS_S3A_SECRET_KEY
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: XLS_FILE_PATH
valueFrom:
configMapKeyRef:
key: XLS_FILE_PATH
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SERVER_HOST
valueFrom:
configMapKeyRef:
key: MAIL_SERVER_HOST
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SERVER_PORT
valueFrom:
configMapKeyRef:
key: MAIL_SERVER_PORT
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SENDER
valueFrom:
configMapKeyRef:
key: MAIL_SENDER
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_USER
valueFrom:
configMapKeyRef:
key: MAIL_USER
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_PASSWD
valueFrom:
configMapKeyRef:
key: MAIL_PASSWD
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SMTP_STARTTLS_ENABLE
valueFrom:
configMapKeyRef:
key: MAIL_SMTP_STARTTLS_ENABLE
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SMTP_SSL_ENABLE
valueFrom:
configMapKeyRef:
key: MAIL_SMTP_SSL_ENABLE
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SMTP_SSL_TRUST
valueFrom:
configMapKeyRef:
key: MAIL_SMTP_SSL_TRUST
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: ENTERPRISE_WECHAT_ENABLE
valueFrom:
configMapKeyRef:
key: ENTERPRISE_WECHAT_ENABLE
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: ENTERPRISE_WECHAT_CORP_ID
valueFrom:
configMapKeyRef:
key: ENTERPRISE_WECHAT_CORP_ID
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: ENTERPRISE_WECHAT_SECRET
valueFrom:
configMapKeyRef:
key: ENTERPRISE_WECHAT_SECRET
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: ENTERPRISE_WECHAT_AGENT_ID
valueFrom:
configMapKeyRef:
key: ENTERPRISE_WECHAT_AGENT_ID
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: ENTERPRISE_WECHAT_USERS
valueFrom:
configMapKeyRef:
key: ENTERPRISE_WECHAT_USERS
name: {{ include "dolphinscheduler.fullname" . }}-alert
{{- if .Values.worker.resources }}
resources:
limits:
memory: {{ .Values.worker.resources.limits.memory | quote }}
cpu: {{ .Values.worker.resources.limits.cpu | quote }}
requests:
memory: {{ .Values.worker.resources.requests.memory | quote }}
cpu: {{ .Values.worker.resources.requests.cpu | quote }}
{{- end }}
{{- if .Values.worker.livenessProbe.enabled }}
livenessProbe:
exec:

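A note on the port hunk above: Kubernetes requires environment variable values to be strings, so the added "| quote" filter keeps externalDatabase.port from rendering as a bare YAML integer, which the API server would reject. A minimal sketch of the rendered entry, assuming the chart's default port:

# rendered env entry (illustrative; value taken from the chart default)
- name: DATABASE_PORT
  value: "5432"   # must be a quoted string, not the integer 5432
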
87
docker/kubernetes/dolphinscheduler/values.yaml

@ -29,10 +29,9 @@ image:
repository: "dolphinscheduler"
tag: "latest"
pullPolicy: "IfNotPresent"
pullSecrets: []
imagePullSecrets: []
# If not exists external postgresql, by default, Dolphinscheduler's database will use it.
# If no external database is configured, DolphinScheduler's built-in database will be used by default.
postgresql:
enabled: true
postgresqlUsername: "root"
@ -43,21 +42,24 @@ postgresql:
size: "20Gi"
storageClass: "-"
# If exists external postgresql, and set postgresql.enable value to false.
# If postgresql.enable is false, Dolphinscheduler's database will use it.
# If an external database exists, set postgresql.enabled to false;
# the external database will then be used, otherwise DolphinScheduler's built-in database is used.
externalDatabase:
type: "postgresql"
driver: "org.postgresql.Driver"
host: "localhost"
port: "5432"
username: "root"
password: "root"
database: "dolphinscheduler"
# multiple params should be joined with the & char
params: "characterEncoding=utf8"
# If not exists external zookeeper, by default, Dolphinscheduler's zookeeper will use it.
zookeeper:
enabled: true
taskQueue: "zookeeper"
config:
ZOO_4LW_COMMANDS_WHITELIST: srvr,ruok,wchs,cons
config: null
service:
port: "2181"
persistence:
@ -70,6 +72,18 @@ zookeeper:
externalZookeeper:
taskQueue: "zookeeper"
zookeeperQuorum: "127.0.0.1:2181"
zookeeperRoot: "/dolphinscheduler"
common:
configmap:
DOLPHINSCHEDULER_ENV_PATH: "/tmp/dolphinscheduler/env"
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler/files"
RESOURCE_STORAGE_TYPE: "NONE"
RESOURCE_UPLOAD_PATH: "/ds"
FS_DEFAULT_FS: "s3a://xxxx"
FS_S3A_ENDPOINT: "s3.xxx.amazonaws.com"
FS_S3A_ACCESS_KEY: "xxxxxxx"
FS_S3A_SECRET_KEY: "xxxxxxx"
master:
podManagementPolicy: "Parallel"
@ -85,6 +99,18 @@ master:
# If specified, the pod's scheduling constraints.
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
# The JVM options for the Java process at startup
jvmOptions: ""
resources: {}
# limits:
# memory: "18Gi"
# cpu: "4"
# requests:
# memory: "2Gi"
# cpu: "500m"
# You can use annotations to attach arbitrary non-identifying metadata to objects.
# Clients such as tools and libraries can retrieve this metadata.
annotations: {}
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
configmap:
@ -137,6 +163,18 @@ worker:
# If specified, the pod's scheduling constraints.
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
# The JVM options for the Java process at startup
jvmOptions: ""
resources: {}
# limits:
# memory: "18Gi"
# cpu: "4"
# requests:
# memory: "2Gi"
# cpu: "500m"
# You can use annotations to attach arbitrary non-identifying metadata to objects.
# Clients such as tools and libraries can retrieve this metadata.
annotations: {}
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
livenessProbe:
@ -213,9 +251,22 @@ alert:
# If specified, the pod's scheduling constraints.
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
# The JVM options for the Java process at startup
jvmOptions: ""
resources: {}
# limits:
# memory: "4Gi"
# cpu: "1"
# requests:
# memory: "2Gi"
# cpu: "500m"
# You can use annotations to attach arbitrary non-identifying metadata to objects.
# Clients such as tools and libraries can retrieve this metadata.
annotations: {}
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
configmap:
ALERT_PLUGIN_DIR: "/opt/dolphinscheduler/alert/plugin"
XLS_FILE_PATH: "/tmp/xls"
MAIL_SERVER_HOST: ""
MAIL_SERVER_PORT: ""
@ -275,6 +326,18 @@ api:
# If specified, the pod's scheduling constraints.
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
# The JVM options for the Java process at startup
jvmOptions: ""
resources: {}
# limits:
# memory: "4Gi"
# cpu: "2"
# requests:
# memory: "2Gi"
# cpu: "500m"
# You can use annotations to attach arbitrary non-identifying metadata to objects.
# Clients such as tools and libraries can retrieve this metadata.
annotations: {}
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
livenessProbe:
@ -322,6 +385,16 @@ frontend:
# If specified, the pod's scheduling constraints.
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
resources: {}
# limits:
# memory: "256Mi"
# cpu: "1"
# requests:
# memory: "256Mi"
# cpu: "500m"
# You can use annotations to attach arbitrary non-identifying metadata to objects.
# Clients such as tools and libraries can retrieve this metadata.
annotations: {}
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
livenessProbe:

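Taken together, the new externalDatabase keys make a non-PostgreSQL backend configurable. A hedged values override for an external MySQL instance, where the host, credentials, and driver class are illustrative assumptions rather than chart defaults:

postgresql:
  enabled: false
externalDatabase:
  type: "mysql"
  driver: "com.mysql.jdbc.Driver"   # assumed MySQL driver class
  host: "mysql.example.com"         # hypothetical host
  port: "3306"
  username: "ds_user"
  password: "ds_password"
  database: "dolphinscheduler"
  params: "useUnicode=true&characterEncoding=UTF-8"   # multiple params joined with &
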
7
dolphinscheduler-alert/pom.xml

@ -21,7 +21,7 @@
<parent>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler</artifactId>
<version>1.2.1-SNAPSHOT</version>
<version>1.3.2-SNAPSHOT</version>
</parent>
<artifactId>dolphinscheduler-alert</artifactId>
<name>${project.artifactId}</name>
@ -66,11 +66,6 @@
<artifactId>commons-email</artifactId>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>fastjson</artifactId>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-core</artifactId>

15
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/manager/EmailManager.java

@ -27,28 +27,29 @@ import java.util.Map;
public class EmailManager {
/**
* email send
* @param receviersList the receiver list
* @param receviersCcList the cc List
* @param receiversList the receiver list
* @param receiversCcList the cc List
* @param title the title
* @param content the content
* @param showType the showType
* @return the send result
*/
public Map<String,Object> send(List<String> receviersList,List<String> receviersCcList,String title,String content,String showType){
public Map<String,Object> send(List<String> receiversList,List<String> receiversCcList,String title,String content,String showType){
return MailUtils.sendMails(receviersList, receviersCcList, title, content, showType);
return MailUtils.sendMails(receiversList, receiversCcList, title, content, showType);
}
/**
* msg send
* @param receviersList the receiver list
* @param receiversList the receiver list
* @param title the title
* @param content the content
* @param showType the showType
* @return the send result
*/
public Map<String,Object> send(List<String> receviersList,String title,String content,String showType){
public Map<String,Object> send(List<String> receiversList,String title,String content,String showType){
return MailUtils.sendMails(receviersList,title, content, showType);
return MailUtils.sendMails(receiversList,title, content, showType);
}
}

16
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/plugin/EmailAlertPlugin.java

@ -56,7 +56,7 @@ public class EmailAlertPlugin implements AlertPlugin {
@Override
public String getId() {
return Constants.PLUGIN_DEFAULT_EMAIL;
return Constants.PLUGIN_DEFAULT_EMAIL_ID;
}
@Override
@ -71,32 +71,32 @@ public class EmailAlertPlugin implements AlertPlugin {
AlertData alert = info.getAlertData();
List<String> receviersList = (List<String>) info.getProp(Constants.PLUGIN_DEFAULT_EMAIL_RECEIVERS);
List<String> receiversList = (List<String>) info.getProp(Constants.PLUGIN_DEFAULT_EMAIL_RECEIVERS);
// receiving group list
// custom receiver
String receivers = alert.getReceivers();
if (StringUtils.isNotEmpty(receivers)) {
String[] splits = receivers.split(",");
receviersList.addAll(Arrays.asList(splits));
receiversList.addAll(Arrays.asList(splits));
}
List<String> receviersCcList = new ArrayList<>();
List<String> receiversCcList = new ArrayList<>();
// custom cc recipients
String receiversCc = alert.getReceiversCc();
if (StringUtils.isNotEmpty(receiversCc)) {
String[] splits = receiversCc.split(",");
receviersCcList.addAll(Arrays.asList(splits));
receiversCcList.addAll(Arrays.asList(splits));
}
if (CollectionUtils.isEmpty(receviersList) && CollectionUtils.isEmpty(receviersCcList)) {
if (CollectionUtils.isEmpty(receiversList) && CollectionUtils.isEmpty(receiversCcList)) {
logger.warn("alert send error : At least one receiver address required");
retMaps.put(Constants.STATUS, "false");
retMaps.put(Constants.MESSAGE, "execution failure, at least one receiver address required.");
return retMaps;
}
retMaps = emailManager.send(receviersList, receviersCcList, alert.getTitle(), alert.getContent(),
retMaps = emailManager.send(receiversList, receiversCcList, alert.getTitle(), alert.getContent(),
alert.getShowType());
//send flag
@ -124,7 +124,7 @@ public class EmailAlertPlugin implements AlertPlugin {
logger.error(e.getMessage(), e);
}
}
if (DingTalkUtils.isEnableDingTalk) {
logger.info("Ding Talk is enable.");
dingTalkManager.send(info);

12
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/runner/AlertSender.java

@ -60,9 +60,9 @@ public class AlertSender {
users = alertDao.listUserByAlertgroupId(alert.getAlertGroupId());
// receiving group list
List<String> receviersList = new ArrayList<>();
List<String> receiversList = new ArrayList<>();
for (User user : users) {
receviersList.add(user.getEmail());
receiversList.add(user.getEmail());
}
AlertData alertData = new AlertData();
@ -78,17 +78,17 @@ public class AlertSender {
AlertInfo alertInfo = new AlertInfo();
alertInfo.setAlertData(alertData);
alertInfo.addProp("receivers", receviersList);
alertInfo.addProp("receivers", receiversList);
AlertPlugin emailPlugin = pluginManager.findOne(Constants.PLUGIN_DEFAULT_EMAIL);
AlertPlugin emailPlugin = pluginManager.findOne(Constants.PLUGIN_DEFAULT_EMAIL_ID);
retMaps = emailPlugin.process(alertInfo);
if (retMaps == null) {
alertDao.updateAlert(AlertStatus.EXECUTION_FAILURE, "alert send error", alert.getId());
logger.info("alert send error : return value is null");
logger.error("alert send error : return value is null");
} else if (!Boolean.parseBoolean(String.valueOf(retMaps.get(Constants.STATUS)))) {
alertDao.updateAlert(AlertStatus.EXECUTION_FAILURE, String.valueOf(retMaps.get(Constants.MESSAGE)), alert.getId());
logger.info("alert send error : {}", retMaps.get(Constants.MESSAGE));
logger.error("alert send error : {}", retMaps.get(Constants.MESSAGE));
} else {
alertDao.updateAlert(AlertStatus.EXECUTION_SUCCESS, (String) retMaps.get(Constants.MESSAGE), alert.getId());
logger.info("alert send success");

36
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/template/impl/DefaultHTMLTemplate.java

@ -16,13 +16,15 @@
*/
package org.apache.dolphinscheduler.alert.template.impl;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import org.apache.dolphinscheduler.alert.template.AlertTemplate;
import org.apache.dolphinscheduler.alert.utils.Constants;
import org.apache.dolphinscheduler.alert.utils.JSONUtils;
import org.apache.dolphinscheduler.common.enums.ShowType;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.dolphinscheduler.common.utils.*;
import java.util.*;
@ -35,6 +37,7 @@ public class DefaultHTMLTemplate implements AlertTemplate {
public static final Logger logger = LoggerFactory.getLogger(DefaultHTMLTemplate.class);
@Override
public String getMessageFromTemplate(String content, ShowType showType,boolean showAll) {
@ -107,18 +110,11 @@ public class DefaultHTMLTemplate implements AlertTemplate {
private String getTextTypeMessage(String content,boolean showAll){
if (StringUtils.isNotEmpty(content)){
List<String> list;
try {
list = JSONUtils.toList(content,String.class);
}catch (Exception e){
logger.error("json format exception",e);
return null;
}
ArrayNode list = JSONUtils.parseArray(content);
StringBuilder contents = new StringBuilder(100);
for (String str : list){
for (JsonNode jsonNode : list){
contents.append(Constants.TR);
contents.append(Constants.TD).append(str).append(Constants.TD_END);
contents.append(Constants.TD).append(jsonNode.toString()).append(Constants.TD_END);
contents.append(Constants.TR_END);
}
@ -140,21 +136,7 @@ public class DefaultHTMLTemplate implements AlertTemplate {
checkNotNull(content);
String htmlTableThead = StringUtils.isEmpty(title) ? "" : String.format("<thead>%s</thead>\n",title);
return "<html>\n" +
" <head>\n" +
" <title>dolphinscheduler</title>\n" +
" <meta name='Keywords' content=''>\n" +
" <meta name='Description' content=''>\n" +
" <style type=\"text/css\">\n" +
" table {margin-top:0px;padding-top:0px;border:1px solid;font-size: 14px;color: #333333;border-width: 1px;border-color: #666666;border-collapse: collapse;}\n" +
" table th {border-width: 1px;padding: 8px;border-style: solid;border-color: #666666;background-color: #dedede;text-align: right;}\n" +
" table td {border-width: 1px;padding: 8px;border-style: solid;border-color: #666666;background-color: #ffffff;text-align: right;}\n" +
" </style>\n" +
" </head>\n" +
" <body style=\"margin:0;padding:0\">\n" +
" <table border=\"1px\" cellpadding=\"5px\" cellspacing=\"-10px\">\n" + htmlTableThead + content +
" </table>\n" +
" </body>\n" +
"</html>";
return Constants.HTML_HEADER_PREFIX +htmlTableThead + content + Constants.TABLE_BODY_HTML_TAIL;
}
}

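The replacement of the fastjson-backed toList call with JSONUtils.parseArray means table rows are now iterated as Jackson JsonNode values. A minimal standalone sketch of that pattern, using plain Jackson rather than the DolphinScheduler utility class:

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ArrayNode;

public class ArrayNodeDemo {
    public static void main(String[] args) throws Exception {
        String content = "[\"row1\",\"row2\"]";
        // a JSON array parses to an ArrayNode, which iterates as JsonNode elements
        ArrayNode list = (ArrayNode) new ObjectMapper().readTree(content);
        StringBuilder contents = new StringBuilder(100);
        for (JsonNode jsonNode : list) {
            // toString() keeps the JSON form, quotes included for string elements
            contents.append("<tr><td>").append(jsonNode.toString()).append("</td></tr>");
        }
        System.out.println(contents);
    }
}
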
6
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/Constants.java

@ -174,12 +174,16 @@ public class Constants {
public static final String DINGTALK_ENABLE = "dingtalk.isEnable";
public static final String HTML_HEADER_PREFIX = "<!DOCTYPE HTML PUBLIC '-//W3C//DTD HTML 4.01 Transitional//EN' 'http://www.w3.org/TR/html4/loose.dtd'><html><head><title>dolphinscheduler</title><meta name='Keywords' content=''><meta name='Description' content=''><style type=\"text/css\">table {margin-top:0px;padding-top:0px;border:1px solid;font-size: 14px;color: #333333;border-width: 1px;border-color: #666666;border-collapse: collapse;}table th {border-width: 1px;padding: 8px;border-style: solid;border-color: #666666;background-color: #dedede;text-align: left;}table td {border-width: 1px;padding: 8px;border-style: solid;border-color: #666666;background-color: #ffffff;text-align: left;}</style></head><body style=\"margin:0;padding:0\"><table border=\"1px\" cellpadding=\"5px\" cellspacing=\"-10px\"> ";
public static final String TABLE_BODY_HTML_TAIL = "</table></body></html>";
/**
* plugin config
*/
public static final String PLUGIN_DIR = "plugin.dir";
public static final String PLUGIN_DEFAULT_EMAIL = "email";
public static final String PLUGIN_DEFAULT_EMAIL_ID = "email";
public static final String PLUGIN_DEFAULT_EMAIL_CH = "邮件";

4
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/DingTalkUtils.java

@ -17,7 +17,7 @@
package org.apache.dolphinscheduler.alert.utils;
import com.alibaba.fastjson.JSON;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.commons.codec.binary.StringUtils;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
@ -129,7 +129,7 @@ public class DingTalkUtils {
textContent.put("content", txt);
items.put("text", textContent);
return JSON.toJSONString(items);
return JSONUtils.toJsonString(items);
}

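JSONUtils.toJsonString here replaces fastjson's JSON.toJSONString with the Jackson-based helper from the common module. A rough standalone equivalent, assuming only Jackson on the classpath:

import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.HashMap;
import java.util.Map;

public class ToJsonDemo {
    public static void main(String[] args) throws Exception {
        Map<String, Object> items = new HashMap<>();
        items.put("msgtype", "text");
        // writeValueAsString serializes Maps, Lists, and beans to a JSON string
        System.out.println(new ObjectMapper().writeValueAsString(items));
    }
}
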
107
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/EnterpriseWeChatUtils.java

@ -18,9 +18,8 @@ package org.apache.dolphinscheduler.alert.utils;
import org.apache.dolphinscheduler.common.enums.ShowType;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import com.alibaba.fastjson.JSON;
import org.apache.dolphinscheduler.common.utils.*;
import com.google.common.reflect.TypeToken;
import org.apache.dolphinscheduler.plugin.model.AlertData;
import org.apache.http.HttpEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
@ -49,8 +48,8 @@ public class EnterpriseWeChatUtils {
private static final String ENTERPRISE_WE_CHAT_TOKEN_URL = PropertyUtils.getString(Constants.ENTERPRISE_WECHAT_TOKEN_URL);
private static final String ENTERPRISE_WE_CHAT_TOKEN_URL_REPLACE = ENTERPRISE_WE_CHAT_TOKEN_URL == null ? null : ENTERPRISE_WE_CHAT_TOKEN_URL
.replaceAll("\\$corpId", ENTERPRISE_WE_CHAT_CORP_ID)
.replaceAll("\\$secret", ENTERPRISE_WE_CHAT_SECRET);
.replaceAll("\\{corpId\\}", ENTERPRISE_WE_CHAT_CORP_ID)
.replaceAll("\\{secret\\}", ENTERPRISE_WE_CHAT_SECRET);
private static final String ENTERPRISE_WE_CHAT_PUSH_URL = PropertyUtils.getString(Constants.ENTERPRISE_WECHAT_PUSH_URL);
@ -64,14 +63,15 @@ public class EnterpriseWeChatUtils {
/**
* get Enterprise WeChat is enable
*
* @return isEnable
*/
public static boolean isEnable(){
public static boolean isEnable() {
Boolean isEnable = null;
try {
isEnable = PropertyUtils.getBoolean(Constants.ENTERPRISE_WECHAT_ENABLE);
} catch (Exception e) {
logger.error(e.getMessage(),e);
logger.error(e.getMessage(), e);
}
if (isEnable == null) {
return false;
@ -81,6 +81,7 @@ public class EnterpriseWeChatUtils {
/**
* get Enterprise WeChat token info
*
* @return token string info
* @throws IOException the IOException
*/
@ -99,10 +100,12 @@ public class EnterpriseWeChatUtils {
response.close();
}
Map<String, Object> map = JSON.parseObject(resp,
new TypeToken<Map<String, Object>>() {
}.getType());
return map.get("access_token").toString();
Map<String, String> map = JSONUtils.toMap(resp);
if (map != null) {
return map.get("access_token");
} else {
return null;
}
} finally {
httpClient.close();
}
@ -110,68 +113,73 @@ public class EnterpriseWeChatUtils {
/**
* make team single Enterprise WeChat message
*
* @param toParty the toParty
* @param agentId the agentId
* @param msg the msg
* @param msg the msg
* @return Enterprise WeChat send message
*/
public static String makeTeamSendMsg(String toParty, String agentId, String msg) {
return ENTERPRISE_WE_CHAT_TEAM_SEND_MSG.replaceAll("\\$toParty", toParty)
.replaceAll("\\$agentId", agentId)
.replaceAll("\\$msg", msg);
return ENTERPRISE_WE_CHAT_TEAM_SEND_MSG.replaceAll("\\{toParty\\}", toParty)
.replaceAll("\\{agentId\\}", agentId)
.replaceAll("\\{msg\\}", msg);
}
/**
* make team multi Enterprise WeChat message
*
* @param toParty the toParty
* @param agentId the agentId
* @param msg the msg
* @param msg the msg
* @return Enterprise WeChat send message
*/
public static String makeTeamSendMsg(Collection<String> toParty, String agentId, String msg) {
String listParty = FuncUtils.mkString(toParty, "|");
return ENTERPRISE_WE_CHAT_TEAM_SEND_MSG.replaceAll("\\$toParty", listParty)
.replaceAll("\\$agentId", agentId)
.replaceAll("\\$msg", msg);
return ENTERPRISE_WE_CHAT_TEAM_SEND_MSG.replaceAll("\\{toParty\\}", listParty)
.replaceAll("\\{agentId\\}", agentId)
.replaceAll("\\{msg\\}", msg);
}
/**
* make team single user message
* @param toUser the toUser
*
* @param toUser the toUser
* @param agentId the agentId
* @param msg the msg
* @param msg the msg
* @return Enterprise WeChat send message
*/
public static String makeUserSendMsg(String toUser, String agentId, String msg) {
return ENTERPRISE_WE_CHAT_USER_SEND_MSG.replaceAll("\\$toUser", toUser)
.replaceAll("\\$agentId", agentId)
.replaceAll("\\$msg", msg);
return ENTERPRISE_WE_CHAT_USER_SEND_MSG.replaceAll("\\{toUser\\}", toUser)
.replaceAll("\\{agentId\\}", agentId)
.replaceAll("\\{msg\\}", msg);
}
/**
* make team multi user message
* @param toUser the toUser
*
* @param toUser the toUser
* @param agentId the agentId
* @param msg the msg
* @param msg the msg
* @return Enterprise WeChat send message
*/
public static String makeUserSendMsg(Collection<String> toUser, String agentId, String msg) {
String listUser = FuncUtils.mkString(toUser, "|");
return ENTERPRISE_WE_CHAT_USER_SEND_MSG.replaceAll("\\$toUser", listUser)
.replaceAll("\\$agentId", agentId)
.replaceAll("\\$msg", msg);
return ENTERPRISE_WE_CHAT_USER_SEND_MSG.replaceAll("\\{toUser\\}", listUser)
.replaceAll("\\{agentId\\}", agentId)
.replaceAll("\\{msg\\}", msg);
}
/**
* send Enterprise WeChat
*
* @param charset the charset
* @param data the data
* @param token the token
* @param data the data
* @param token the token
* @return Enterprise WeChat resp, demo: {"errcode":0,"errmsg":"ok","invaliduser":""}
* @throws IOException the IOException
*/
public static String sendEnterpriseWeChat(String charset, String data, String token) throws IOException {
String enterpriseWeChatPushUrlReplace = ENTERPRISE_WE_CHAT_PUSH_URL.replaceAll("\\$token", token);
String enterpriseWeChatPushUrlReplace = ENTERPRISE_WE_CHAT_PUSH_URL.replaceAll("\\{token\\}", token);
CloseableHttpClient httpClient = HttpClients.createDefault();
try {
@ -196,21 +204,22 @@ public class EnterpriseWeChatUtils {
/**
* convert table to markdown style
* @param title the title
*
* @param title the title
* @param content the content
* @return markdown table content
*/
public static String markdownTable(String title,String content){
public static String markdownTable(String title, String content) {
List<LinkedHashMap> mapItemsList = JSONUtils.toList(content, LinkedHashMap.class);
StringBuilder contents = new StringBuilder(200);
if (null != mapItemsList) {
for (LinkedHashMap mapItems : mapItemsList){
for (LinkedHashMap mapItems : mapItemsList) {
Set<Map.Entry<String, String>> entries = mapItems.entrySet();
Iterator<Map.Entry<String, String>> iterator = entries.iterator();
StringBuilder t = new StringBuilder(String.format("`%s`%s",title,Constants.MARKDOWN_ENTER));
StringBuilder t = new StringBuilder(String.format("`%s`%s", title, Constants.MARKDOWN_ENTER));
while (iterator.hasNext()){
while (iterator.hasNext()) {
Map.Entry<String, String> entry = iterator.next();
t.append(Constants.MARKDOWN_QUOTE);
@ -225,23 +234,24 @@ public class EnterpriseWeChatUtils {
/**
* convert text to markdown style
* @param title the title
*
* @param title the title
* @param content the content
* @return markdown text
*/
public static String markdownText(String title,String content){
if (StringUtils.isNotEmpty(content)){
public static String markdownText(String title, String content) {
if (StringUtils.isNotEmpty(content)) {
List<String> list;
try {
list = JSONUtils.toList(content,String.class);
}catch (Exception e){
logger.error("json format exception",e);
list = JSONUtils.toList(content, String.class);
} catch (Exception e) {
logger.error("json format exception", e);
return null;
}
StringBuilder contents = new StringBuilder(100);
contents.append(String.format("`%s`%n",title));
for (String str : list){
contents.append(String.format("`%s`%n", title));
for (String str : list) {
contents.append(Constants.MARKDOWN_QUOTE);
contents.append(str);
contents.append(Constants.MARKDOWN_ENTER);
@ -255,14 +265,15 @@ public class EnterpriseWeChatUtils {
/**
* Determine the markdown style based on the show type of the alert
*
* @return the markdown alert table/text
*/
public static String markdownByAlert(AlertData alert){
public static String markdownByAlert(AlertData alert) {
String result = "";
if (alert.getShowType().equals(ShowType.TABLE.getDescp())) {
result = markdownTable(alert.getTitle(),alert.getContent());
}else if(alert.getShowType().equals(ShowType.TEXT.getDescp())){
result = markdownText(alert.getTitle(),alert.getContent());
result = markdownTable(alert.getTitle(), alert.getContent());
} else if (alert.getShowType().equals(ShowType.TEXT.getDescp())) {
result = markdownText(alert.getTitle(), alert.getContent());
}
return result;

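The migration above from $corpId-style to {corpId}-style placeholders still goes through String.replaceAll, which interprets its first argument as a regex; that is why the braces are escaped. A tiny sketch of the mechanics, with made-up credential values:

public class PlaceholderDemo {
    public static void main(String[] args) {
        String tokenUrl = "https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid={corpId}&corpsecret={secret}";
        // unescaped braces would raise PatternSyntaxException ("Illegal repetition")
        String replaced = tokenUrl.replaceAll("\\{corpId\\}", "wx123")
                .replaceAll("\\{secret\\}", "s3cret");
        System.out.println(replaced);
    }
}
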
1
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/ExcelUtils.java

@ -30,6 +30,7 @@ import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.*;
import org.apache.dolphinscheduler.common.utils.*;
/**
* excel utils

69
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/JSONUtils.java

@ -1,69 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.alert.utils;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collections;
import java.util.List;
/**
* json utils
*/
public class JSONUtils {
private static final Logger logger = LoggerFactory.getLogger(JSONUtils.class);
/**
* object to json string
* @param object the object to be converted to json
* @return json string
*/
public static String toJsonString(Object object) {
try{
return JSON.toJSONString(object,false);
} catch (Exception e) {
throw new RuntimeException("Json deserialization exception.", e);
}
}
/**
* json to list
*
* @param json the json
* @param clazz c
* @param <T> the generic clazz
* @return the result list or empty list
*/
public static <T> List<T> toList(String json, Class<T> clazz) {
if (StringUtils.isEmpty(json)) {
return Collections.emptyList();
}
try {
return JSON.parseArray(json, clazz);
} catch (Exception e) {
logger.error("JSONArray.parseArray exception!",e);
}
return Collections.emptyList();
}
}

5
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/MailUtils.java

@ -65,6 +65,10 @@ public class MailUtils {
public static final AlertTemplate alertTemplate = AlertTemplateFactory.getMessageTemplate();
// Avoid garbled Chinese file names in Excel attachments
static {
System.setProperty("mail.mime.splitlongparameters","false");
}
/**
* send mail to receivers
@ -341,4 +345,5 @@ public class MailUtils {
retMap.put(Constants.MESSAGE, "Send email to {" + String.join(",", receivers) + "} failed," + e.toString());
}
}

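The static block added above must run before any mail session is created; "mail.mime.splitlongparameters" is a standard JavaMail property, and leaving it at its default lets long RFC 2231-encoded attachment names be split into segments that some clients render as garbled text. A sketch of the same idea in isolation:

public class MailMimeDemo {
    static {
        // disable splitting of long encoded MIME parameters (e.g. attachment file names)
        System.setProperty("mail.mime.splitlongparameters", "false");
    }

    public static void main(String[] args) {
        System.out.println(System.getProperty("mail.mime.splitlongparameters"));
    }
}
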
8
dolphinscheduler-alert/src/main/resources/alert.properties

@ -41,10 +41,10 @@ enterprise.wechat.enable=false
#enterprise.wechat.secret=xxxxxxx
#enterprise.wechat.agent.id=xxxxxxx
#enterprise.wechat.users=xxxxxxx
#enterprise.wechat.token.url=https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret
#enterprise.wechat.push.url=https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token
#enterprise.wechat.team.send.msg={\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}
#enterprise.wechat.user.send.msg={\"touser\":\"$toUser\",\"agentid\":\"$agentId\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"$msg\"}}
#enterprise.wechat.token.url=https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid={corpId}&corpsecret={secret}
#enterprise.wechat.push.url=https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token={token}
#enterprise.wechat.team.send.msg={\"toparty\":\"{toParty}\",\"agentid\":\"{agentId}\",\"msgtype\":\"text\",\"text\":{\"content\":\"{msg}\"},\"safe\":\"0\"}
#enterprise.wechat.user.send.msg={\"touser\":\"{toUser}\",\"agentid\":\"{agentId}\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"{msg}\"}}
plugin.dir=/Users/xx/your/path/to/plugin/dir

2
dolphinscheduler-alert/src/main/resources/logback-alert.xml

@ -46,7 +46,7 @@
<root level="INFO">
<appender-ref ref="STDOUT"/>
<appender-ref ref="APILOGFILE"/>
<appender-ref ref="ALERTLOGFILE"/>
</root>
</configuration>

2
dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/plugin/EmailAlertPluginTest.java

@ -47,7 +47,7 @@ public class EmailAlertPluginTest {
@Test
public void getId() {
String id = plugin.getId();
assertEquals(Constants.PLUGIN_DEFAULT_EMAIL, id);
assertEquals(Constants.PLUGIN_DEFAULT_EMAIL_ID, id);
}
@Test

45
dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/template/impl/DefaultHTMLTemplateTest.java

@ -16,7 +16,8 @@
*/
package org.apache.dolphinscheduler.alert.template.impl;
import org.apache.dolphinscheduler.alert.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.alert.utils.Constants;
import org.apache.dolphinscheduler.common.enums.ShowType;
import org.junit.Test;
import org.slf4j.Logger;
@ -60,16 +61,16 @@ public class DefaultHTMLTemplateTest{
LinkedHashMap<String, Object> map1 = new LinkedHashMap<>();
map1.put("mysql service name","mysql200");
map1.put("mysql address","192.168.xx.xx");
map1.put("database client connections","190");
map1.put("port","3306");
map1.put("no index of number","80");
map1.put("database client connections","190");
LinkedHashMap<String, Object> map2 = new LinkedHashMap<>();
map2.put("mysql service name","mysql210");
map2.put("mysql address","192.168.xx.xx");
map2.put("database client connections","90");
map2.put("port","3306");
map2.put("no index of number","10");
map2.put("database client connections","90");
List<LinkedHashMap<String, Object>> maps = new ArrayList<>();
maps.add(0,map1);
@ -82,42 +83,14 @@ public class DefaultHTMLTemplateTest{
private String generateMockTableTypeResultByHand(){
return "<html>\n" +
" <head>\n" +
" <title>dolphinscheduler</title>\n" +
" <meta name='Keywords' content=''>\n" +
" <meta name='Description' content=''>\n" +
" <style type=\"text/css\">\n" +
" table {margin-top:0px;padding-top:0px;border:1px solid;font-size: 14px;color: #333333;border-width: 1px;border-color: #666666;border-collapse: collapse;}\n" +
" table th {border-width: 1px;padding: 8px;border-style: solid;border-color: #666666;background-color: #dedede;text-align: right;}\n" +
" table td {border-width: 1px;padding: 8px;border-style: solid;border-color: #666666;background-color: #ffffff;text-align: right;}\n" +
" </style>\n" +
" </head>\n" +
" <body style=\"margin:0;padding:0\">\n" +
" <table border=\"1px\" cellpadding=\"5px\" cellspacing=\"-10px\">\n" +
"<thead><tr><th>mysql service name</th><th>mysql address</th><th>port</th><th>no index of number</th><th>database client connections</th></tr></thead>\n" +
"<tr><td>mysql200</td><td>192.168.xx.xx</td><td>3306</td><td>80</td><td>190</td></tr><tr><td>mysql210</td><td>192.168.xx.xx</td><td>3306</td><td>10</td><td>90</td></tr> </table>\n" +
" </body>\n" +
"</html>";
return Constants.HTML_HEADER_PREFIX +
"<thead><tr><th>mysql service name</th><th>mysql address</th><th>database client connections</th><th>port</th><th>no index of number</th></tr></thead>\n" +
"<tr><td>mysql200</td><td>192.168.xx.xx</td><td>190</td><td>3306</td><td>80</td></tr><tr><td>mysql210</td><td>192.168.xx.xx</td><td>90</td><td>3306</td><td>10</td></tr>" + Constants.TABLE_BODY_HTML_TAIL;
}
private String generateMockTextTypeResultByHand(){
return "<html>\n" +
" <head>\n" +
" <title>dolphinscheduler</title>\n" +
" <meta name='Keywords' content=''>\n" +
" <meta name='Description' content=''>\n" +
" <style type=\"text/css\">\n" +
" table {margin-top:0px;padding-top:0px;border:1px solid;font-size: 14px;color: #333333;border-width: 1px;border-color: #666666;border-collapse: collapse;}\n" +
" table th {border-width: 1px;padding: 8px;border-style: solid;border-color: #666666;background-color: #dedede;text-align: right;}\n" +
" table td {border-width: 1px;padding: 8px;border-style: solid;border-color: #666666;background-color: #ffffff;text-align: right;}\n" +
" </style>\n" +
" </head>\n" +
" <body style=\"margin:0;padding:0\">\n" +
" <table border=\"1px\" cellpadding=\"5px\" cellspacing=\"-10px\">\n" +
"<tr><td>{\"mysql service name\":\"mysql200\",\"mysql address\":\"192.168.xx.xx\",\"database client connections\":\"190\",\"port\":\"3306\",\"no index of number\":\"80\"}</td></tr><tr><td>{\"mysql service name\":\"mysql210\",\"mysql address\":\"192.168.xx.xx\",\"database client connections\":\"90\",\"port\":\"3306\",\"no index of number\":\"10\"}</td></tr> </table>\n" +
" </body>\n" +
"</html>";
return Constants.HTML_HEADER_PREFIX + "<tr><td>{\"mysql service name\":\"mysql200\",\"mysql address\":\"192.168.xx.xx\",\"database client connections\":\"190\",\"port\":\"3306\",\"no index of number\":\"80\"}</td></tr><tr><td>{\"mysql service name\":\"mysql210\",\"mysql address\":\"192.168.xx.xx\",\"database client connections\":\"90\",\"port\":\"3306\",\"no index of number\":\"10\"}</td></tr>" + Constants.TABLE_BODY_HTML_TAIL;
}
}

5
dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/utils/DingTalkUtilsTest.java

@ -16,13 +16,10 @@
*/
package org.apache.dolphinscheduler.alert.utils;
import com.alibaba.fastjson.JSON;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.impl.client.CloseableHttpClient;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mockito;
@ -67,7 +64,7 @@ public class DingTalkUtilsTest {
// logger.info(PropertyUtils.getString(Constants.DINGTALK_WEBHOOK));
// String rsp = DingTalkUtils.sendDingTalkMsg(msgTosend, Constants.UTF_8);
// logger.info("send msg result:{}",rsp);
// String errmsg = JSON.parseObject(rsp).getString("errmsg");
// String errmsg = JSONUtils.parseObject(rsp).getString("errmsg");
// Assert.assertEquals("ok", errmsg);
// } catch (Exception e) {
// e.printStackTrace();

15
dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/utils/EnterpriseWeChatUtilsTest.java

@ -16,14 +16,12 @@
*/
package org.apache.dolphinscheduler.alert.utils;
import com.alibaba.fastjson.JSON;
import org.apache.dolphinscheduler.common.enums.AlertType;
import org.apache.dolphinscheduler.common.enums.ShowType;
import org.apache.dolphinscheduler.dao.entity.Alert;
import org.apache.dolphinscheduler.plugin.model.AlertData;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mockito;
@ -34,6 +32,7 @@ import org.powermock.modules.junit4.PowerMockRunner;
import java.io.IOException;
import java.util.*;
import org.apache.dolphinscheduler.common.utils.*;
/**
* Please manually modify the configuration file before testing.
@ -56,8 +55,8 @@ public class EnterpriseWeChatUtilsTest {
private static final String enterpriseWechatUsers="LiGang,journey";
private static final String msg = "hello world";
private static final String enterpriseWechatTeamSendMsg = "{\\\"toparty\\\":\\\"$toParty\\\",\\\"agentid\\\":\\\"$agentId\\\",\\\"msgtype\\\":\\\"text\\\",\\\"text\\\":{\\\"content\\\":\\\"$msg\\\"},\\\"safe\\\":\\\"0\\\"}";
private static final String enterpriseWechatUserSendMsg = "{\\\"touser\\\":\\\"$toUser\\\",\\\"agentid\\\":\\\"$agentId\\\",\\\"msgtype\\\":\\\"markdown\\\",\\\"markdown\\\":{\\\"content\\\":\\\"$msg\\\"}}";
private static final String enterpriseWechatTeamSendMsg = "{\\\"toparty\\\":\\\"{toParty}\\\",\\\"agentid\\\":\\\"{agentId}\\\",\\\"msgtype\\\":\\\"text\\\",\\\"text\\\":{\\\"content\\\":\\\"{msg}\\\"},\\\"safe\\\":\\\"0\\\"}";
private static final String enterpriseWechatUserSendMsg = "{\\\"touser\\\":\\\"{toUser}\\\",\\\"agentid\\\":\\\"{agentId}\\\",\\\"msgtype\\\":\\\"markdown\\\",\\\"markdown\\\":{\\\"content\\\":\\\"{msg}\\\"}}";
@Before
public void init(){
@ -206,7 +205,7 @@ public class EnterpriseWeChatUtilsTest {
// String msg = EnterpriseWeChatUtils.makeTeamSendMsg(partyId, agentId, "hello world");
// String resp = EnterpriseWeChatUtils.sendEnterpriseWeChat("utf-8", msg, token);
//
// String errmsg = JSON.parseObject(resp).getString("errmsg");
// String errmsg = JSONUtils.parseObject(resp).getString("errmsg");
// Assert.assertEquals("ok",errmsg);
// } catch (IOException e) {
// e.printStackTrace();
@ -221,7 +220,7 @@ public class EnterpriseWeChatUtilsTest {
// String msg = EnterpriseWeChatUtils.makeTeamSendMsg(listPartyId, agentId, "hello world");
// String resp = EnterpriseWeChatUtils.sendEnterpriseWeChat("utf-8", msg, token);
//
// String errmsg = JSON.parseObject(resp).getString("errmsg");
// String errmsg = JSONUtils.parseObject(resp).getString("errmsg");
// Assert.assertEquals("ok",errmsg);
// } catch (IOException e) {
// e.printStackTrace();
@ -248,7 +247,7 @@ public class EnterpriseWeChatUtilsTest {
//
// String resp = EnterpriseWeChatUtils.sendEnterpriseWeChat("utf-8", msg, token);
//
// String errmsg = JSON.parseObject(resp).getString("errmsg");
// String errmsg = JSONUtils.parseObject(resp).getString("errmsg");
// Assert.assertEquals("ok",errmsg);
// } catch (IOException e) {
// e.printStackTrace();
@ -263,7 +262,7 @@ public class EnterpriseWeChatUtilsTest {
// String msg = EnterpriseWeChatUtils.makeUserSendMsg(listUserId, agentId, "hello world");
// String resp = EnterpriseWeChatUtils.sendEnterpriseWeChat("utf-8", msg, token);
//
// String errmsg = JSON.parseObject(resp).getString("errmsg");
// String errmsg = JSONUtils.parseObject(resp).getString("errmsg");
// Assert.assertEquals("ok",errmsg);
// } catch (IOException e) {
// e.printStackTrace();

112
dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/utils/JSONUtilsTest.java

@ -1,112 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.alert.utils;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import static org.junit.Assert.*;
public class JSONUtilsTest {
private static final Logger logger = LoggerFactory.getLogger(JSONUtilsTest.class);
public List<LinkedHashMap<String, Object>> list = new ArrayList<>();
public String expected = null;
@Before
public void setUp() throws Exception {
//Define expected json string
expected = "[{\"mysql service name\":\"mysql200\",\"mysql address\":\"192.168.xx.xx\",\"port\":\"3306\",\"no index of number\":\"80\",\"database client connections\":\"190\"}]";
//Initial map
LinkedHashMap<String, Object> map = new LinkedHashMap<>();
map.put("mysql service name","mysql200");
map.put("mysql address","192.168.xx.xx");
map.put("port","3306");
map.put("no index of number","80");
map.put("database client connections","190");
//Add map into list
list.add(map);
}
/**
* Test toJsonString
*/
@Test
public void testToJsonString() {
//Invoke toJsonString
String result = JSONUtils.toJsonString(list);
logger.info(result);
//Equal result with expected string
assertEquals(result,expected);
//If param is null, then return null string
result = JSONUtils.toJsonString(null);
logger.info(result);
assertEquals("null", result);
}
/**
* Test toList
*/
@Test
public void testToList() {
//Invoke toList
List<LinkedHashMap> result = JSONUtils.toList(expected ,LinkedHashMap.class);
//Equal list size=1
assertEquals(1,result.size());
//Transform entity to LinkedHashMap<String, Object>
LinkedHashMap<String, Object> entity = result.get(0);
//Equal expected values
assertEquals("mysql200",entity.get("mysql service name"));
assertEquals("192.168.xx.xx", entity.get("mysql address"));
assertEquals("3306", entity.get("port"));
assertEquals("80", entity.get("no index of number"));
assertEquals("190", entity.get("database client connections"));
//If param is null, then return empty list
result = JSONUtils.toList(null ,LinkedHashMap.class);
assertNotNull(result);
assertTrue(result.isEmpty());
//If param is incorrect, then return empty list and log error message
result = JSONUtils.toList("}{" ,LinkedHashMap.class);
assertNotNull(result);
assertTrue(result.isEmpty());
}
}

1
dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/utils/MailUtilsTest.java

@ -29,6 +29,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
import org.apache.dolphinscheduler.common.utils.*;
/**

7
dolphinscheduler-api/pom.xml

@ -21,7 +21,7 @@
<parent>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler</artifactId>
<version>1.2.1-SNAPSHOT</version>
<version>1.3.2-SNAPSHOT</version>
</parent>
<artifactId>dolphinscheduler-api</artifactId>
<name>${project.artifactId}</name>
@ -86,11 +86,6 @@
<artifactId>spring-context</artifactId>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>fastjson</artifactId>
</dependency>
<dependency>
<groupId>commons-collections</groupId>
<artifactId>commons-collections</artifactId>

3
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/configuration/AppConfiguration.java

@ -35,6 +35,7 @@ public class AppConfiguration implements WebMvcConfigurer {
public static final String LOGIN_INTERCEPTOR_PATH_PATTERN = "/**/*";
public static final String LOGIN_PATH_PATTERN = "/login";
public static final String REGISTER_PATH_PATTERN = "/users/register";
public static final String PATH_PATTERN = "/**";
public static final String LOCALE_LANGUAGE_COOKIE = "language";
public static final int COOKIE_MAX_AGE = 3600;
@ -76,7 +77,7 @@ public class AppConfiguration implements WebMvcConfigurer {
//i18n
registry.addInterceptor(localeChangeInterceptor());
registry.addInterceptor(loginInterceptor()).addPathPatterns(LOGIN_INTERCEPTOR_PATH_PATTERN).excludePathPatterns(LOGIN_PATH_PATTERN,"/swagger-resources/**", "/webjars/**", "/v2/**", "/doc.html", "*.html", "/ui/**");
registry.addInterceptor(loginInterceptor()).addPathPatterns(LOGIN_INTERCEPTOR_PATH_PATTERN).excludePathPatterns(LOGIN_PATH_PATTERN, REGISTER_PATH_PATTERN, "/swagger-resources/**", "/webjars/**", "/v2/**", "/doc.html", "*.html", "/ui/**");
}

2
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/DataSourceController.java

@ -278,7 +278,7 @@ public class DataSourceController extends BaseController {
@RequestParam(value = "password") String password,
@RequestParam(value = "connectType") DbConnectType connectType,
@RequestParam(value = "other") String other) {
logger.info("login user {}, connect datasource: {} failure, note: {}, type: {}, connectType: {}, other: {}",
logger.info("login user {}, connect datasource: {}, note: {}, type: {}, connectType: {}, other: {}",
loginUser.getUserName(), name, note, type, connectType, other);
String parameter = dataSourceService.buildParameter(name, note, type, host, port, database, principal, userName, password, connectType, other);
Boolean isConnection = dataSourceService.checkConnection(type, parameter);

2
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java

@ -63,7 +63,7 @@ public class ResourcesController extends BaseController {
private UdfFuncService udfFuncService;
/**
* create resource
* create directory
*
* @param loginUser login user
* @param alias alias

31
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/UsersController.java

@ -410,5 +410,36 @@ public class UsersController extends BaseController {
}
}
/**
* user register
*
* @param userName user name
* @param userPassword user password
* @param repeatPassword repeat password
* @param email user email
*/
@ApiOperation(value="registerUser",notes = "REGISTER_USER_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "userName", value = "USER_NAME", type = "String"),
@ApiImplicitParam(name = "userPassword", value = "USER_PASSWORD", type = "String"),
@ApiImplicitParam(name = "repeatPassword", value = "REPEAT_PASSWORD", type = "String"),
@ApiImplicitParam(name = "email", value = "EMAIL", type = "String"),
})
@PostMapping("/register")
@ResponseStatus(HttpStatus.OK)
@ApiException(CREATE_USER_ERROR)
public Result<Object> registerUser(@RequestParam(value = "userName") String userName,
@RequestParam(value = "userPassword") String userPassword,
@RequestParam(value = "repeatPassword") String repeatPassword,
@RequestParam(value = "email") String email) throws Exception {
userName = userName.replaceAll("[\n\r\t]", "");
userPassword = userPassword.replaceAll("[\n\r\t]", "");
repeatPassword = repeatPassword.replaceAll("[\n\r\t]", "");
email = email.replaceAll("[\n\r\t]", "");
logger.info("user self-register, userName: {}, userPassword {}, repeatPassword {}, email {}",
userName, userPassword, repeatPassword, email);
Map<String, Object> result = usersService.registerUser(userName, userPassword, repeatPassword, email);
return returnDataList(result);
}
}

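A hedged example call against the new self-register endpoint; the host, port, and context path below are assumptions about a typical local deployment, not values taken from this patch:

curl -X POST "http://localhost:12345/dolphinscheduler/users/register" \
     -d "userName=demo" \
     -d "userPassword=demo123" \
     -d "repeatPassword=demo123" \
     -d "email=demo@example.com"
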
4
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/ScheduleParam.java

@ -16,13 +16,17 @@
*/
package org.apache.dolphinscheduler.api.dto;
import com.fasterxml.jackson.annotation.JsonFormat;
import java.util.Date;
/**
* schedule parameters
*/
public class ScheduleParam {
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss",timezone="GMT+8")
private Date startTime;
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss",timezone="GMT+8")
private Date endTime;
private String crontab;

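The @JsonFormat annotations pin the wire format of these Date fields instead of Jackson's default epoch-millis numbers. A minimal sketch of the effect:

import com.fasterxml.jackson.annotation.JsonFormat;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.Date;

public class JsonFormatDemo {
    static class Window {
        @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss", timezone = "GMT+8")
        public Date startTime = new Date(0L);   // epoch, i.e. 1970-01-01 08:00:00 in GMT+8
    }

    public static void main(String[] args) throws Exception {
        // prints {"startTime":"1970-01-01 08:00:00"} rather than {"startTime":0}
        System.out.println(new ObjectMapper().writeValueAsString(new Window()));
    }
}
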
5
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/gantt/Task.java

@ -16,6 +16,8 @@
*/
package org.apache.dolphinscheduler.api.dto.gantt;
import com.fasterxml.jackson.annotation.JsonFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
@ -41,16 +43,19 @@ public class Task {
/**
* task execution date
*/
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss",timezone="GMT+8")
private Date executionDate;
/**
* task iso start
*/
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss",timezone="GMT+8")
private Date isoStart;
/**
* task iso end
*/
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss",timezone="GMT+8")
private Date isoEnd;
/**

4
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/Directory.java

@ -1,5 +1,3 @@
package org.apache.dolphinscheduler.api.dto.resources;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
@ -16,6 +14,8 @@ package org.apache.dolphinscheduler.api.dto.resources;
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.dto.resources;
/**
* directory
*/

4
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/FileLeaf.java

@ -1,5 +1,3 @@
package org.apache.dolphinscheduler.api.dto.resources;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
@ -16,6 +14,8 @@ package org.apache.dolphinscheduler.api.dto.resources;
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.dto.resources;
/**
* file leaf
*/

28
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/ResourceComponent.java

@ -1,12 +1,3 @@
package org.apache.dolphinscheduler.api.dto.resources;
import com.alibaba.fastjson.annotation.JSONField;
import com.alibaba.fastjson.annotation.JSONType;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import java.util.ArrayList;
import java.util.List;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
@ -23,10 +14,18 @@ import java.util.List;
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.dto.resources;
import com.fasterxml.jackson.annotation.JsonPropertyOrder;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import java.util.ArrayList;
import java.util.List;
/**
* resource component
*/
@JSONType(orders={"id","pid","name","fullName","description","isDirctory","children","type"})
@JsonPropertyOrder({"id","pid","name","fullName","description","isDirctory","children","type"})
public abstract class ResourceComponent {
public ResourceComponent() {
}
@ -46,17 +45,14 @@ public abstract class ResourceComponent {
/**
* id
*/
@JSONField(ordinal = 1)
protected int id;
/**
* parent id
*/
@JSONField(ordinal = 2)
protected int pid;
/**
* name
*/
@JSONField(ordinal = 3)
protected String name;
/**
* current directory
@ -65,32 +61,26 @@ public abstract class ResourceComponent {
/**
* full name
*/
@JSONField(ordinal = 4)
protected String fullName;
/**
* description
*/
@JSONField(ordinal = 5)
protected String description;
/**
* is directory
*/
@JSONField(ordinal = 6)
protected boolean isDirctory;
/**
* id value
*/
@JSONField(ordinal = 7)
protected String idValue;
/**
* resource type
*/
@JSONField(ordinal = 8)
protected ResourceType type;
/**
* children
*/
@JSONField(ordinal = 8)
protected List<ResourceComponent> children = new ArrayList<>();
/**

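With fastjson removed, the per-field @JSONField(ordinal = ...) ordering collapses into a single class-level Jackson @JsonPropertyOrder, as in this reduced sketch:

import com.fasterxml.jackson.annotation.JsonPropertyOrder;
import com.fasterxml.jackson.databind.ObjectMapper;

public class PropertyOrderDemo {
    @JsonPropertyOrder({"id", "pid", "name"})
    static class Component {
        public String name = "a.sh";
        public int id = 1;
        public int pid = 0;
    }

    public static void main(String[] args) throws Exception {
        // prints {"id":1,"pid":0,"name":"a.sh"} regardless of field declaration order
        System.out.println(new ObjectMapper().writeValueAsString(new Component()));
    }
}
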
9
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/visitor/Visitor.java

@ -1,8 +1,3 @@
package org.apache.dolphinscheduler.api.dto.resources.visitor;
import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
@ -19,6 +14,10 @@ import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.dto.resources.visitor;
import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
/**
* Visitor
*/

4
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/treeview/Instance.java

@ -16,6 +16,8 @@
*/
package org.apache.dolphinscheduler.api.dto.treeview;
import com.fasterxml.jackson.annotation.JsonFormat;
import java.util.Date;
/**
@ -42,11 +44,13 @@ public class Instance {
/**
* node start time
*/
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss",timezone="GMT+8")
private Date startTime;
/**
* node end time
*/
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss",timezone="GMT+8")
private Date endTime;

6
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java

@ -35,7 +35,7 @@ public enum Status {
USER_NAME_NULL(10004,"user name is null", "用户名不能为空"),
HDFS_OPERATION_ERROR(10006, "hdfs operation error", "hdfs操作错误"),
TASK_INSTANCE_NOT_FOUND(10008, "task instance not found", "任务实例不存在"),
TENANT_NAME_EXIST(10009, "tenant code already exists", "租户编码不能为空"),
TENANT_NAME_EXIST(10009, "tenant code {0} already exists", "租户编码[{0}]已存在"),
USER_NOT_EXIST(10010, "user {0} not exists", "用户[{0}]不存在"),
ALERT_GROUP_NOT_EXIST(10011, "alarm group not found", "告警组不存在"),
ALERT_GROUP_EXIST(10012, "alarm group already exists", "告警组名称已存在"),
@ -192,7 +192,7 @@ public enum Status {
RESOURCE_IS_USED(20014, "resource file is used by process definition","资源文件被上线的流程定义使用了"),
PARENT_RESOURCE_NOT_EXIST(20015, "parent resource not exist","父资源文件不存在"),
RESOURCE_NOT_EXIST_OR_NO_PERMISSION(20016, "resource not exist or no permission,please view the task node and remove error resource","请检查任务节点并移除无权限或者已删除的资源"),
RESOURCE_IS_AUTHORIZED(20017, "resource is authorized to user {0},suffix not allowed to be modified", "资源文件已授权其他用户[{0}],后缀不允许修改"),
USER_NO_OPERATION_PERM(30001, "user has no operation privilege", "当前用户没有操作权限"),
USER_NO_OPERATION_PROJECT_PERM(30002, "user {0} does not have project {1} permission", "当前用户[{0}]没有[{1}]项目的操作权限"),
@ -218,7 +218,7 @@ public enum Status {
DATA_IS_NOT_VALID(50017,"data {0} not valid", "数据[{0}]无效"),
DATA_IS_NULL(50018,"data {0} is null", "数据[{0}]不能为空"),
PROCESS_NODE_HAS_CYCLE(50019,"process node has cycle", "流程节点间存在循环依赖"),
PROCESS_NODE_S_PARAMETER_INVALID(50020,"process node %s parameter invalid", "流程节点[%s]参数无效"),
PROCESS_NODE_S_PARAMETER_INVALID(50020,"process node {0} parameter invalid", "流程节点[{0}]参数无效"),
PROCESS_DEFINE_STATE_ONLINE(50021, "process definition {0} is already on line", "工作流定义[{0}]已上线"),
DELETE_PROCESS_DEFINE_BY_ID_ERROR(50022,"delete process definition by id error", "删除工作流定义错误"),
SCHEDULE_CRON_STATE_ONLINE(50023,"the status of schedule {0} is already on line", "调度配置[{0}]已上线"),
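
The %s-to-{0} changes above move these entries onto the MessageFormat-style placeholders the rest of Status already uses, so arguments can be substituted into both the English and Chinese messages. A minimal sketch of how such a pattern is filled (the direct MessageFormat call and the sample argument are illustrative, not the project's putMsg implementation):

    import java.text.MessageFormat;

    public class StatusMessageDemo {
        public static void main(String[] args) {
            String pattern = "process node {0} parameter invalid";
            // prints: process node shell-task parameter invalid
            System.out.println(MessageFormat.format(pattern, "shell-task"));
        }
    }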

18
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/interceptor/LoginHandlerInterceptor.java

@ -50,20 +50,10 @@ public class LoginHandlerInterceptor implements HandlerInterceptor {
/**
* Intercept the execution of a handler. Called after HandlerMapping determined
* an appropriate handler object, but before HandlerAdapter invokes the handler.
* <p>DispatcherServlet processes a handler in an execution chain, consisting
* of any number of interceptors, with the handler itself at the end.
* With this method, each interceptor can decide to abort the execution chain,
* typically sending a HTTP error or writing a custom response.
* <p><strong>Note:</strong> special considerations apply for asynchronous
* request processing. For more details see
* {@link org.springframework.web.servlet.AsyncHandlerInterceptor}.
* @param request current HTTP request
* @param response current HTTP response
* @param handler chosen handler to execute, for type and/or instance evaluation
* @return {@code true} if the execution chain should proceed with the
* next interceptor or the handler itself. Else, DispatcherServlet assumes
* that this interceptor has already dealt with the response itself.
* @param request current HTTP request
* @param response current HTTP response
* @param handler chosen handler to execute, for type and/or instance evaluation
* @return true if the execution chain should proceed with the handler, false otherwise
*/
@Override
public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler) {

2
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/BaseDAGService.java

@ -20,7 +20,7 @@ import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.process.ProcessDag;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.entity.ProcessData;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.utils.DagHelper;

91
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/DataSourceService.java

@ -16,11 +16,10 @@
*/
package org.apache.dolphinscheduler.api.service;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.fastjson.TypeReference;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.commons.lang.StringUtils;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
@ -28,7 +27,7 @@ import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.DbConnectType;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.utils.CommonUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.datasource.*;
import org.apache.dolphinscheduler.dao.entity.DataSource;
import org.apache.dolphinscheduler.dao.entity.Resource;
@ -159,8 +158,18 @@ public class DataSourceService extends BaseService{
putMsg(result, Status.DATASOURCE_EXIST);
return result;
}
//check password,if the password is not updated, set to the old password.
ObjectNode paramObject = JSONUtils.parseObject(parameter);
String password = paramObject.path(Constants.PASSWORD).asText();
if (StringUtils.isBlank(password)) {
String oldConnectionParams = dataSource.getConnectionParams();
ObjectNode oldParams = JSONUtils.parseObject(oldConnectionParams);
paramObject.put(Constants.PASSWORD, oldParams.path(Constants.PASSWORD).asText());
}
// connectionParams json
String connectionParams = paramObject.toString();
Boolean isConnection = checkConnection(type, parameter);
Boolean isConnection = checkConnection(type, connectionParams);
if (!isConnection) {
logger.info("connect failed, type:{}, parameter:{}", type, parameter);
putMsg(result, Status.DATASOURCE_CONNECT_FAILED);
@ -172,7 +181,7 @@ public class DataSourceService extends BaseService{
dataSource.setNote(desc);
dataSource.setUserName(loginUser.getUserName());
dataSource.setType(type);
dataSource.setConnectionParams(parameter);
dataSource.setConnectionParams(connectionParams);
dataSource.setUpdateTime(now);
dataSourceMapper.updateById(dataSource);
putMsg(result, Status.SUCCESS);
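
The hunk above makes a blank password in an update request mean "keep the stored one": the old connection params are re-read and their password copied into the new JSON before the connection test runs. A self-contained sketch of the same merge with plain Jackson (the JSON strings and the "password" key are illustrative; the real code goes through JSONUtils and Constants.PASSWORD):

    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.fasterxml.jackson.databind.node.ObjectNode;

    public class PasswordMergeDemo {
        public static void main(String[] args) throws Exception {
            ObjectMapper mapper = new ObjectMapper();
            ObjectNode incoming = (ObjectNode) mapper.readTree("{\"user\":\"root\",\"password\":\"\"}");
            ObjectNode stored = (ObjectNode) mapper.readTree("{\"user\":\"root\",\"password\":\"secret\"}");
            // a blank password in the request means "unchanged": carry the old one over
            if (incoming.path("password").asText().isEmpty()) {
                incoming.put("password", stored.path("password").asText());
            }
            // prints {"user":"root","password":"secret"}
            System.out.println(incoming.toString());
        }
    }
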
@ -211,12 +220,20 @@ public class DataSourceService extends BaseService{
String parameter = dataSource.getConnectionParams();
BaseDataSource datasourceForm = DataSourceFactory.getDatasource(dataSource.getType(), parameter);
DbConnectType connectType = null;
String hostSeperator = Constants.DOUBLE_SLASH;
if(DbType.ORACLE.equals(dataSource.getType())){
connectType = ((OracleDataSource) datasourceForm).getConnectType();
if(DbConnectType.ORACLE_SID.equals(connectType)){
hostSeperator = Constants.AT_SIGN;
}
}
String database = datasourceForm.getDatabase();
// jdbc connection params
String other = datasourceForm.getOther();
String address = datasourceForm.getAddress();
String[] hostsPorts = getHostsAndPort(address);
String[] hostsPorts = getHostsAndPort(address,hostSeperator);
// ip host
String host = hostsPorts[0];
// port
@ -252,12 +269,15 @@ public class DataSourceService extends BaseService{
map.put(NAME, dataSourceName);
map.put(NOTE, desc);
map.put(TYPE, dataSourceType);
if (connectType != null) {
map.put(Constants.ORACLE_DB_CONNECT_TYPE, connectType);
}
map.put(HOST, host);
map.put(PORT, port);
map.put(PRINCIPAL, datasourceForm.getPrincipal());
map.put(DATABASE, database);
map.put(USER_NAME, datasourceForm.getUser());
map.put(PASSWORD, datasourceForm.getPassword());
map.put(OTHER, otherMap);
result.put(Constants.DATA_LIST, map);
putMsg(result, Status.SUCCESS);
@ -305,9 +325,9 @@ public class DataSourceService extends BaseService{
for (DataSource dataSource : dataSourceList) {
String connectionParams = dataSource.getConnectionParams();
JSONObject object = JSON.parseObject(connectionParams);
ObjectNode object = JSONUtils.parseObject(connectionParams);
object.put(Constants.PASSWORD, Constants.XXXXXX);
dataSource.setConnectionParams(JSONUtils.toJson(object));
dataSource.setConnectionParams(object.toString());
}
}
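
The masking hunk above swaps fastjson's JSON.parseObject for the Jackson-backed JSONUtils while keeping the behavior: every datasource returned by the list query gets its password overwritten before leaving the service. A sketch of the effect (the connection-params JSON and the "******" stand-in for Constants.XXXXXX are illustrative):

    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.fasterxml.jackson.databind.node.ObjectNode;

    public class MaskPasswordDemo {
        public static void main(String[] args) throws Exception {
            ObjectNode params = (ObjectNode) new ObjectMapper()
                    .readTree("{\"address\":\"jdbc:mysql://db:3306\",\"password\":\"secret\"}");
            params.put("password", "******"); // stand-in for Constants.XXXXXX
            // prints the connection params with the password blanked out
            System.out.println(params.toString());
        }
    }
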
@ -369,11 +389,11 @@ public class DataSourceService extends BaseService{
try {
switch (dbType) {
case POSTGRESQL:
datasource = JSON.parseObject(parameter, PostgreDataSource.class);
datasource = JSONUtils.parseObject(parameter, PostgreDataSource.class);
Class.forName(Constants.ORG_POSTGRESQL_DRIVER);
break;
case MYSQL:
datasource = JSON.parseObject(parameter, MySQLDataSource.class);
datasource = JSONUtils.parseObject(parameter, MySQLDataSource.class);
Class.forName(Constants.COM_MYSQL_JDBC_DRIVER);
break;
case HIVE:
@ -388,26 +408,26 @@ public class DataSourceService extends BaseService{
getString(org.apache.dolphinscheduler.common.Constants.LOGIN_USER_KEY_TAB_PATH));
}
if (dbType == DbType.HIVE){
datasource = JSON.parseObject(parameter, HiveDataSource.class);
datasource = JSONUtils.parseObject(parameter, HiveDataSource.class);
}else if (dbType == DbType.SPARK){
datasource = JSON.parseObject(parameter, SparkDataSource.class);
datasource = JSONUtils.parseObject(parameter, SparkDataSource.class);
}
Class.forName(Constants.ORG_APACHE_HIVE_JDBC_HIVE_DRIVER);
break;
case CLICKHOUSE:
datasource = JSON.parseObject(parameter, ClickHouseDataSource.class);
datasource = JSONUtils.parseObject(parameter, ClickHouseDataSource.class);
Class.forName(Constants.COM_CLICKHOUSE_JDBC_DRIVER);
break;
case ORACLE:
datasource = JSON.parseObject(parameter, OracleDataSource.class);
datasource = JSONUtils.parseObject(parameter, OracleDataSource.class);
Class.forName(Constants.COM_ORACLE_JDBC_DRIVER);
break;
case SQLSERVER:
datasource = JSON.parseObject(parameter, SQLServerDataSource.class);
datasource = JSONUtils.parseObject(parameter, SQLServerDataSource.class);
Class.forName(Constants.COM_SQLSERVER_JDBC_DRIVER);
break;
case DB2:
datasource = JSON.parseObject(parameter, DB2ServerDataSource.class);
datasource = JSONUtils.parseObject(parameter, DB2ServerDataSource.class);
Class.forName(Constants.COM_DB2_JDBC_DRIVER);
break;
default:
@ -478,13 +498,10 @@ public class DataSourceService extends BaseService{
String password, DbConnectType connectType, String other) {
String address = buildAddress(type, host, port, connectType);
String jdbcUrl;
if (Constants.ORACLE.equals(type.name())
&& connectType == DbConnectType.ORACLE_SID) {
jdbcUrl = address + ":" + database;
} else {
jdbcUrl = address + "/" + database;
Map<String, Object> parameterMap = new LinkedHashMap<String, Object>(6);
String jdbcUrl = address + "/" + database;
if (Constants.ORACLE.equals(type.name())) {
parameterMap.put(Constants.ORACLE_DB_CONNECT_TYPE, connectType);
}
if (CommonUtils.getKerberosStartupState() &&
@ -505,7 +522,7 @@ public class DataSourceService extends BaseService{
separator = ";";
}
Map<String, Object> parameterMap = new LinkedHashMap<String, Object>(6);
parameterMap.put(TYPE, connectType);
parameterMap.put(Constants.ADDRESS, address);
parameterMap.put(Constants.DATABASE, database);
parameterMap.put(Constants.JDBC_URL, jdbcUrl);
@ -516,8 +533,7 @@ public class DataSourceService extends BaseService{
parameterMap.put(Constants.PRINCIPAL,principal);
}
if (other != null && !"".equals(other)) {
LinkedHashMap<String, String> map = JSON.parseObject(other, new TypeReference<LinkedHashMap<String, String>>() {
});
Map<String, String> map = JSONUtils.toMap(other);
if (map.size() > 0) {
StringBuilder otherSb = new StringBuilder();
for (Map.Entry<String, String> entry: map.entrySet()) {
@ -532,9 +548,9 @@ public class DataSourceService extends BaseService{
}
if(logger.isDebugEnabled()){
logger.info("parameters map-----" + JSON.toJSONString(parameterMap));
logger.info("parameters map:{}", JSONUtils.toJsonString(parameterMap));
}
return JSON.toJSONString(parameterMap);
return JSONUtils.toJsonString(parameterMap);
}
@ -675,12 +691,23 @@ public class DataSourceService extends BaseService{
/**
* get host and port by address
*
* @param address
* @param address address
* @return string array: [host,port]
*/
private String[] getHostsAndPort(String address) {
return getHostsAndPort(address,Constants.DOUBLE_SLASH);
}
/**
* get host and port by address
*
* @param address address
* @param separator separator
* @return string array: [host,port]
*/
private String[] getHostsAndPort(String address,String separator) {
String[] result = new String[2];
String[] tmpArray = address.split(Constants.DOUBLE_SLASH);
String[] tmpArray = address.split(separator);
String hostsAndPorts = tmpArray[tmpArray.length - 1];
StringBuilder hosts = new StringBuilder();
String[] hostPortArray = hostsAndPorts.split(Constants.COMMA);
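
The separator parameter introduced above exists because Oracle SID-style addresses place the host after an "@" rather than after "//". A standalone sketch of the split, simplified to a single host (the sample JDBC addresses are invented; the real method also handles comma-separated host lists):

    public class HostPortDemo {
        // returns {host, port} taken from the tail of a JDBC address
        static String[] hostAndPort(String address, String separator) {
            String[] tmp = address.split(separator);
            String tail = tmp[tmp.length - 1];          // e.g. "127.0.0.1:1521"
            int idx = tail.lastIndexOf(':');
            return new String[]{tail.substring(0, idx), tail.substring(idx + 1)};
        }

        public static void main(String[] args) {
            // service-name style: host follows "//" (Constants.DOUBLE_SLASH)
            System.out.println(java.util.Arrays.toString(
                    hostAndPort("jdbc:oracle:thin:@//127.0.0.1:1521", "//")));
            // SID style: host follows "@" (Constants.AT_SIGN)
            System.out.println(java.util.Arrays.toString(
                    hostAndPort("jdbc:oracle:thin:@127.0.0.1:1521", "@")));
        }
    }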

12
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ExecutorService.java

@ -24,7 +24,7 @@ import org.apache.dolphinscheduler.common.enums.*;
import org.apache.dolphinscheduler.common.model.Server;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.*;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
@ -509,7 +509,7 @@ public class ExecutorService extends BaseService{
if(warningType != null){
command.setWarningType(warningType);
}
command.setCommandParam(JSONUtils.toJson(cmdParam));
command.setCommandParam(JSONUtils.toJsonString(cmdParam));
command.setExecutorId(executorId);
command.setWarningGroupId(warningGroupId);
command.setProcessInstancePriority(processInstancePriority);
@ -532,7 +532,7 @@ public class ExecutorService extends BaseService{
if(runMode == RunMode.RUN_MODE_SERIAL){
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, DateUtils.dateToString(start));
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, DateUtils.dateToString(end));
command.setCommandParam(JSONUtils.toJson(cmdParam));
command.setCommandParam(JSONUtils.toJsonString(cmdParam));
return processService.createCommand(command);
}else if (runMode == RunMode.RUN_MODE_PARALLEL){
List<Schedule> schedules = processService.queryReleaseSchedulerListByProcessDefinitionId(processDefineId);
@ -547,7 +547,7 @@ public class ExecutorService extends BaseService{
for (Date date : listDate) {
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, DateUtils.dateToString(date));
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, DateUtils.dateToString(date));
command.setCommandParam(JSONUtils.toJson(cmdParam));
command.setCommandParam(JSONUtils.toJsonString(cmdParam));
processService.createCommand(command);
}
return listDate.size();
@ -558,7 +558,7 @@ public class ExecutorService extends BaseService{
runCunt += 1;
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, DateUtils.dateToString(start));
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, DateUtils.dateToString(start));
command.setCommandParam(JSONUtils.toJson(cmdParam));
command.setCommandParam(JSONUtils.toJsonString(cmdParam));
processService.createCommand(command);
start = DateUtils.getSomeDay(start, 1);
}
@ -570,7 +570,7 @@ public class ExecutorService extends BaseService{
processDefineId, schedule);
}
}else{
command.setCommandParam(JSONUtils.toJson(cmdParam));
command.setCommandParam(JSONUtils.toJsonString(cmdParam));
return processService.createCommand(command);
}
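
Every JSONUtils.toJson call replaced above serializes the command-parameter map into the JSON string stored on the Command row. A minimal Jackson sketch of that serialization (the map keys are illustrative stand-ins for CMDPARAM_COMPLEMENT_DATA_START_DATE and CMDPARAM_COMPLEMENT_DATA_END_DATE):

    import com.fasterxml.jackson.databind.ObjectMapper;
    import java.util.HashMap;
    import java.util.Map;

    public class CommandParamDemo {
        public static void main(String[] args) throws Exception {
            Map<String, String> cmdParam = new HashMap<>();
            cmdParam.put("complementStartDate", "2020-06-01 00:00:00");
            cmdParam.put("complementEndDate", "2020-06-02 00:00:00");
            // what command.setCommandParam(...) ultimately persists: a JSON string
            System.out.println(new ObjectMapper().writeValueAsString(cmdParam));
        }
    }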

48
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/LoggerService.java

@ -16,6 +16,9 @@
*/
package org.apache.dolphinscheduler.api.service;
import java.nio.charset.StandardCharsets;
import javax.annotation.PreDestroy;
import org.apache.commons.lang.ArrayUtils;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
@ -29,8 +32,6 @@ import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import javax.annotation.PreDestroy;
/**
* log service
*/
@ -39,17 +40,19 @@ public class LoggerService {
private static final Logger logger = LoggerFactory.getLogger(LoggerService.class);
private static final String LOG_HEAD_FORMAT = "[LOG-PATH]: %s, [HOST]: %s%s";
@Autowired
private ProcessService processService;
private final LogClientService logClient;
public LoggerService(){
public LoggerService() {
logClient = new LogClientService();
}
@PreDestroy
public void close(){
public void close() {
logClient.close();
}
@ -65,24 +68,34 @@ public class LoggerService {
TaskInstance taskInstance = processService.findTaskInstanceById(taskInstId);
if (taskInstance == null || StringUtils.isBlank(taskInstance.getHost())){
return new Result(Status.TASK_INSTANCE_NOT_FOUND.getCode(), Status.TASK_INSTANCE_NOT_FOUND.getMsg());
if (taskInstance == null || StringUtils.isBlank(taskInstance.getHost())) {
return Result.error(Status.TASK_INSTANCE_NOT_FOUND);
}
String host = getHost(taskInstance.getHost());
Result result = new Result(Status.SUCCESS.getCode(), Status.SUCCESS.getMsg());
logger.info("log host : {} , logPath : {} , logServer port : {}",host,taskInstance.getLogPath(),Constants.RPC_PORT);
logger.info("log host : {} , logPath : {} , logServer port : {}", host, taskInstance.getLogPath(),
Constants.RPC_PORT);
StringBuilder log = new StringBuilder();
if (skipLineNum == 0) {
String head = String.format(LOG_HEAD_FORMAT,
taskInstance.getLogPath(),
host,
Constants.SYSTEM_LINE_SEPARATOR);
log.append(head);
}
log.append(logClient
.rollViewLog(host, Constants.RPC_PORT, taskInstance.getLogPath(), skipLineNum, limit));
String log = logClient.rollViewLog(host, Constants.RPC_PORT, taskInstance.getLogPath(),skipLineNum,limit);
result.setData(log);
return result;
}
/**
* get log size
*
@ -91,22 +104,27 @@ public class LoggerService {
*/
public byte[] getLogBytes(int taskInstId) {
TaskInstance taskInstance = processService.findTaskInstanceById(taskInstId);
if (taskInstance == null || StringUtils.isBlank(taskInstance.getHost())){
if (taskInstance == null || StringUtils.isBlank(taskInstance.getHost())) {
throw new RuntimeException("task instance is null or host is null");
}
String host = getHost(taskInstance.getHost());
return logClient.getLogBytes(host, Constants.RPC_PORT, taskInstance.getLogPath());
byte[] head = String.format(LOG_HEAD_FORMAT,
taskInstance.getLogPath(),
host,
Constants.SYSTEM_LINE_SEPARATOR).getBytes(StandardCharsets.UTF_8);
return ArrayUtils.addAll(head,
logClient.getLogBytes(host, Constants.RPC_PORT, taskInstance.getLogPath()));
}
/**
* get host
*
* @param address address
* @return the address itself if it is an old-version address, otherwise the parsed host ip
*/
private String getHost(String address){
if (Host.isOldVersion(address)){
private String getHost(String address) {
if (Host.isOldVersion(address)) {
return address;
}
return Host.of(address).getIp();
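
The LoggerService changes above prepend a formatted head line, naming the log path and host, to both the tailed log and the downloaded bytes. A sketch of the head construction with plain JDK calls (the path, host, and body are sample values; LOG_HEAD_FORMAT matches the constant above, and System.arraycopy stands in for ArrayUtils.addAll):

    import java.nio.charset.StandardCharsets;

    public class LogHeadDemo {
        private static final String LOG_HEAD_FORMAT = "[LOG-PATH]: %s, [HOST]: %s%s";

        public static void main(String[] args) {
            String head = String.format(LOG_HEAD_FORMAT,
                    "/logs/1/task.log", "192.168.0.1", System.lineSeparator());
            byte[] headBytes = head.getBytes(StandardCharsets.UTF_8);
            byte[] body = "task output...".getBytes(StandardCharsets.UTF_8);
            // concatenate head + log body, as getLogBytes now does
            byte[] all = new byte[headBytes.length + body.length];
            System.arraycopy(headBytes, 0, all, 0, headBytes.length);
            System.arraycopy(body, 0, all, headBytes.length, body.length);
            System.out.println(new String(all, StandardCharsets.UTF_8));
        }
    }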

54
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/MonitorService.java

@ -16,29 +16,33 @@
*/
package org.apache.dolphinscheduler.api.service;
import static org.apache.dolphinscheduler.common.utils.Preconditions.checkNotNull;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.utils.ZookeeperMonitor;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ZKNodeType;
import org.apache.dolphinscheduler.dao.MonitorDBDao;
import org.apache.dolphinscheduler.common.model.Server;
import org.apache.dolphinscheduler.common.model.WorkerServerModel;
import org.apache.dolphinscheduler.dao.MonitorDBDao;
import org.apache.dolphinscheduler.dao.entity.MonitorRecord;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.entity.ZookeeperRecord;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.apache.dolphinscheduler.common.utils.Preconditions.*;
import com.google.common.collect.Sets;
/**
* monitor service
*/
@Service
public class MonitorService extends BaseService{
public class MonitorService extends BaseService {
@Autowired
private ZookeeperMonitor zookeeperMonitor;
@ -108,15 +112,41 @@ public class MonitorService extends BaseService{
public Map<String,Object> queryWorker(User loginUser) {
Map<String, Object> result = new HashMap<>(5);
List<Server> masterServers = getServerListFromZK(false);
result.put(Constants.DATA_LIST, masterServers);
List<WorkerServerModel> workerServers = getServerListFromZK(false)
.stream()
.map((Server server) -> {
WorkerServerModel model = new WorkerServerModel();
model.setId(server.getId());
model.setHost(server.getHost());
model.setPort(server.getPort());
model.setZkDirectories(Sets.newHashSet(server.getZkDirectory()));
model.setResInfo(server.getResInfo());
model.setCreateTime(server.getCreateTime());
model.setLastHeartbeatTime(server.getLastHeartbeatTime());
return model;
})
.collect(Collectors.toList());
Map<String, WorkerServerModel> workerHostPortServerMapping = workerServers
.stream()
.collect(Collectors.toMap(
(WorkerServerModel worker) -> {
String[] s = worker.getZkDirectories().iterator().next().split("/");
return s[s.length - 1];
}
, Function.identity()
, (WorkerServerModel oldOne, WorkerServerModel newOne) -> {
oldOne.getZkDirectories().addAll(newOne.getZkDirectories());
return oldOne;
}));
result.put(Constants.DATA_LIST, workerHostPortServerMapping.values());
putMsg(result,Status.SUCCESS);
return result;
}
public List<Server> getServerListFromZK(boolean isMaster){
public List<Server> getServerListFromZK(boolean isMaster) {
checkNotNull(zookeeperMonitor);
ZKNodeType zkNodeType = isMaster ? ZKNodeType.MASTER : ZKNodeType.WORKER;
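
The queryWorker rewrite above collapses duplicate worker registrations (the same host:port advertised under several zookeeper directories) into a single model, using Collectors.toMap with a merge function that unions the directory sets. A simplified, self-contained sketch of that dedup (types reduced to a host:port string plus a set of paths; the sample entries are invented):

    import java.util.*;
    import java.util.stream.Collectors;

    public class WorkerDedupDemo {
        public static void main(String[] args) {
            List<String[]> workers = Arrays.asList(
                    new String[]{"10.0.0.1:1234", "/nodes/worker/default/10.0.0.1:1234"},
                    new String[]{"10.0.0.1:1234", "/nodes/worker/gpu/10.0.0.1:1234"},
                    new String[]{"10.0.0.2:1234", "/nodes/worker/default/10.0.0.2:1234"});

            // key = host:port, merge = union of zk directories, like the code above
            Map<String, Set<String>> byHostPort = workers.stream().collect(Collectors.toMap(
                    w -> w[0],
                    w -> new HashSet<>(Collections.singleton(w[1])),
                    (oldOne, newOne) -> { oldOne.addAll(newOne); return oldOne; }));

            // two entries; 10.0.0.1:1234 carries both directories
            System.out.println(byHostPort);
        }
    }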

417
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java

@ -16,12 +16,12 @@
*/
package org.apache.dolphinscheduler.api.service;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.dolphinscheduler.api.dto.ProcessMeta;
import org.apache.dolphinscheduler.api.dto.treeview.Instance;
import org.apache.dolphinscheduler.api.dto.treeview.TreeViewDto;
@ -73,6 +73,11 @@ public class ProcessDefinitionService extends BaseDAGService {
private static final Logger logger = LoggerFactory.getLogger(ProcessDefinitionService.class);
private static final String PROCESSDEFINITIONID = "processDefinitionId";
private static final String RELEASESTATE = "releaseState";
private static final String TASKS = "tasks";
@Autowired
private ProjectMapper projectMapper;
@ -99,13 +104,13 @@ public class ProcessDefinitionService extends BaseDAGService {
/**
* create process definition
*
* @param loginUser login user
* @param projectName project name
* @param name process definition name
* @param loginUser login user
* @param projectName project name
* @param name process definition name
* @param processDefinitionJson process definition json
* @param desc description
* @param locations locations for nodes
* @param connects connects for nodes
* @param desc description
* @param locations locations for nodes
* @param connects connects for nodes
* @return create result code
* @throws JsonProcessingException JsonProcessingException
*/
@ -159,30 +164,34 @@ public class ProcessDefinitionService extends BaseDAGService {
processDefine.setUpdateTime(now);
processDefine.setFlag(Flag.YES);
processDefineMapper.insert(processDefine);
// return processDefinition object with ID
result.put(Constants.DATA_LIST, processDefineMapper.selectById(processDefine.getId()));
putMsg(result, Status.SUCCESS);
result.put("processDefinitionId",processDefine.getId());
result.put("processDefinitionId", processDefine.getId());
return result;
}
/**
* get resource ids
*
* @param processData process data
* @return resource ids
*/
private String getResourceIds(ProcessData processData) {
List<TaskNode> tasks = processData.getTasks();
Set<Integer> resourceIds = new HashSet<>();
for(TaskNode taskNode : tasks){
for (TaskNode taskNode : tasks) {
String taskParameter = taskNode.getParams();
AbstractParameters params = TaskParametersUtils.getParameters(taskNode.getType(),taskParameter);
AbstractParameters params = TaskParametersUtils.getParameters(taskNode.getType(), taskParameter);
if (CollectionUtils.isNotEmpty(params.getResourceFilesList())) {
Set<Integer> tempSet = params.getResourceFilesList().stream().map(t->t.getId()).collect(Collectors.toSet());
Set<Integer> tempSet = params.getResourceFilesList().stream().map(t -> t.getId()).collect(Collectors.toSet());
resourceIds.addAll(tempSet);
}
}
StringBuilder sb = new StringBuilder();
for(int i : resourceIds) {
for (int i : resourceIds) {
if (sb.length() > 0) {
sb.append(",");
}
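
The reformatted loop above joins the collected resource ids with commas by hand. For comparison, an equivalent stream one-liner (a sketch only; the committed code deliberately keeps the explicit StringBuilder):

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;
    import java.util.stream.Collectors;

    public class JoinIdsDemo {
        public static void main(String[] args) {
            Set<Integer> resourceIds = new HashSet<>(Arrays.asList(3, 7, 11));
            String joined = resourceIds.stream()
                    .map(String::valueOf)
                    .collect(Collectors.joining(","));
            System.out.println(joined); // e.g. "3,7,11" (set order is not guaranteed)
        }
    }
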
@ -195,7 +204,7 @@ public class ProcessDefinitionService extends BaseDAGService {
/**
* query process definition list
*
* @param loginUser login user
* @param loginUser login user
* @param projectName project name
* @return definition list
*/
@ -221,12 +230,12 @@ public class ProcessDefinitionService extends BaseDAGService {
/**
* query process definition list paging
*
* @param loginUser login user
* @param loginUser login user
* @param projectName project name
* @param searchVal search value
* @param pageNo page number
* @param pageSize page size
* @param userId user id
* @param searchVal search value
* @param pageNo page number
* @param pageSize page size
* @param userId user id
* @return process definition page
*/
public Map<String, Object> queryProcessDefinitionListPaging(User loginUser, String projectName, String searchVal, Integer pageNo, Integer pageSize, Integer userId) {
@ -242,10 +251,10 @@ public class ProcessDefinitionService extends BaseDAGService {
Page<ProcessDefinition> page = new Page(pageNo, pageSize);
IPage<ProcessDefinition> processDefinitionIPage = processDefineMapper.queryDefineListPaging(
page, searchVal, userId, project.getId(),isAdmin(loginUser));
page, searchVal, userId, project.getId(), isAdmin(loginUser));
PageInfo pageInfo = new PageInfo<ProcessData>(pageNo, pageSize);
pageInfo.setTotalCount((int)processDefinitionIPage.getTotal());
pageInfo.setTotalCount((int) processDefinitionIPage.getTotal());
pageInfo.setLists(processDefinitionIPage.getRecords());
result.put(Constants.DATA_LIST, pageInfo);
putMsg(result, Status.SUCCESS);
@ -256,9 +265,9 @@ public class ProcessDefinitionService extends BaseDAGService {
/**
* query detail of process definition
*
* @param loginUser login user
* @param loginUser login user
* @param projectName project name
* @param processId process definition id
* @param processId process definition id
* @return process definition detail
*/
public Map<String, Object> queryProcessDefinitionById(User loginUser, String projectName, Integer processId) {
@ -286,12 +295,12 @@ public class ProcessDefinitionService extends BaseDAGService {
/**
* copy process definition
*
* @param loginUser login user
* @param loginUser login user
* @param projectName project name
* @param processId process definition id
* @param processId process definition id
* @return copy result code
*/
public Map<String, Object> copyProcessDefinition(User loginUser, String projectName, Integer processId) throws JsonProcessingException{
public Map<String, Object> copyProcessDefinition(User loginUser, String projectName, Integer processId) throws JsonProcessingException {
Map<String, Object> result = new HashMap<>(5);
Project project = projectMapper.queryByName(projectName);
@ -310,7 +319,7 @@ public class ProcessDefinitionService extends BaseDAGService {
return createProcessDefinition(
loginUser,
projectName,
processDefinition.getName()+"_copy_"+System.currentTimeMillis(),
processDefinition.getName() + "_copy_" + System.currentTimeMillis(),
processDefinition.getProcessDefinitionJson(),
processDefinition.getDescription(),
processDefinition.getLocations(),
@ -321,14 +330,14 @@ public class ProcessDefinitionService extends BaseDAGService {
/**
* update process definition
*
* @param loginUser login user
* @param projectName project name
* @param name process definition name
* @param id process definition id
* @param loginUser login user
* @param projectName project name
* @param name process definition name
* @param id process definition id
* @param processDefinitionJson process definition json
* @param desc description
* @param locations locations for nodes
* @param connects connects for nodes
* @param desc description
* @param locations locations for nodes
* @param connects connects for nodes
* @return update result code
*/
public Map<String, Object> updateProcessDefinition(User loginUser, String projectName, int id, String name,
@ -397,9 +406,9 @@ public class ProcessDefinitionService extends BaseDAGService {
/**
* verify process definition name unique
*
* @param loginUser login user
* @param loginUser login user
* @param projectName project name
* @param name name
* @param name name
* @return true if process definition name not exists, otherwise false
*/
public Map<String, Object> verifyProcessDefinitionName(User loginUser, String projectName, String name) {
@ -424,8 +433,8 @@ public class ProcessDefinitionService extends BaseDAGService {
/**
* delete process definition by id
*
* @param loginUser login user
* @param projectName project name
* @param loginUser login user
* @param projectName project name
* @param processDefinitionId process definition id
* @return delete result code
*/
@ -456,22 +465,22 @@ public class ProcessDefinitionService extends BaseDAGService {
// check process definition is already online
if (processDefinition.getReleaseState() == ReleaseState.ONLINE) {
putMsg(result, Status.PROCESS_DEFINE_STATE_ONLINE,processDefinitionId);
putMsg(result, Status.PROCESS_DEFINE_STATE_ONLINE, processDefinitionId);
return result;
}
// get the timing according to the process definition
List<Schedule> schedules = scheduleMapper.queryByProcessDefinitionId(processDefinitionId);
if (!schedules.isEmpty() && schedules.size() > 1) {
logger.warn("scheduler num is {},Greater than 1",schedules.size());
logger.warn("scheduler num is {},Greater than 1", schedules.size());
putMsg(result, Status.DELETE_PROCESS_DEFINE_BY_ID_ERROR);
return result;
}else if(schedules.size() == 1){
} else if (schedules.size() == 1) {
Schedule schedule = schedules.get(0);
if(schedule.getReleaseState() == ReleaseState.OFFLINE){
if (schedule.getReleaseState() == ReleaseState.OFFLINE) {
scheduleMapper.deleteById(schedule.getId());
}else if(schedule.getReleaseState() == ReleaseState.ONLINE){
putMsg(result, Status.SCHEDULE_CRON_STATE_ONLINE,schedule.getId());
} else if (schedule.getReleaseState() == ReleaseState.ONLINE) {
putMsg(result, Status.SCHEDULE_CRON_STATE_ONLINE, schedule.getId());
return result;
}
}
@ -489,9 +498,9 @@ public class ProcessDefinitionService extends BaseDAGService {
/**
* release process definition: online / offline
*
* @param loginUser login user
* @param projectName project name
* @param id process definition id
* @param loginUser login user
* @param projectName project name
* @param id process definition id
* @param releaseState release state
* @return release result code
*/
@ -510,7 +519,7 @@ public class ProcessDefinitionService extends BaseDAGService {
// check state
if (null == state) {
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, "releaseState");
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, RELEASESTATE);
return result;
}
@ -522,12 +531,12 @@ public class ProcessDefinitionService extends BaseDAGService {
String resourceIds = processDefinition.getResourceIds();
if (StringUtils.isNotBlank(resourceIds)) {
Integer[] resourceIdArray = Arrays.stream(resourceIds.split(",")).map(Integer::parseInt).toArray(Integer[]::new);
PermissionCheck<Integer> permissionCheck = new PermissionCheck(AuthorizationType.RESOURCE_FILE_ID,processService,resourceIdArray,loginUser.getId(),logger);
PermissionCheck<Integer> permissionCheck = new PermissionCheck<>(AuthorizationType.RESOURCE_FILE_ID, processService, resourceIdArray, loginUser.getId(), logger);
try {
permissionCheck.checkPermission();
} catch (Exception e) {
logger.error(e.getMessage(),e);
putMsg(result, Status.RESOURCE_NOT_EXIST_OR_NO_PERMISSION, "releaseState");
logger.error(e.getMessage(), e);
putMsg(result, Status.RESOURCE_NOT_EXIST_OR_NO_PERMISSION, RELEASESTATE);
return result;
}
}
@ -542,7 +551,7 @@ public class ProcessDefinitionService extends BaseDAGService {
new int[]{processDefinition.getId()}
);
for(Schedule schedule:scheduleList){
for (Schedule schedule : scheduleList) {
logger.info("set schedule offline, project id: {}, schedule id: {}, process definition id: {}", project.getId(), schedule.getId(), id);
// set status
schedule.setReleaseState(ReleaseState.OFFLINE);
@ -551,7 +560,7 @@ public class ProcessDefinitionService extends BaseDAGService {
}
break;
default:
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, "releaseState");
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, RELEASESTATE);
return result;
}
@ -561,14 +570,15 @@ public class ProcessDefinitionService extends BaseDAGService {
/**
* batch export process definition by ids
*
* @param loginUser
* @param projectName
* @param processDefinitionIds
* @param response
*/
public void batchExportProcessDefinitionByIds(User loginUser, String projectName, String processDefinitionIds, HttpServletResponse response){
public void batchExportProcessDefinitionByIds(User loginUser, String projectName, String processDefinitionIds, HttpServletResponse response) {
if(StringUtils.isEmpty(processDefinitionIds)){
if (StringUtils.isEmpty(processDefinitionIds)) {
return;
}
@ -579,24 +589,25 @@ public class ProcessDefinitionService extends BaseDAGService {
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultStatus = (Status) checkResult.get(Constants.STATUS);
if(resultStatus != Status.SUCCESS){
if (resultStatus != Status.SUCCESS) {
return;
}
List<ProcessMeta> processDefinitionList =
getProcessDefinitionList(processDefinitionIds);
if(CollectionUtils.isNotEmpty(processDefinitionList)){
if (CollectionUtils.isNotEmpty(processDefinitionList)) {
downloadProcessDefinitionFile(response, processDefinitionList);
}
}
/**
* get process definition list by ids
*
* @param processDefinitionIds
* @return
*/
private List<ProcessMeta> getProcessDefinitionList(String processDefinitionIds){
private List<ProcessMeta> getProcessDefinitionList(String processDefinitionIds) {
List<ProcessMeta> processDefinitionList = new ArrayList<>();
String[] processDefinitionIdArray = processDefinitionIds.split(",");
for (String strProcessDefinitionId : processDefinitionIdArray) {
@ -613,6 +624,7 @@ public class ProcessDefinitionService extends BaseDAGService {
/**
* download the process definition file
*
* @param response
* @param processDefinitionList
*/
@ -623,12 +635,12 @@ public class ProcessDefinitionService extends BaseDAGService {
try {
out = response.getOutputStream();
buff = new BufferedOutputStream(out);
buff.write(JSON.toJSONString(processDefinitionList).getBytes(StandardCharsets.UTF_8));
buff.write(JSONUtils.toJsonString(processDefinitionList).getBytes(StandardCharsets.UTF_8));
buff.flush();
buff.close();
} catch (IOException e) {
logger.warn("export process fail", e);
}finally {
} finally {
if (null != buff) {
try {
buff.close();
@ -648,19 +660,21 @@ public class ProcessDefinitionService extends BaseDAGService {
/**
* get export process metadata string
*
* @param processDefinitionId process definition id
* @param processDefinition process definition
* @param processDefinition process definition
* @return export process metadata string
*/
public String exportProcessMetaDataStr(Integer processDefinitionId, ProcessDefinition processDefinition) {
//create workflow json file
return JSONUtils.toJsonString(exportProcessMetaData(processDefinitionId,processDefinition));
return JSONUtils.toJsonString(exportProcessMetaData(processDefinitionId, processDefinition));
}
/**
* get export process metadata string
*
* @param processDefinitionId process definition id
* @param processDefinition process definition
* @param processDefinition process definition
* @return export process metadata string
*/
public ProcessMeta exportProcessMetaData(Integer processDefinitionId, ProcessDefinition processDefinition) {
@ -696,17 +710,18 @@ public class ProcessDefinitionService extends BaseDAGService {
/**
* correct task param which has datasource or dependent
*
* @param processDefinitionJson processDefinitionJson
* @return correct processDefinitionJson
*/
public String addExportTaskNodeSpecialParam(String processDefinitionJson) {
JSONObject jsonObject = JSONUtils.parseObject(processDefinitionJson);
JSONArray jsonArray = (JSONArray) jsonObject.get("tasks");
ObjectNode jsonObject = JSONUtils.parseObject(processDefinitionJson);
ArrayNode jsonArray = (ArrayNode) jsonObject.path(TASKS);
for (int i = 0; i < jsonArray.size(); i++) {
JSONObject taskNode = jsonArray.getJSONObject(i);
if (StringUtils.isNotEmpty(taskNode.getString("type"))) {
String taskType = taskNode.getString("type");
JsonNode taskNode = jsonArray.path(i);
if (StringUtils.isNotEmpty(taskNode.path("type").asText())) {
String taskType = taskNode.path("type").asText();
ProcessAddTaskParam addTaskParam = TaskNodeParamFactory.getByTaskType(taskType);
if (null != addTaskParam) {
@ -714,12 +729,13 @@ public class ProcessDefinitionService extends BaseDAGService {
}
}
}
jsonObject.put("tasks", jsonArray);
jsonObject.set(TASKS, jsonArray);
return jsonObject.toString();
}
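
The fastjson-to-Jackson migration in the method above trades JSONObject/JSONArray for ObjectNode/ArrayNode; since path(...) returns a missing node rather than null, the asText() chains stay safe when a key is absent. A small traversal sketch in the same style (the tasks JSON is invented for illustration):

    import com.fasterxml.jackson.databind.JsonNode;
    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.fasterxml.jackson.databind.node.ArrayNode;
    import com.fasterxml.jackson.databind.node.ObjectNode;

    public class TasksTraversalDemo {
        public static void main(String[] args) throws Exception {
            ObjectNode root = (ObjectNode) new ObjectMapper().readTree(
                    "{\"tasks\":[{\"type\":\"SQL\"},{\"type\":\"SHELL\"},{}]}");
            ArrayNode tasks = (ArrayNode) root.path("tasks");
            for (int i = 0; i < tasks.size(); i++) {
                JsonNode task = tasks.path(i);
                // a missing "type" yields "" instead of a NullPointerException
                System.out.println("task " + i + " type=" + task.path("type").asText());
            }
            root.set("tasks", tasks); // set(...) replaces fastjson's put(...) for node values
        }
    }
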
/**
* check task if has sub process
*
* @param taskType task type
* @return if task has sub process return true else false
*/
@ -729,8 +745,9 @@ public class ProcessDefinitionService extends BaseDAGService {
/**
* import process definition
* @param loginUser login user
* @param file process metadata json file
*
* @param loginUser login user
* @param file process metadata json file
* @param currentProjectName current project name
* @return import process
*/
@ -738,7 +755,7 @@ public class ProcessDefinitionService extends BaseDAGService {
public Map<String, Object> importProcessDefinition(User loginUser, MultipartFile file, String currentProjectName) {
Map<String, Object> result = new HashMap<>(5);
String processMetaJson = FileUtils.file2String(file);
List<ProcessMeta> processMetaList = JSON.parseArray(processMetaJson,ProcessMeta.class);
List<ProcessMeta> processMetaList = JSONUtils.toList(processMetaJson, ProcessMeta.class);
//check file content
if (CollectionUtils.isEmpty(processMetaList)) {
@ -746,9 +763,9 @@ public class ProcessDefinitionService extends BaseDAGService {
return result;
}
for(ProcessMeta processMeta:processMetaList){
for (ProcessMeta processMeta : processMetaList) {
if (!checkAndImportProcessDefinition(loginUser, currentProjectName, result, processMeta)){
if (!checkAndImportProcessDefinition(loginUser, currentProjectName, result, processMeta)) {
return result;
}
}
@ -758,6 +775,7 @@ public class ProcessDefinitionService extends BaseDAGService {
/**
* check and import process definition
*
* @param loginUser
* @param currentProjectName
* @param result
@ -766,7 +784,7 @@ public class ProcessDefinitionService extends BaseDAGService {
*/
private boolean checkAndImportProcessDefinition(User loginUser, String currentProjectName, Map<String, Object> result, ProcessMeta processMeta) {
if(!checkImportanceParams(processMeta,result)){
if (!checkImportanceParams(processMeta, result)) {
return false;
}
@ -774,7 +792,7 @@ public class ProcessDefinitionService extends BaseDAGService {
String processDefinitionName = processMeta.getProcessDefinitionName();
//use currentProjectName to query
Project targetProject = projectMapper.queryByName(currentProjectName);
if(null != targetProject){
if (null != targetProject) {
processDefinitionName = recursionProcessDefinitionName(targetProject.getId(),
processDefinitionName, 1);
}
@ -798,14 +816,14 @@ public class ProcessDefinitionService extends BaseDAGService {
processDefinitionName,
addImportTaskNodeParam(loginUser, processMeta.getProcessDefinitionJson(), targetProject));
if(createProcessResult == null){
if (createProcessResult == null) {
return false;
}
//create process definition
Integer processDefinitionId =
Objects.isNull(createProcessResult.get("processDefinitionId"))?
null:Integer.parseInt(createProcessResult.get("processDefinitionId").toString());
Objects.isNull(createProcessResult.get(PROCESSDEFINITIONID)) ?
null : Integer.parseInt(createProcessResult.get(PROCESSDEFINITIONID).toString());
//scheduler param
return getImportProcessScheduleResult(loginUser,
@ -819,6 +837,7 @@ public class ProcessDefinitionService extends BaseDAGService {
/**
* get create process result
*
* @param loginUser
* @param currentProjectName
* @param result
@ -832,12 +851,12 @@ public class ProcessDefinitionService extends BaseDAGService {
Map<String, Object> result,
ProcessMeta processMeta,
String processDefinitionName,
String importProcessParam){
String importProcessParam) {
Map<String, Object> createProcessResult = null;
try {
createProcessResult = createProcessDefinition(loginUser
,currentProjectName,
processDefinitionName+"_import_"+System.currentTimeMillis(),
, currentProjectName,
processDefinitionName + "_import_" + System.currentTimeMillis(),
importProcessParam,
processMeta.getProcessDefinitionDescription(),
processMeta.getProcessDefinitionLocations(),
@ -853,6 +872,7 @@ public class ProcessDefinitionService extends BaseDAGService {
/**
* get import process schedule result
*
* @param loginUser
* @param currentProjectName
* @param result
@ -884,11 +904,12 @@ public class ProcessDefinitionService extends BaseDAGService {
/**
* check importance params
*
* @param processMeta
* @param result
* @return
*/
private boolean checkImportanceParams(ProcessMeta processMeta,Map<String, Object> result){
private boolean checkImportanceParams(ProcessMeta processMeta, Map<String, Object> result) {
if (StringUtils.isEmpty(processMeta.getProjectName())) {
putMsg(result, Status.DATA_IS_NULL, "projectName");
return false;
@ -907,18 +928,19 @@ public class ProcessDefinitionService extends BaseDAGService {
/**
* import process add special task param
* @param loginUser login user
*
* @param loginUser login user
* @param processDefinitionJson process definition json
* @param targetProject target project
* @param targetProject target project
* @return import process param
*/
private String addImportTaskNodeParam(User loginUser, String processDefinitionJson, Project targetProject) {
JSONObject jsonObject = JSONUtils.parseObject(processDefinitionJson);
JSONArray jsonArray = (JSONArray) jsonObject.get("tasks");
ObjectNode jsonObject = JSONUtils.parseObject(processDefinitionJson);
ArrayNode jsonArray = (ArrayNode) jsonObject.get(TASKS);
//add sql and dependent param
for (int i = 0; i < jsonArray.size(); i++) {
JSONObject taskNode = jsonArray.getJSONObject(i);
String taskType = taskNode.getString("type");
JsonNode taskNode = jsonArray.path(i);
String taskType = taskNode.path("type").asText();
ProcessAddTaskParam addTaskParam = TaskNodeParamFactory.getByTaskType(taskType);
if (null != addTaskParam) {
addTaskParam.addImportSpecialParam(taskNode);
@ -928,25 +950,26 @@ public class ProcessDefinitionService extends BaseDAGService {
//recursive sub-process parameter correction map key for old process id value for new process id
Map<Integer, Integer> subProcessIdMap = new HashMap<>(20);
List<Object> subProcessList = jsonArray.stream()
.filter(elem -> checkTaskHasSubProcess(JSONUtils.parseObject(elem.toString()).getString("type")))
List<Object> subProcessList = StreamUtils.asStream(jsonArray.elements())
.filter(elem -> checkTaskHasSubProcess(JSONUtils.parseObject(elem.toString()).path("type").asText()))
.collect(Collectors.toList());
if (CollectionUtils.isNotEmpty(subProcessList)) {
importSubProcess(loginUser, targetProject, jsonArray, subProcessIdMap);
}
jsonObject.put("tasks", jsonArray);
jsonObject.set(TASKS, jsonArray);
return jsonObject.toString();
}
/**
* import process schedule
* @param loginUser login user
* @param currentProjectName current project name
* @param processMeta process meta data
*
* @param loginUser login user
* @param currentProjectName current project name
* @param processMeta process meta data
* @param processDefinitionName process definition name
* @param processDefinitionId process definition id
* @param processDefinitionId process definition id
* @return insert schedule flag
*/
public int importProcessSchedule(User loginUser, String currentProjectName, ProcessMeta processMeta,
@ -995,84 +1018,87 @@ public class ProcessDefinitionService extends BaseDAGService {
/**
* check import process has sub process
* recursion create sub process
* @param loginUser login user
* @param targetProject target project
* @param jsonArray process task array
*
* @param loginUser login user
* @param targetProject target project
* @param jsonArray process task array
* @param subProcessIdMap correct sub process id map
*/
public void importSubProcess(User loginUser, Project targetProject, JSONArray jsonArray, Map<Integer, Integer> subProcessIdMap) {
public void importSubProcess(User loginUser, Project targetProject, ArrayNode jsonArray, Map<Integer, Integer> subProcessIdMap) {
for (int i = 0; i < jsonArray.size(); i++) {
JSONObject taskNode = jsonArray.getJSONObject(i);
String taskType = taskNode.getString("type");
if (checkTaskHasSubProcess(taskType)) {
//get sub process info
JSONObject subParams = JSONUtils.parseObject(taskNode.getString("params"));
Integer subProcessId = subParams.getInteger("processDefinitionId");
ProcessDefinition subProcess = processDefineMapper.queryByDefineId(subProcessId);
//check is sub process exist in db
if (null != subProcess) {
String subProcessJson = subProcess.getProcessDefinitionJson();
//check current project has sub process
ProcessDefinition currentProjectSubProcess = processDefineMapper.queryByDefineName(targetProject.getId(), subProcess.getName());
if (null == currentProjectSubProcess) {
JSONArray subJsonArray = (JSONArray) JSONUtils.parseObject(subProcess.getProcessDefinitionJson()).get("tasks");
List<Object> subProcessList = subJsonArray.stream()
.filter(item -> checkTaskHasSubProcess(JSONUtils.parseObject(item.toString()).getString("type")))
.collect(Collectors.toList());
if (CollectionUtils.isNotEmpty(subProcessList)) {
importSubProcess(loginUser, targetProject, subJsonArray, subProcessIdMap);
//sub process processId correct
if (!subProcessIdMap.isEmpty()) {
for (Map.Entry<Integer, Integer> entry : subProcessIdMap.entrySet()) {
String oldSubProcessId = "\"processDefinitionId\":" + entry.getKey();
String newSubProcessId = "\"processDefinitionId\":" + entry.getValue();
subProcessJson = subProcessJson.replaceAll(oldSubProcessId, newSubProcessId);
}
subProcessIdMap.clear();
}
}
ObjectNode taskNode = (ObjectNode) jsonArray.path(i);
String taskType = taskNode.path("type").asText();
//if sub-process recursion
Date now = new Date();
//create sub process in target project
ProcessDefinition processDefine = new ProcessDefinition();
processDefine.setName(subProcess.getName());
processDefine.setVersion(subProcess.getVersion());
processDefine.setReleaseState(subProcess.getReleaseState());
processDefine.setProjectId(targetProject.getId());
processDefine.setUserId(loginUser.getId());
processDefine.setProcessDefinitionJson(subProcessJson);
processDefine.setDescription(subProcess.getDescription());
processDefine.setLocations(subProcess.getLocations());
processDefine.setConnects(subProcess.getConnects());
processDefine.setTimeout(subProcess.getTimeout());
processDefine.setTenantId(subProcess.getTenantId());
processDefine.setGlobalParams(subProcess.getGlobalParams());
processDefine.setCreateTime(now);
processDefine.setUpdateTime(now);
processDefine.setFlag(subProcess.getFlag());
processDefine.setReceivers(subProcess.getReceivers());
processDefine.setReceiversCc(subProcess.getReceiversCc());
processDefineMapper.insert(processDefine);
logger.info("create sub process, project: {}, process name: {}", targetProject.getName(), processDefine.getName());
//modify task node
ProcessDefinition newSubProcessDefine = processDefineMapper.queryByDefineName(processDefine.getProjectId(),processDefine.getName());
if (null != newSubProcessDefine) {
subProcessIdMap.put(subProcessId, newSubProcessDefine.getId());
subParams.put("processDefinitionId", newSubProcessDefine.getId());
taskNode.put("params", subParams);
if (!checkTaskHasSubProcess(taskType)) {
continue;
}
//get sub process info
ObjectNode subParams = (ObjectNode) taskNode.path("params");
Integer subProcessId = subParams.path(PROCESSDEFINITIONID).asInt();
ProcessDefinition subProcess = processDefineMapper.queryByDefineId(subProcessId);
//check is sub process exist in db
if (null == subProcess) {
continue;
}
String subProcessJson = subProcess.getProcessDefinitionJson();
//check current project has sub process
ProcessDefinition currentProjectSubProcess = processDefineMapper.queryByDefineName(targetProject.getId(), subProcess.getName());
if (null == currentProjectSubProcess) {
ArrayNode subJsonArray = (ArrayNode) JSONUtils.parseObject(subProcess.getProcessDefinitionJson()).get(TASKS);
List<Object> subProcessList = StreamUtils.asStream(subJsonArray.elements())
.filter(item -> checkTaskHasSubProcess(JSONUtils.parseObject(item.toString()).path("type").asText()))
.collect(Collectors.toList());
if (CollectionUtils.isNotEmpty(subProcessList)) {
importSubProcess(loginUser, targetProject, subJsonArray, subProcessIdMap);
//sub process processId correct
if (!subProcessIdMap.isEmpty()) {
for (Map.Entry<Integer, Integer> entry : subProcessIdMap.entrySet()) {
String oldSubProcessId = "\"processDefinitionId\":" + entry.getKey();
String newSubProcessId = "\"processDefinitionId\":" + entry.getValue();
subProcessJson = subProcessJson.replaceAll(oldSubProcessId, newSubProcessId);
}
subProcessIdMap.clear();
}
}
//if sub-process recursion
Date now = new Date();
//create sub process in target project
ProcessDefinition processDefine = new ProcessDefinition();
processDefine.setName(subProcess.getName());
processDefine.setVersion(subProcess.getVersion());
processDefine.setReleaseState(subProcess.getReleaseState());
processDefine.setProjectId(targetProject.getId());
processDefine.setUserId(loginUser.getId());
processDefine.setProcessDefinitionJson(subProcessJson);
processDefine.setDescription(subProcess.getDescription());
processDefine.setLocations(subProcess.getLocations());
processDefine.setConnects(subProcess.getConnects());
processDefine.setTimeout(subProcess.getTimeout());
processDefine.setTenantId(subProcess.getTenantId());
processDefine.setGlobalParams(subProcess.getGlobalParams());
processDefine.setCreateTime(now);
processDefine.setUpdateTime(now);
processDefine.setFlag(subProcess.getFlag());
processDefine.setReceivers(subProcess.getReceivers());
processDefine.setReceiversCc(subProcess.getReceiversCc());
processDefineMapper.insert(processDefine);
logger.info("create sub process, project: {}, process name: {}", targetProject.getName(), processDefine.getName());
//modify task node
ProcessDefinition newSubProcessDefine = processDefineMapper.queryByDefineName(processDefine.getProjectId(), processDefine.getName());
if (null != newSubProcessDefine) {
subProcessIdMap.put(subProcessId, newSubProcessDefine.getId());
subParams.put(PROCESSDEFINITIONID, newSubProcessDefine.getId());
taskNode.set("params", subParams);
}
}
}
}
@ -1081,7 +1107,7 @@ public class ProcessDefinitionService extends BaseDAGService {
/**
* check the process definition node meets the specifications
*
* @param processData process data
* @param processData process data
* @param processDefinitionJson process definition json
* @return check result code
*/
@ -1091,7 +1117,7 @@ public class ProcessDefinitionService extends BaseDAGService {
try {
if (processData == null) {
logger.error("process data is null");
putMsg(result,Status.DATA_IS_NOT_VALID, processDefinitionJson);
putMsg(result, Status.DATA_IS_NOT_VALID, processDefinitionJson);
return result;
}
@ -1122,7 +1148,7 @@ public class ProcessDefinitionService extends BaseDAGService {
// check extra params
CheckUtils.checkOtherParams(taskNode.getExtras());
}
putMsg(result,Status.SUCCESS);
putMsg(result, Status.SUCCESS);
} catch (Exception e) {
result.put(Constants.STATUS, Status.REQUEST_PARAMS_NOT_VALID_ERROR);
result.put(Constants.MSG, e.getMessage());
@ -1135,9 +1161,8 @@ public class ProcessDefinitionService extends BaseDAGService {
*
* @param defineId define id
* @return task node list
* @throws Exception exception
*/
public Map<String, Object> getTaskNodeListByDefinitionId(Integer defineId) throws Exception {
public Map<String, Object> getTaskNodeListByDefinitionId(Integer defineId) {
Map<String, Object> result = new HashMap<>();
ProcessDefinition processDefinition = processDefineMapper.selectById(defineId);
@ -1155,7 +1180,7 @@ public class ProcessDefinitionService extends BaseDAGService {
//process data check
if (null == processData) {
logger.error("process data is null");
putMsg(result,Status.DATA_IS_NOT_VALID, processDefinitionJson);
putMsg(result, Status.DATA_IS_NOT_VALID, processDefinitionJson);
return result;
}
@ -1173,15 +1198,14 @@ public class ProcessDefinitionService extends BaseDAGService {
*
* @param defineIdList define id list
* @return task node list
* @throws Exception exception
*/
public Map<String, Object> getTaskNodeListByDefinitionIdList(String defineIdList) throws Exception {
public Map<String, Object> getTaskNodeListByDefinitionIdList(String defineIdList) {
Map<String, Object> result = new HashMap<>();
Map<Integer, List<TaskNode>> taskNodeMap = new HashMap<>();
String[] idList = defineIdList.split(",");
List<Integer> idIntList = new ArrayList<>();
for(String definitionId : idList) {
for (String definitionId : idList) {
idIntList.add(Integer.parseInt(definitionId));
}
Integer[] idArray = idIntList.toArray(new Integer[idIntList.size()]);
@ -1192,7 +1216,7 @@ public class ProcessDefinitionService extends BaseDAGService {
return result;
}
for(ProcessDefinition processDefinition : processDefinitionList){
for (ProcessDefinition processDefinition : processDefinitionList) {
String processDefinitionJson = processDefinition.getProcessDefinitionJson();
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
List<TaskNode> taskNodeList = (processData.getTasks() == null) ? new ArrayList<>() : processData.getTasks();
@ -1228,7 +1252,7 @@ public class ProcessDefinitionService extends BaseDAGService {
* Encapsulates the TreeView structure
*
* @param processId process definition id
* @param limit limit
* @param limit limit
* @return tree view json data
* @throws Exception exception
*/
@ -1238,7 +1262,7 @@ public class ProcessDefinitionService extends BaseDAGService {
ProcessDefinition processDefinition = processDefineMapper.selectById(processId);
if (null == processDefinition) {
logger.info("process define not exists");
putMsg(result,Status.PROCESS_DEFINE_NOT_EXIST, processDefinition);
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, processDefinition);
return result;
}
DAG<String, TaskNode, TaskNodeRelation> dag = genDagGraph(processDefinition);
@ -1257,8 +1281,8 @@ public class ProcessDefinitionService extends BaseDAGService {
*/
List<ProcessInstance> processInstanceList = processInstanceMapper.queryByProcessDefineId(processId, limit);
for(ProcessInstance processInstance:processInstanceList){
processInstance.setDuration(DateUtils.differSec(processInstance.getStartTime(),processInstance.getEndTime()));
for (ProcessInstance processInstance : processInstanceList) {
processInstance.setDuration(DateUtils.differSec(processInstance.getStartTime(), processInstance.getEndTime()));
}
if (limit > processInstanceList.size()) {
@ -1315,9 +1339,9 @@ public class ProcessDefinitionService extends BaseDAGService {
*/
if (taskInstance.getTaskType().equals(TaskType.SUB_PROCESS.name())) {
String taskJson = taskInstance.getTaskJson();
taskNode = JSON.parseObject(taskJson, TaskNode.class);
subProcessId = Integer.parseInt(JSON.parseObject(
taskNode.getParams()).getString(CMDPARAM_SUB_PROCESS_DEFINE_ID));
taskNode = JSONUtils.parseObject(taskJson, TaskNode.class);
subProcessId = Integer.parseInt(JSONUtils.parseObject(
taskNode.getParams()).path(CMDPARAM_SUB_PROCESS_DEFINE_ID).asText());
}
treeViewDto.getInstances().add(new Instance(taskInstance.getId(), taskInstance.getName(), taskInstance.getTaskType(), taskInstance.getState().toString()
, taskInstance.getStartTime(), taskInstance.getEndTime(), taskInstance.getHost(), DateUtils.format2Readable(endTime.getTime() - startTime.getTime()), subProcessId));
@ -1361,9 +1385,8 @@ public class ProcessDefinitionService extends BaseDAGService {
*
* @param processDefinition process definition
* @return dag graph
* @throws Exception if exception happens
*/
private DAG<String, TaskNode, TaskNodeRelation> genDagGraph(ProcessDefinition processDefinition) throws Exception {
private DAG<String, TaskNode, TaskNodeRelation> genDagGraph(ProcessDefinition processDefinition) {
String processDefinitionJson = processDefinition.getProcessDefinitionJson();
@ -1383,8 +1406,6 @@ public class ProcessDefinitionService extends BaseDAGService {
}
/**
* whether the graph has a ring
*
@ -1402,7 +1423,7 @@ public class ProcessDefinitionService extends BaseDAGService {
// Fill edge relations
for (TaskNode taskNodeResponse : taskNodeResponseList) {
taskNodeResponse.getPreTasks();
List<String> preTasks = JSONUtils.toList(taskNodeResponse.getPreTasks(),String.class);
List<String> preTasks = JSONUtils.toList(taskNodeResponse.getPreTasks(), String.class);
if (CollectionUtils.isNotEmpty(preTasks)) {
for (String preTask : preTasks) {
if (!graph.addEdge(preTask, taskNodeResponse.getName())) {
@ -1415,19 +1436,19 @@ public class ProcessDefinitionService extends BaseDAGService {
return graph.hasCycle();
}
private String recursionProcessDefinitionName(Integer projectId,String processDefinitionName,int num){
private String recursionProcessDefinitionName(Integer projectId, String processDefinitionName, int num) {
ProcessDefinition processDefinition = processDefineMapper.queryByDefineName(projectId, processDefinitionName);
if (processDefinition != null) {
if(num > 1){
String str = processDefinitionName.substring(0,processDefinitionName.length() - 3);
processDefinitionName = str + "("+num+")";
}else{
processDefinitionName = processDefinition.getName() + "("+num+")";
if (num > 1) {
String str = processDefinitionName.substring(0, processDefinitionName.length() - 3);
processDefinitionName = str + "(" + num + ")";
} else {
processDefinitionName = processDefinition.getName() + "(" + num + ")";
}
}else{
} else {
return processDefinitionName;
}
return recursionProcessDefinitionName(projectId,processDefinitionName,num + 1);
return recursionProcessDefinitionName(projectId, processDefinitionName, num + 1);
}
}
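
The renaming logic above probes candidate names until one is unused, producing name, name(1), name(2), and so on. A minimal standalone sketch of the same idea, with an in-memory set standing in for the processDefineMapper.queryByDefineName lookup; like the original, the substring(0, length - 3) step assumes the previous "(n)" suffix is exactly three characters, i.e. single-digit n:

import java.util.HashSet;
import java.util.Set;

public class DuplicateNameResolver {

    // hypothetical stand-in for processDefineMapper.queryByDefineName
    private final Set<String> takenNames = new HashSet<>();

    public String resolve(String name, int num) {
        if (!takenNames.contains(name)) {
            return name; // name is free: use it as-is
        }
        String candidate;
        if (num > 1) {
            // drop the previous "(n)" suffix before appending the next one
            candidate = name.substring(0, name.length() - 3) + "(" + num + ")";
        } else {
            candidate = name + "(" + num + ")";
        }
        return resolve(candidate, num + 1);
    }
}

With takenNames containing "etl" and "etl(1)", resolve("etl", 1) returns "etl(2)".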

33
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessInstanceService.java

@@ -16,7 +16,8 @@
*/
package org.apache.dolphinscheduler.api.service;
import java.nio.charset.StandardCharsets;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.dolphinscheduler.api.dto.gantt.GanttDto;
import org.apache.dolphinscheduler.api.dto.gantt.Task;
import org.apache.dolphinscheduler.api.enums.Status;
@@ -33,11 +34,11 @@ import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.common.utils.placeholder.BusinessTimeUtils;
import com.alibaba.fastjson.JSON;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.dolphinscheduler.dao.entity.*;
import org.apache.dolphinscheduler.dao.mapper.*;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -49,6 +50,7 @@ import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.text.ParseException;
import java.util.*;
import java.util.stream.Collectors;
@@ -242,7 +244,7 @@ public class ProcessInstanceService extends BaseDAGService {
if(logResult.getCode() == Status.SUCCESS.ordinal()){
String log = (String) logResult.getData();
Map<String, DependResult> resultMap = parseLogForDependentResult(log);
taskInstance.setDependentResult(JSONUtils.toJson(resultMap));
taskInstance.setDependentResult(JSONUtils.toJsonString(resultMap));
}
}
}
@@ -380,7 +382,7 @@ public class ProcessInstanceService extends BaseDAGService {
return result;
}
originDefParams = JSONUtils.toJson(processData.getGlobalParams());
originDefParams = JSONUtils.toJsonString(processData.getGlobalParams());
List<Property> globalParamList = processData.getGlobalParams();
Map<String, String> globalParamMap = globalParamList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue));
globalParams = ParameterUtils.curingGlobalParams(globalParamMap, globalParamList,
@@ -476,8 +478,6 @@ public class ProcessInstanceService extends BaseDAGService {
return checkResult;
}
ProcessInstance processInstance = processService.findProcessInstanceDetailById(processInstanceId);
List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(processInstanceId);
if (null == processInstance) {
putMsg(result, Status.PROCESS_INSTANCE_NOT_EXIST, processInstanceId);
return result;
@@ -485,8 +485,11 @@ public class ProcessInstanceService extends BaseDAGService {
processService.removeTaskLogFile(processInstanceId);
// delete database cascade
int delete = processService.deleteWorkProcessInstanceById(processInstanceId);
processService.deleteAllSubWorkProcessByParentId(processInstanceId);
processService.deleteWorkProcessMapByParentId(processInstanceId);
@@ -504,9 +507,8 @@ public class ProcessInstanceService extends BaseDAGService {
*
* @param processInstanceId process instance id
* @return variables data
* @throws Exception exception
*/
public Map<String, Object> viewVariables( Integer processInstanceId) throws Exception {
public Map<String, Object> viewVariables(Integer processInstanceId) {
Map<String, Object> result = new HashMap<>(5);
ProcessInstance processInstance = processInstanceMapper.queryDetailById(processInstanceId);
@@ -530,16 +532,16 @@ public class ProcessInstanceService extends BaseDAGService {
List<Property> globalParams = new ArrayList<>();
if (userDefinedParams != null && userDefinedParams.length() > 0) {
globalParams = JSON.parseArray(userDefinedParams, Property.class);
globalParams = JSONUtils.toList(userDefinedParams, Property.class);
}
List<TaskNode> taskNodeList = workflowData.getTasks();
// global param string
String globalParamStr = JSON.toJSONString(globalParams);
String globalParamStr = JSONUtils.toJsonString(globalParams);
globalParamStr = ParameterUtils.convertParameterPlaceholders(globalParamStr, timeParams);
globalParams = JSON.parseArray(globalParamStr, Property.class);
globalParams = JSONUtils.toList(globalParamStr, Property.class);
for (Property property : globalParams) {
timeParams.put(property.getProp(), property.getValue());
}
@@ -552,7 +554,8 @@ public class ProcessInstanceService extends BaseDAGService {
String localParams = map.get(LOCAL_PARAMS);
if (localParams != null && !localParams.isEmpty()) {
localParams = ParameterUtils.convertParameterPlaceholders(localParams, timeParams);
List<Property> localParamsList = JSON.parseArray(localParams, Property.class);
List<Property> localParamsList = JSONUtils.toList(localParams, Property.class);
Map<String,Object> localParamsMap = new HashMap<>();
localParamsMap.put("taskType",taskNode.getType());
localParamsMap.put("localParamsList",localParamsList);
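
Throughout this file the diff replaces fastjson calls (JSON.parseArray, JSON.toJSONString) with the project's Jackson-backed JSONUtils. A rough sketch of a wrapper with the shape used above, assuming a shared ObjectMapper; the real org.apache.dolphinscheduler.common.utils.JSONUtils may differ in details such as error handling:

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.type.CollectionType;

import java.util.Collections;
import java.util.List;

public final class JsonUtilsSketch {

    private static final ObjectMapper MAPPER = new ObjectMapper();

    private JsonUtilsSketch() {
    }

    // counterpart of fastjson's JSON.toJSONString
    public static String toJsonString(Object obj) {
        try {
            return MAPPER.writeValueAsString(obj);
        } catch (Exception e) {
            throw new IllegalStateException("serialize to json failed", e);
        }
    }

    // counterpart of fastjson's JSON.parseArray: empty list on null or unparsable input
    public static <T> List<T> toList(String json, Class<T> clazz) {
        if (json == null || json.isEmpty()) {
            return Collections.emptyList();
        }
        try {
            CollectionType type = MAPPER.getTypeFactory().constructCollectionType(List.class, clazz);
            return MAPPER.readValue(json, type);
        } catch (Exception e) {
            return Collections.emptyList();
        }
    }

    // counterpart of fastjson's JSON.parseObject
    public static <T> T parseObject(String json, Class<T> clazz) {
        try {
            return MAPPER.readValue(json, clazz);
        } catch (Exception e) {
            return null;
        }
    }
}

One behavioural difference worth keeping in mind when migrating: fastjson and Jackson disagree on defaults such as unknown-property handling, which is why the swap touches parsing call sites one by one rather than aliasing one API to the other.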

6
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProjectService.java

@@ -88,6 +88,8 @@ public class ProjectService extends BaseService{
project.setUpdateTime(now);
if (projectMapper.insert(project) > 0) {
Project insertedProject = projectMapper.queryByName(name);
result.put(Constants.DATA_LIST, insertedProject);
putMsg(result, Status.SUCCESS);
} else {
putMsg(result, Status.CREATE_PROJECT_ERROR);
@@ -124,9 +126,7 @@ public class ProjectService extends BaseService{
* @return true if the login user have permission to see the project
*/
public Map<String, Object> checkProjectAndAuth(User loginUser, Project project, String projectName) {
Map<String, Object> result = new HashMap<>(5);
if (project == null) {
putMsg(result, Status.PROJECT_NOT_FOUNT, projectName);
} else if (!checkReadPermission(loginUser, project)) {
@@ -135,8 +135,6 @@ public class ProjectService extends BaseService{
}else {
putMsg(result, Status.SUCCESS);
}
return result;
}

75
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java

@@ -16,10 +16,9 @@
*/
package org.apache.dolphinscheduler.api.service;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.serializer.SerializerFeature;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.fasterxml.jackson.databind.SerializationFeature;
import org.apache.commons.collections.BeanMap;
import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
import org.apache.dolphinscheduler.api.dto.resources.filter.ResourceFilter;
@@ -32,10 +31,7 @@ import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.UdfFunc;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.entity.*;
import org.apache.dolphinscheduler.dao.mapper.*;
import org.apache.dolphinscheduler.dao.utils.ResourceProcessDefinitionUtils;
import org.slf4j.Logger;
@@ -352,24 +348,40 @@ public class ResourcesService extends BaseService {
throw new ServiceException(Status.HDFS_OPERATION_ERROR);
}
String nameWithSuffix = name;
if (!resource.isDirectory()) {
//get the file suffix
String suffix = originResourceName.substring(originResourceName.lastIndexOf("."));
//if the name without suffix then add it ,else use the origin name
if(!name.endsWith(suffix)){
nameWithSuffix = nameWithSuffix + suffix;
//get the origin file suffix
String originSuffix = FileUtils.suffix(originFullName);
String suffix = FileUtils.suffix(fullName);
boolean suffixIsChanged = false;
if (StringUtils.isBlank(suffix) && StringUtils.isNotBlank(originSuffix)) {
suffixIsChanged = true;
}
if (StringUtils.isNotBlank(suffix) && !suffix.equals(originSuffix)) {
suffixIsChanged = true;
}
//verify whether suffix is changed
if (suffixIsChanged) {
//need verify whether this resource is authorized to other users
Map<String, Object> columnMap = new HashMap<>();
columnMap.put("resources_id", resourceId);
List<ResourcesUser> resourcesUsers = resourceUserMapper.selectByMap(columnMap);
if (CollectionUtils.isNotEmpty(resourcesUsers)) {
List<Integer> userIds = resourcesUsers.stream().map(ResourcesUser::getUserId).collect(Collectors.toList());
List<User> users = userMapper.selectBatchIds(userIds);
String userNames = users.stream().map(User::getUserName).collect(Collectors.toList()).toString();
logger.error("resource is authorized to user {},suffix not allowed to be modified", userNames);
putMsg(result,Status.RESOURCE_IS_AUTHORIZED,userNames);
return result;
}
}
}
// updateResource data
List<Integer> childrenResource = listAllChildren(resource,false);
String oldFullName = resource.getFullName();
Date now = new Date();
resource.setAlias(nameWithSuffix);
resource.setAlias(name);
resource.setFullName(fullName);
resource.setDescription(desc);
resource.setUpdateTime(now);
@@ -381,7 +393,7 @@ public class ResourcesService extends BaseService {
List<Resource> childResourceList = new ArrayList<>();
List<Resource> resourceList = resourcesMapper.listResourceByIds(childrenResource.toArray(new Integer[childrenResource.size()]));
childResourceList = resourceList.stream().map(t -> {
t.setFullName(t.getFullName().replaceFirst(oldFullName, matcherFullName));
t.setFullName(t.getFullName().replaceFirst(originFullName, matcherFullName));
t.setUpdateTime(now);
return t;
}).collect(Collectors.toList());
@@ -544,7 +556,6 @@ public class ResourcesService extends BaseService {
}
List<Resource> allResourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal(),0);
Visitor resourceTreeVisitor = new ResourceTreeVisitor(allResourceList);
//JSONArray jsonArray = JSON.parseArray(JSON.toJSONString(resourceTreeVisitor.visit().getChildren(), SerializerFeature.SortField));
result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
@@ -992,10 +1003,23 @@ public class ResourcesService extends BaseService {
logger.error("resource id {} is directory,can't download it", resourceId);
throw new RuntimeException("can't download directory");
}
User user = userMapper.queryDetailsById(resource.getUserId());
String tenantCode = tenantMapper.queryById(user.getTenantId()).getTenantCode();
String hdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getAlias());
int userId = resource.getUserId();
User user = userMapper.selectById(userId);
if(user == null){
logger.error("user id {} not exists", userId);
throw new RuntimeException(String.format("resource owner id %d not exist",userId));
}
Tenant tenant = tenantMapper.queryById(user.getTenantId());
if(tenant == null){
logger.error("tenant id {} not exists", user.getTenantId());
throw new RuntimeException(String.format("The tenant id %d of resource owner not exist",user.getTenantId()));
}
String tenantCode = tenant.getTenantCode();
String hdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getFullName());
String localFileName = FileUtils.getDownloadFilename(resource.getAlias());
logger.info("resource hdfs path is {} ", hdfsFileName);
@@ -1128,8 +1152,9 @@ public class ResourcesService extends BaseService {
}
List<Resource> authedResources = resourcesMapper.queryAuthorizedResourceList(userId);
Visitor visitor = new ResourceTreeVisitor(authedResources);
logger.info(JSON.toJSONString(visitor.visit(), SerializerFeature.SortField));
String jsonTreeStr = JSON.toJSONString(visitor.visit().getChildren(), SerializerFeature.SortField);
String visit = JSONUtils.toJsonString(visitor.visit(), SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS);
logger.info(visit);
String jsonTreeStr = JSONUtils.toJsonString(visitor.visit().getChildren(), SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS);
logger.info(jsonTreeStr);
result.put(Constants.DATA_LIST, visitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
@@ -1159,8 +1184,8 @@ public class ResourcesService extends BaseService {
*/
private String getTenantCode(int userId,Result result){
User user = userMapper.queryDetailsById(userId);
if(user == null){
User user = userMapper.selectById(userId);
if (user == null) {
logger.error("user {} not exists", userId);
putMsg(result, Status.USER_NOT_EXIST,userId);
return null;
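
On the rename path above, the update is now refused when it would change a file's suffix while the resource is authorized to other users, since their released process definitions may still reference the old name. The two StringUtils branches reduce to a single inequality once a missing suffix is treated as the empty string; a small sketch, where suffix() is a hypothetical stand-in for FileUtils.suffix:

public final class SuffixChangeCheck {

    private SuffixChangeCheck() {
    }

    // hypothetical stand-in for FileUtils.suffix: extension without the dot, "" when absent
    static String suffix(String fileName) {
        String base = fileName.substring(fileName.lastIndexOf('/') + 1);
        int dot = base.lastIndexOf('.');
        return dot < 0 ? "" : base.substring(dot + 1);
    }

    // blank-vs-present, present-vs-blank and present-vs-different all collapse to !equals
    static boolean suffixChanged(String originFullName, String fullName) {
        return !suffix(fullName).equals(suffix(originFullName));
    }
}

suffixChanged("/a/b.sh", "/a/b") and suffixChanged("/a/b.sh", "/a/b.py") are both true; a rename that keeps the .sh suffix passes the check.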

28
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/SchedulerService.java

@@ -19,12 +19,13 @@ package org.apache.dolphinscheduler.api.service;
import org.apache.dolphinscheduler.api.dto.ScheduleParam;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ServiceException;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.*;
import org.apache.dolphinscheduler.common.model.Server;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.Project;
@@ -165,6 +166,9 @@ public class SchedulerService extends BaseService {
processDefinition.setReceivers(receivers);
processDefinition.setReceiversCc(receiversCc);
processDefinitionMapper.updateById(processDefinition);
// return scheduler object with ID
result.put(Constants.DATA_LIST, scheduleMapper.selectById(scheduleObj.getId()));
putMsg(result, Status.SUCCESS);
result.put("scheduleId", scheduleObj.getId());
@@ -330,10 +334,9 @@ public class SchedulerService extends BaseService {
if(scheduleStatus == ReleaseState.ONLINE){
// check process definition release state
if(processDefinition.getReleaseState() != ReleaseState.ONLINE){
ProcessDefinition definition = processDefinitionMapper.selectById(scheduleObj.getProcessDefinitionId());
logger.info("not release process definition id: {} , name : {}",
processDefinition.getId(), processDefinition.getName());
putMsg(result, Status.PROCESS_DEFINE_NOT_RELEASE, definition.getName());
putMsg(result, Status.PROCESS_DEFINE_NOT_RELEASE, processDefinition.getName());
return result;
}
// check sub process definition release state
@@ -377,7 +380,7 @@ public class SchedulerService extends BaseService {
switch (scheduleStatus) {
case ONLINE: {
logger.info("Call master client set schedule online, project id: {}, flow id: {},host: {}", project.getId(), processDefinition.getId(), masterServers);
setSchedule(project.getId(), id);
setSchedule(project.getId(), scheduleObj);
break;
}
case OFFLINE: {
@@ -392,7 +395,7 @@ public class SchedulerService extends BaseService {
}
} catch (Exception e) {
result.put(Constants.MSG, scheduleStatus == ReleaseState.ONLINE ? "set online failure" : "set offline failure");
throw new RuntimeException(result.get(Constants.MSG).toString());
throw new ServiceException(result.get(Constants.MSG).toString());
}
putMsg(result, Status.SUCCESS);
@@ -469,15 +472,10 @@ public class SchedulerService extends BaseService {
return result;
}
public void setSchedule(int projectId, int scheduleId) throws RuntimeException{
logger.info("set schedule, project id: {}, scheduleId: {}", projectId, scheduleId);
public void setSchedule(int projectId, Schedule schedule) {
Schedule schedule = processService.querySchedule(scheduleId);
if (schedule == null) {
logger.warn("process schedule info not exists");
return;
}
int scheduleId = schedule.getId();
logger.info("set schedule, project id: {}, scheduleId: {}", projectId, scheduleId);
Date startDate = schedule.getStartTime();
Date endDate = schedule.getEndTime();
@@ -499,7 +497,7 @@ public class SchedulerService extends BaseService {
* @param scheduleId schedule id
* @throws RuntimeException runtime exception
*/
public static void deleteSchedule(int projectId, int scheduleId) throws RuntimeException{
public static void deleteSchedule(int projectId, int scheduleId) {
logger.info("delete schedules of project id:{}, schedule id:{}", projectId, scheduleId);
String jobName = QuartzExecutors.buildJobName(scheduleId);
@@ -507,7 +505,7 @@ public class SchedulerService extends BaseService {
if(!QuartzExecutors.getInstance().deleteJob(jobName, jobGroupName)){
logger.warn("set offline failure:projectId:{},scheduleId:{}",projectId,scheduleId);
throw new RuntimeException(String.format("set offline failure"));
throw new ServiceException("set offline failure");
}
}
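
The error handling above also moves from bare RuntimeException to the API module's ServiceException, so callers can distinguish service failures by type and, where a Status is supplied, by code. A rough sketch of that pattern under assumed shapes; the real org.apache.dolphinscheduler.api.exceptions.ServiceException and Status enum differ in their members:

// simplified, assumed shapes for illustration only
enum Status {
    OFFLINE_SCHEDULE_ERROR(10010, "set offline failure"); // hypothetical code and message

    private final int code;
    private final String msg;

    Status(int code, String msg) {
        this.code = code;
        this.msg = msg;
    }

    public int getCode() {
        return code;
    }

    public String getMsg() {
        return msg;
    }
}

class ServiceException extends RuntimeException {

    private final int code;

    ServiceException(Status status) {
        super(status.getMsg());
        this.code = status.getCode();
    }

    ServiceException(String message) {
        super(message);
        this.code = Status.OFFLINE_SCHEDULE_ERROR.getCode();
    }

    public int getCode() {
        return code;
    }
}

Because ServiceException stays unchecked, deleteSchedule can drop its redundant "throws RuntimeException" clause without changing any caller.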

6
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/TenantService.java

@@ -327,11 +327,11 @@ public class TenantService extends BaseService{
* @return true if tenant code can user, otherwise return false
*/
public Result verifyTenantCode(String tenantCode) {
Result result=new Result();
Result result = new Result();
if (checkTenantExists(tenantCode)) {
logger.error("tenant {} has exist, can't create again.", tenantCode);
putMsg(result, Status.TENANT_NAME_EXIST);
}else{
putMsg(result, Status.TENANT_NAME_EXIST, tenantCode);
} else {
putMsg(result, Status.SUCCESS);
}
return result;

98
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/UsersService.java

@@ -18,7 +18,10 @@ package org.apache.dolphinscheduler.api.service;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
import org.apache.dolphinscheduler.api.dto.resources.visitor.ResourceTreeVisitor;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ServiceException;
import org.apache.dolphinscheduler.api.utils.CheckUtils;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
@@ -35,6 +38,7 @@ import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import java.io.IOException;
import java.util.*;
import java.util.stream.Collectors;
@@ -306,14 +310,11 @@ public class UsersService extends BaseService {
user.setEmail(email);
}
if (StringUtils.isNotEmpty(phone)) {
if (!CheckUtils.checkPhone(phone)){
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR,phone);
return result;
}
user.setPhone(phone);
if (StringUtils.isNotEmpty(phone) && !CheckUtils.checkPhone(phone)) {
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR,phone);
return result;
}
user.setPhone(phone);
user.setQueue(queue);
user.setState(state);
Date now = new Date();
@@ -340,18 +341,18 @@ public class UsersService extends BaseService {
List<Resource> fileResourcesList = resourceMapper.queryResourceList(
null, userId, ResourceType.FILE.ordinal());
if (CollectionUtils.isNotEmpty(fileResourcesList)) {
for (Resource resource : fileResourcesList) {
HadoopUtils.getInstance().copy(oldResourcePath + "/" + resource.getAlias(), newResourcePath, false, true);
}
ResourceTreeVisitor resourceTreeVisitor = new ResourceTreeVisitor(fileResourcesList);
ResourceComponent resourceComponent = resourceTreeVisitor.visit();
copyResourceFiles(resourceComponent, oldResourcePath, newResourcePath);
}
//udf resources
List<Resource> udfResourceList = resourceMapper.queryResourceList(
null, userId, ResourceType.UDF.ordinal());
if (CollectionUtils.isNotEmpty(udfResourceList)) {
for (Resource resource : udfResourceList) {
HadoopUtils.getInstance().copy(oldUdfsPath + "/" + resource.getAlias(), newUdfsPath, false, true);
}
ResourceTreeVisitor resourceTreeVisitor = new ResourceTreeVisitor(udfResourceList);
ResourceComponent resourceComponent = resourceTreeVisitor.visit();
copyResourceFiles(resourceComponent, oldUdfsPath, newUdfsPath);
}
//Delete the user from the old tenant directory
@@ -517,7 +518,7 @@ public class UsersService extends BaseService {
if (CollectionUtils.isNotEmpty(oldAuthorizedResIds)) {
// get all resource id of process definitions those is released
List<Map<String, Object>> list = processDefinitionMapper.listResources();
List<Map<String, Object>> list = processDefinitionMapper.listResourcesByUser(userId);
Map<Integer, Set<Integer>> resourceProcessMap = ResourceProcessDefinitionUtils.getResourceProcessDefinitionMap(list);
Set<Integer> resourceIdSet = resourceProcessMap.keySet();
@@ -871,4 +872,73 @@ public class UsersService extends BaseService {
return msg;
}
/**
* copy resource files
* @param resourceComponent resource component
* @param srcBasePath src base path
* @param dstBasePath dst base path
* @throws IOException io exception
*/
private void copyResourceFiles(ResourceComponent resourceComponent, String srcBasePath, String dstBasePath) throws IOException {
List<ResourceComponent> components = resourceComponent.getChildren();
if (CollectionUtils.isNotEmpty(components)) {
for (ResourceComponent component:components) {
// verify whether exist
if (!HadoopUtils.getInstance().exists(String.format("%s/%s",srcBasePath,component.getFullName()))){
logger.error("resource file: {} not exist,copy error",component.getFullName());
throw new ServiceException(Status.RESOURCE_NOT_EXIST);
}
if (!component.isDirctory()) {
// copy it to dst
HadoopUtils.getInstance().copy(String.format("%s/%s",srcBasePath,component.getFullName()),String.format("%s/%s",dstBasePath,component.getFullName()),false,true);
continue;
}
if(CollectionUtils.isEmpty(component.getChildren())) {
// if not exist,need create it
if (!HadoopUtils.getInstance().exists(String.format("%s/%s",dstBasePath,component.getFullName()))) {
HadoopUtils.getInstance().mkdir(String.format("%s/%s",dstBasePath,component.getFullName()));
}
}else{
copyResourceFiles(component,srcBasePath,dstBasePath);
}
}
}
}
/**
* register user, default state is 0, default tenant_id is 1, no phone, no queue
*
* @param userName user name
* @param userPassword user password
* @param repeatPassword repeat password
* @param email email
* @return register result code
* @throws Exception exception
*/
@Transactional(rollbackFor = Exception.class)
public Map<String, Object> registerUser(String userName, String userPassword, String repeatPassword, String email) throws Exception {
Map<String, Object> result = new HashMap<>(5);
//check user params
String msg = this.checkUserParams(userName, userPassword, email, "");
if (!StringUtils.isEmpty(msg)) {
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR,msg);
return result;
}
if (!userPassword.equals(repeatPassword)) {
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, "two passwords are not same");
return result;
}
createUser(userName, userPassword, email, 1, "", "", 0);
putMsg(result, Status.SUCCESS);
return result;
}
}
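
The new copyResourceFiles above walks the resource tree depth-first: plain files are copied across, empty directories are created at the destination, and non-empty directories recurse. The same traversal against a local filesystem, as a rough java.nio analogue of the HDFS version rather than the project's API:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.stream.Stream;

public final class ResourceTreeCopy {

    private ResourceTreeCopy() {
    }

    static void copyTree(Path src, Path dst) throws IOException {
        if (Files.isRegularFile(src)) {
            Path parent = dst.getParent();
            if (parent != null) {
                Files.createDirectories(parent); // make sure the target directory exists
            }
            Files.copy(src, dst, StandardCopyOption.REPLACE_EXISTING); // file: copy it across
            return;
        }
        try (Stream<Path> children = Files.list(src)) {
            Path[] entries = children.toArray(Path[]::new);
            if (entries.length == 0) {
                Files.createDirectories(dst); // empty directory: create it at the destination
                return;
            }
            for (Path entry : entries) { // non-empty directory: recurse
                copyTree(entry, dst.resolve(entry.getFileName()));
            }
        }
    }
}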

7
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/WorkerGroupService.java

@@ -16,15 +16,12 @@
*/
package org.apache.dolphinscheduler.api.service;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.AccessToken;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.entity.WorkerGroup;
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper;
@@ -149,8 +146,8 @@ public class WorkerGroupService extends BaseService {
if (isPaging){
wg.setIpList(childrenNodes);
String registeredIpValue = zookeeperCachedOperator.get(workerGroupPath + "/" + childrenNodes.get(0));
wg.setCreateTime(DateUtils.stringToDate(registeredIpValue.split(",")[3]));
wg.setUpdateTime(DateUtils.stringToDate(registeredIpValue.split(",")[4]));
wg.setCreateTime(DateUtils.stringToDate(registeredIpValue.split(",")[6]));
wg.setUpdateTime(DateUtils.stringToDate(registeredIpValue.split(",")[7]));
}
workerGroups.add(wg);
}
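
The index change above (split(",")[3] and [4] becoming [6] and [7]) tracks a longer worker heartbeat layout: the value registered under the group's ZooKeeper path is a comma-separated record, and the create/update timestamps now sit at positions 6 and 7. Indexing the split result directly throws ArrayIndexOutOfBoundsException on a short or stale record, so a defensive reader is worth having; a sketch, with the field layout itself assumed rather than taken from this diff:

public final class HeartbeatFields {

    private HeartbeatFields() {
    }

    static String fieldAt(String heartbeat, int index) {
        String[] parts = heartbeat.split(",");
        if (index < 0 || index >= parts.length) {
            throw new IllegalArgumentException(
                    "heartbeat has " + parts.length + " fields, wanted index " + index);
        }
        return parts[index];
    }
}

Here fieldAt(registeredIpValue, 6) would be the create time and fieldAt(registeredIpValue, 7) the update time, each then parsed with DateUtils.stringToDate as above.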

4
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/utils/CheckUtils.java

@@ -20,7 +20,7 @@ package org.apache.dolphinscheduler.api.utils;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.common.utils.TaskParametersUtils;
@@ -115,7 +115,7 @@ public class CheckUtils {
*
* @param parameter parameter
* @param taskType task type
* @return true if taks node parameters are valid, otherwise return false
* @return true if task node parameters are valid, otherwise return false
*/
public static boolean checkTaskNodeParameters(String parameter, String taskType) {
AbstractParameters abstractParameters = TaskParametersUtils.getParameters(taskType, parameter);

5
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/utils/ZookeeperMonitor.java

@@ -81,8 +81,7 @@ public class ZookeeperMonitor extends AbstractZKClient {
if(ok){
state.getZookeeperInfo();
}
String hostName = zookeeperServer;
int connections = state.getConnections();
int watches = state.getWatches();
long sent = state.getSent();
@@ -95,7 +94,7 @@ public class ZookeeperMonitor extends AbstractZKClient {
int status = ok ? 1 : 0;
Date date = new Date();
ZookeeperRecord zookeeperRecord = new ZookeeperRecord(hostName,connections,watches,sent,received,mode,minLatency,avgLatency,maxLatency,nodeCount,status,date);
ZookeeperRecord zookeeperRecord = new ZookeeperRecord(zookeeperServer,connections,watches,sent,received,mode,minLatency,avgLatency,maxLatency,nodeCount,status,date);
list.add(zookeeperRecord);
}

Some files were not shown because too many files have changed in this diff.
