
Merge pull request #1 from apache/dev

update
Zhou.Z authored 4 years ago, committed by GitHub
parent commit 266f002ecb
  1. .github/ISSUE_TEMPLATE/bug_report.md (6)
  2. .github/ISSUE_TEMPLATE/feature_request.md (9)
  3. .github/ISSUE_TEMPLATE/improvement_suggestion.md (24)
  4. .github/ISSUE_TEMPLATE/question.md (10)
  5. .github/ISSUE_TEMPLATE/test.md (24)
  6. .github/workflows/ci_e2e.yml (4)
  7. .github/workflows/ci_ut.yml (10)
  8. LICENSE (17)
  9. NOTICE (84)
  10. README.md (17)
  11. README_zh_CN.md (17)
  12. ambari_plugin/README.md (106)
  13. ambari_plugin/common-services/DOLPHIN/1.2.1/package/scripts/params.py (4)
  14. ambari_plugin/common-services/DOLPHIN/1.3.0/alerts.json (62)
  15. ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-alert.xml (6)
  16. ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-datasource.xml (261)
  17. ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-worker.xml (9)
  18. ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-zookeeper.xml (8)
  19. ambari_plugin/common-services/DOLPHIN/1.3.0/metainfo.xml (4)
  20. ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_alert_service.py (3)
  21. ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_api_service.py (3)
  22. ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_logger_service.py (4)
  23. ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_master_service.py (3)
  24. ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_worker_service.py (3)
  25. ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/params.py (15)
  26. docker/build/Dockerfile (2)
  27. docker/build/README.md (66)
  28. docker/build/README_zh_CN.md (66)
  29. docker/build/conf/dolphinscheduler/alert.properties.tpl (14)
  30. docker/build/conf/dolphinscheduler/common.properties.tpl (12)
  31. docker/build/conf/dolphinscheduler/datasource.properties.tpl (15)
  32. docker/build/conf/dolphinscheduler/logback/logback-alert.xml (2)
  33. docker/build/conf/dolphinscheduler/zookeeper.properties.tpl (2)
  34. docker/build/startup-init-conf.sh (23)
  35. docker/build/startup.sh (40)
  36. docker/docker-swarm/docker-compose.yml (53)
  37. docker/docker-swarm/docker-stack.yml (52)
  38. docker/kubernetes/dolphinscheduler/Chart.yaml (2)
  39. docker/kubernetes/dolphinscheduler/README.md (42)
  40. docker/kubernetes/dolphinscheduler/requirements.yaml (25)
  41. docker/kubernetes/dolphinscheduler/templates/NOTES.txt (13)
  42. docker/kubernetes/dolphinscheduler/templates/_helpers.tpl (8)
  43. docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-alert.yaml (1)
  44. docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-common.yaml (35)
  45. docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml (98)
  46. docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml (93)
  47. docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-frontend.yaml (17)
  48. docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml (93)
  49. docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml (163)
  50. docker/kubernetes/dolphinscheduler/values.yaml (87)
  51. dolphinscheduler-alert/pom.xml (2)
  52. dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/manager/EmailManager.java (15)
  53. dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/plugin/EmailAlertPlugin.java (14)
  54. dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/runner/AlertSender.java (10)
  55. dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/template/impl/DefaultHTMLTemplate.java (19)
  56. dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/Constants.java (4)
  57. dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/EnterpriseWeChatUtils.java (30)
  58. dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/MailUtils.java (5)
  59. dolphinscheduler-alert/src/main/resources/alert.properties (8)
  60. dolphinscheduler-alert/src/main/resources/logback-alert.xml (2)
  61. dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/template/impl/DefaultHTMLTemplateTest.java (43)
  62. dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/utils/EnterpriseWeChatUtilsTest.java (4)
  63. dolphinscheduler-api/pom.xml (2)
  64. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/configuration/AppConfiguration.java (3)
  65. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/DataSourceController.java (2)
  66. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java (2)
  67. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/UsersController.java (31)
  68. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/Directory.java (4)
  69. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/FileLeaf.java (4)
  70. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/ResourceComponent.java (16)
  71. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/visitor/Visitor.java (9)
  72. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java (4)
  73. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/interceptor/LoginHandlerInterceptor.java (18)
  74. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/DataSourceService.java (41)
  75. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/MonitorService.java (54)
  76. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessInstanceService.java (5)
  77. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java (66)
  78. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/SchedulerService.java (23)
  79. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/TenantService.java (6)
  80. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/UsersService.java (98)
  81. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/utils/CheckUtils.java (2)
  82. dolphinscheduler-api/src/main/resources/application-api.properties (3)
  83. dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/TenantControllerTest.java (16)
  84. dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/UsersControllerTest.java (19)
  85. dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/DataSourceServiceTest.java (65)
  86. dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ResourcesServiceTest.java (14)
  87. dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/SchedulerServiceTest.java (174)
  88. dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/TenantServiceTest.java (30)
  89. dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/UsersServiceTest.java (45)
  90. dolphinscheduler-common/pom.xml (8)
  91. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java (22)
  92. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/ExecutionStatus.java (11)
  93. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/model/TaskNode.java (13)
  94. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/model/WorkerServerModel.java (122)
  95. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/shell/AbstractShell.java (40)
  96. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/datax/DataxParameters.java (13)
  97. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/flink/FlinkParameters.java (5)
  98. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/CollectionUtils.java (3)
  99. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/CommonUtils.java (4)
  100. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/DateUtils.java (2)
Some files were not shown because too many files have changed in this diff.

.github/ISSUE_TEMPLATE/bug_report.md (6)

@ -1,7 +1,7 @@
---
name: Bug report
about: Create a report to help us improve
title: "[BUG] bug title "
title: "[Bug][Module Name] Bug title "
labels: bug
assignees: ''
@ -9,6 +9,8 @@ assignees: ''
*For better global communication, please give priority to using English description, thx! *
*Please review https://dolphinscheduler.apache.org/en-us/docs/development/issue.html when describe an issue.*
**Describe the bug**
A clear and concise description of what the bug is.
@ -32,5 +34,5 @@ If applicable, add screenshots to help explain your problem.
**Additional context**
Add any other context about the problem here.
**Requirement or improvement
**Requirement or improvement**
- Please describe about your requirements or improvement suggestions.

.github/ISSUE_TEMPLATE/feature_request.md (9)

@ -1,12 +1,19 @@
---
name: Feature request
about: Suggest an idea for this project
title: "[Feature]"
title: "[Feature][Module Name] Feature title"
labels: new feature
assignees: ''
---
*For better global communication, please give priority to using English description, thx! *
*Please review https://dolphinscheduler.apache.org/en-us/docs/development/issue.html when describe an issue.*
**Describe the feature**
A clear and concise description of what the feature is.
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

.github/ISSUE_TEMPLATE/improvement_suggestion.md (24)

@ -0,0 +1,24 @@
---
name: Improvement suggestion
about: Improvement suggestion for this project
title: "[Improvement][Module Name] Improvement title"
labels: improvement
assignees: ''
---
*For better global communication, please give priority to using English description, thx! *
*Please review https://dolphinscheduler.apache.org/en-us/docs/development/issue.html when describe an issue.*
**Describe the question**
A clear and concise description of what the improvement is.
**What are the current deficiencies and the benefits of improvement**
- A clear and concise description of the current deficiencies and the benefits of this improvement.
**Which version of DolphinScheduler:**
-[1.1.0-preview]
**Describe alternatives you've considered**
A clear and concise description of any alternative improvement solutions you've considered.

.github/ISSUE_TEMPLATE/question.md (10)

@ -1,7 +1,7 @@
---
name: question
about: have a question wanted to be help
title: "[QUESTION] question title"
name: Question
about: Have a question wanted to be help
title: "[Question] Question title"
labels: question
assignees: ''
@ -9,6 +9,8 @@ assignees: ''
*For better global communication, please give priority to using English description, thx! *
*Please review https://dolphinscheduler.apache.org/en-us/docs/development/issue.html when describe an issue.*
**Describe the question**
A clear and concise description of what the question is.
@ -19,5 +21,5 @@ A clear and concise description of what the question is.
**Additional context**
Add any other context about the problem here.
**Requirement or improvement
**Requirement or improvement**
- Please describe about your requirements or improvement suggestions.

.github/ISSUE_TEMPLATE/test.md (24)

@ -0,0 +1,24 @@
---
name: Test
about: Test to enhance the robustness of this project
title: "[Test][Module Name] Test title"
labels: test
assignees: ''
---
*For better global communication, please give priority to using English description, thx! *
*Please review https://dolphinscheduler.apache.org/en-us/docs/development/issue.html when describe an issue.*
**Describe the question**
A clear and concise description of what the test part is.
**What are the current deficiencies and the benefits of changing or adding this test**
- A clear and concise description of the current deficiencies, the benefits of changing or adding this test, and the scope involved.
**Which version of DolphinScheduler:**
-[1.1.0-preview]
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions you've considered.

.github/workflows/ci_e2e.yml (4)

@ -66,9 +66,9 @@ jobs:
run: cd ./e2e && mvn -B clean test
- name: Collect logs
if: failure()
uses: actions/upload-artifact@v1
uses: actions/upload-artifact@v2
with:
name: dslogs
path: /var/lib/docker/volumes/dolphinscheduler-logs/_data
path: ${{ github.workspace }}/docker/docker-swarm/dolphinscheduler-logs

.github/workflows/ci_ut.yml (10)

@ -62,15 +62,19 @@ jobs:
git fetch origin
- name: Compile
run: |
export MAVEN_OPTS='-Dmaven.repo.local=.m2/repository -XX:+TieredCompilation -XX:TieredStopAtLevel=1 -XX:+CMSClassUnloadingEnabled -XX:+UseConcMarkSweepGC -XX:-UseGCOverheadLimit -Xmx3g'
export MAVEN_OPTS='-Dmaven.repo.local=.m2/repository -XX:+TieredCompilation -XX:TieredStopAtLevel=1 -XX:+CMSClassUnloadingEnabled -XX:+UseConcMarkSweepGC -XX:-UseGCOverheadLimit -Xmx5g'
mvn test -B -Dmaven.test.skip=false
- name: Upload coverage report to codecov
run: |
CODECOV_TOKEN="09c2663f-b091-4258-8a47-c981827eb29a" bash <(curl -s https://codecov.io/bash)
# Set up JDK 11 for SonarCloud.
- name: Set up JDK 1.11
uses: actions/setup-java@v1
with:
java-version: 1.11
- name: Run SonarCloud Analysis
run: >
mvn verify --batch-mode
org.sonarsource.scanner.maven:sonar-maven-plugin:3.6.1.1688:sonar
mvn --batch-mode verify sonar:sonar
-Dsonar.coverage.jacoco.xmlReportPaths=target/site/jacoco/jacoco.xml
-Dmaven.test.skip=true
-Dsonar.host.url=https://sonarcloud.io

LICENSE (17)

@ -199,3 +199,20 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=======================================================================
Apache DolphinScheduler (incubating) Subcomponents:
The Apache DolphinScheduler (incubating) project contains subcomponents with separate copyright
notices and license terms. Your use of the source code for the these
subcomponents is subject to the terms and conditions of the following
licenses.
========================================================================
Apache 2.0 licenses
========================================================================
The following components are provided under the Apache License. See project link for details.
The text of each license is the standard Apache 2.0 license.
ScriptRunner from https://github.com/mybatis/mybatis-3 Apache 2.0
mvnw files from https://github.com/takari/maven-wrapper Apache 2.0
PropertyPlaceholderHelper from https://github.com/spring-projects/spring-framework Apache 2.0

NOTICE (84)

@ -1,5 +1,87 @@
Apache DolphinScheduler (incubating)
Copyright 2019 The Apache Software Foundation
Copyright 2019-2020 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).
mybatis-3
iBATIS
This product includes software developed by
The Apache Software Foundation (http://www.apache.org/).
Copyright 2010 The Apache Software Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
OGNL
//--------------------------------------------------------------------------
// Copyright (c) 2004, Drew Davidson and Luke Blanshard
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// Neither the name of the Drew Davidson nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
// OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
// AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
//--------------------------------------------------------------------------
Refactored SqlBuilder class (SQL, AbstractSQL)
This product includes software developed by
Adam Gent (https://gist.github.com/3650165)
Copyright 2010 Adam Gent
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Spring Framework ${version}
Copyright (c) 2002-${copyright} Pivotal, Inc.
This product is licensed to you under the Apache License, Version 2.0
(the "License"). You may not use this product except in compliance with
the License.
This product may include a number of subcomponents with separate
copyright notices and license terms. Your use of the source code for
these subcomponents is subject to the terms and conditions of the
subcomponent's license, as noted in the license.txt file.

README.md (17)

@ -55,20 +55,10 @@ Overload processing: Task queue mechanism, the number of schedulable tasks on a
![monitor](https://user-images.githubusercontent.com/59273635/75625839-c698a480-5bfc-11ea-8bbe-895b561b337f.png)
![security](https://user-images.githubusercontent.com/15833811/75236441-bfd2f180-57f8-11ea-88bd-f24311e01b7e.png)
![treeview](https://user-images.githubusercontent.com/15833811/75217191-3fe56100-57d1-11ea-8856-f19180d9a879.png)
### Document
- <a href="https://dolphinscheduler.apache.org/en-us/docs/1.2.0/user_doc/backend-deployment.html" target="_blank">Backend deployment documentation</a>
- <a href="https://dolphinscheduler.apache.org/en-us/docs/1.2.0/user_doc/frontend-deployment.html" target="_blank">Front-end deployment documentation</a>
- [**User manual**](https://dolphinscheduler.apache.org/en-us/docs/1.2.0/user_doc/system-manual.html?_blank "System manual")
- [**Upgrade document**](https://dolphinscheduler.apache.org/en-us/docs/1.2.0/user_doc/upgrade.html?_blank "Upgrade document")
### Online Demo
- <a href="http://106.75.43.194:8888" target="_blank">Online Demo</a>
More documentation please refer to <a href="https://dolphinscheduler.apache.org/en-us/docs/1.2.0/user_doc/quick-start.html" target="_blank">[DolphinScheduler online documentation]</a>
### Recent R&D plan
Work plan of Dolphin Scheduler: [R&D plan](https://github.com/apache/incubator-dolphinscheduler/projects/1), Under the `In Develop` card is what is currently being developed, TODO card is to be done (including feature ideas)
@ -86,9 +76,8 @@ Welcome to participate in contributing, please refer to the process of submittin
Artifact:
```
dolphinscheduler-dist/dolphinscheduler-backend/target/apache-dolphinscheduler-incubating-${latest.release.version}-dolphinscheduler-backend-bin.tar.gz: Binary package of DolphinScheduler-Backend
dolphinscheduler-dist/dolphinscheduler-front/target/apache-dolphinscheduler-incubating-${latest.release.version}-dolphinscheduler-front-bin.tar.gz: Binary package of DolphinScheduler-UI
dolphinscheduler-dist/dolphinscheduler-src/target/apache-dolphinscheduler-incubating-${latest.release.version}-src.zip: Source code package of DolphinScheduler
dolphinscheduler-dist/target/apache-dolphinscheduler-incubating-${latest.release.version}-dolphinscheduler-bin.tar.gz: Binary package of DolphinScheduler
dolphinscheduler-dist/target/apache-dolphinscheduler-incubating-${latest.release.version}-src.zip: Source code package of DolphinScheduler
```
### Thanks

README_zh_CN.md (17)

@ -50,20 +50,10 @@ Dolphin Scheduler Official Website
![security](https://user-images.githubusercontent.com/15833811/75209633-baa28200-57b9-11ea-9def-94bef2e212a7.jpg)
### 文档
- <a href="https://dolphinscheduler.apache.org/zh-cn/docs/1.2.0/user_doc/backend-deployment.html" target="_blank">后端部署文档</a>
- <a href="https://dolphinscheduler.apache.org/zh-cn/docs/1.2.0/user_doc/frontend-deployment.html" target="_blank">前端部署文档</a>
- [**使用手册**](https://dolphinscheduler.apache.org/zh-cn/docs/1.2.0/user_doc/system-manual.html?_blank "系统使用手册")
- [**升级文档**](https://dolphinscheduler.apache.org/zh-cn/docs/1.2.0/user_doc/upgrade.html?_blank "升级文档")
### 我要体验
- <a href="http://106.75.43.194:8888" target="_blank">我要体验</a>
更多文档请参考 <a href="https://dolphinscheduler.apache.org/zh-cn/docs/1.2.0/user_doc/quick-start.html" target="_blank">DolphinScheduler中文在线文档</a>
### 近期研发计划
@ -83,9 +73,8 @@ DolphinScheduler的工作计划:<a href="https://github.com/apache/incubator-d
Artifact:
```
dolphinscheduler-dist/dolphinscheduler-backend/target/apache-dolphinscheduler-incubating-${latest.release.version}-dolphinscheduler-backend-bin.tar.gz: Binary package of DolphinScheduler-Backend
dolphinscheduler-dist/dolphinscheduler-front/target/apache-dolphinscheduler-incubating-${latest.release.version}-dolphinscheduler-front-bin.tar.gz: Binary package of DolphinScheduler-UI
dolphinscheduler-dist/dolphinscheduler-src/target/apache-dolphinscheduler-incubating-${latest.release.version}-src.zip: Source code package of DolphinScheduler
dolphinscheduler-dist/target/apache-dolphinscheduler-incubating-${latest.release.version}-dolphinscheduler-bin.tar.gz: Binary package of DolphinScheduler
dolphinscheduler-dist/target/apache-dolphinscheduler-incubating-${latest.release.version}-src.zip: Source code package of DolphinScheduler
```
### 感谢

ambari_plugin/README.md (106)

@ -1,27 +1,46 @@
### Dolphin Scheduler的Ambari插件使用说明
### Instructions for using the Dolphin Scheduler's Ambari plug-in
##### 备注
#### Note
1. 本文档适用于对Ambari中基本了解的用户
2. 本文档是对已安装Ambari服务添加Dolphin Scheduler(1.3.0版本)服务的说明
1. This document is intended for users with a basic understanding of Ambari
2. This document is a description of adding the Dolphin Scheduler service to the installed Ambari service
3. This document is based on version 2.5.2 of Ambari
##### 一 安装准备
#### Installation preparation
1. 准备RPM包
1. Prepare the RPM packages
- 在源码dolphinscheduler-dist目录下执行命令```mvn -U clean install rpm:attached-rpm -Prpmbuild -Dmaven.test.skip=true -X```即可生成(在目录 dolphinscheduler-dist/target/rpm/apache-dolphinscheduler-incubating/RPMS/noarch )
- It is generated by executing the command ```mvn -U clean install -Prpmbuild -Dmaven.test.skip=true -X``` in the project root directory (In the directory: dolphinscheduler-dist/target/rpm/apache-dolphinscheduler-incubating/RPMS/noarch )
2. 创建DS的安装用户--权限
2. Create an installation for DS,who have read and write access to the installation directory (/opt/soft)
3. 初始化数据库信息
3. Install with rpm package
- Manual installation (recommended):
- Copy the prepared RPM packages to each node of the cluster.
- Execute with DS installation user: ```rpm -ivh apache-dolphinscheduler-incubating-xxx.noarch.rpm```
- Mysql-connector-java packaged using the default POM file will not be included.
- The RPM package was packaged in the project with the installation path of /opt/soft.
If you use mysql as the database, you need add it manually.
- Automatic installation with ambari
- Each node of the cluster needs to configure the local yum source
- Copy the prepared RPM packages to each node local yum source
4. Copy plug-in directory
- copy directory ambari_plugin/common-services/DOLPHIN to ambari-server/resources/common-services/
- copy directory ambari_plugin/statcks/DOLPHIN to ambari-server/resources/stacks/HDP/2.6/services/--stack version is selected based on the actual situation
5. Initializes the database information
```
-- 创建Dolphin Scheduler的数据库:dolphinscheduler
-- Create the database for the Dolphin Scheduler:dolphinscheduler
CREATE DATABASE dolphinscheduler DEFAULT CHARACTER SET utf8 DEFAULT COLLATE
utf8_general_ci;
-- 初始化dolphinscheduler数据库的用户和密码,并分配权限
-- 替换下面sql语句中的{user}为dolphinscheduler数据库的用户
-- Initialize the user and password for the dolphinscheduler database and assign permissions
-- Replace the {user} in the SQL statement below with the user of the dolphinscheduler database
GRANT ALL PRIVILEGES ON dolphinscheduler.* TO '{user}'@'%' IDENTIFIED BY '{password}';
GRANT ALL PRIVILEGES ON dolphinscheduler.* TO '{user}'@'localhost' IDENTIFIED BY
'{password}';
@ -30,39 +49,84 @@
##### 二 Ambari安装Dolphin Scheduler
#### Ambari Install Dolphin Scheduler
- **NOTE: You have to install zookeeper first**
1. Ambari界面安装Dolphin Scheduler
1. Install Dolphin Scheduler on ambari web interface
![](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_001.png)
2. 选择Dolphin Scheduler的Master安装的节点
2. Select the nodes for the Dolphin Scheduler's Master installation
![](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_002.png)
3. 配置Dolphin Scheduler的Worker、Api、Logger、Alert安装的节点
3. Configure the Dolphin Scheduler's nodes for Worker, Api, Logger, Alert installation
![](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_003.png)
4. 设置Dolphin Scheduler服务的安装用户(**步骤一中创建的**)及所属的用户组
4. Set the installation users of the Dolphin Scheduler service (created in step 1) and the user groups they belong to
![](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_004.png)
5. 配置数据库的信息(和步骤一中初始化数据库中一致)
5. System Env Optimization will export some system environment config. Modify according to actual situation
![](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_020.png)
6. Configure the database information (same as in the initialization database in step 1)
![](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_005.png)
6. 配置其它的信息--如果需要的话
7. Configure additional information if needed
![](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_006.png)
![](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_007.png)
7. 正常执行接下来的步骤
8. Perform the next steps as normal
![](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_008.png)
8. 安装成功后的界面
9. The interface after successful installation
![](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_009.png)
------
#### Add components to the node through Ambari -- for example, add a DS Worker
***NOTE***: DS Logger is the installation dependent component of DS Worker in Dolphin's Ambari installation (need to add installation first; Prevent the Job log on the corresponding Worker from being checked)
1. Locate the component node to add -- for example, node ark3
![DS2_AMBARI_011](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_011.png)
2. Add components -- the drop-down list is all addable
![DS2_AMBARI_012](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_012.png)
3. Confirm component addition
![DS2_AMBARI_013](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_013.png)
4. After adding DS Worker and DS Logger components
![DS2_AMBARI_015](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_015.png)
5. Start the component
![DS2_AMBARI_016](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_016.png)
#### Remove the component from the node with Ambari
1. Stop the component in the corresponding node
![DS2_AMBARI_018](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_018.png)
2. Remove components
![DS2_AMBARI_019](https://github.com/apache/incubator-dolphinscheduler-website/blob/master/img/ambari-plugin/DS2_AMBARI_019.png)

ambari_plugin/common-services/DOLPHIN/1.2.1/package/scripts/params.py (4)

@ -76,8 +76,8 @@ else:
dolphin_alert_map = {}
wechat_push_url = 'https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token'
wechat_token_url = 'https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret'
wechat_team_send_msg = '{\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}'
wechat_user_send_msg = '{\"touser\":\"$toUser\",\"agentid\":\"$agentId\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"$msg\"}}'
wechat_team_send_msg = '{\"toparty\":\"{toParty}\",\"agentid\":\"{agentId}\",\"msgtype\":\"text\",\"text\":{\"content\":\"{msg}\"},\"safe\":\"0\"}'
wechat_user_send_msg = '{\"touser\":\"{toUser}\",\"agentid\":\"{agentId}\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"{msg}\"}}'
dolphin_alert_map['enterprise.wechat.push.ur'] = wechat_push_url
dolphin_alert_map['enterprise.wechat.token.url'] = wechat_token_url
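
The change above (mirrored in the 1.3.0 `params.py` and in `alert.properties.tpl` further down) swaps the `$name` placeholders in the Enterprise WeChat message templates for `{name}`-style ones. As a rough, hypothetical illustration only (the real substitution is presumably done by the alert module's `EnterpriseWeChatUtils`, not by this Ambari script), filling such a template amounts to plain string replacement, since `str.format` would trip over the literal JSON braces:

```python
# Illustrative only: fill a "{name}"-style WeChat template by plain string
# replacement. The sample values below are made up.
team_send_msg = ('{"toparty":"{toParty}","agentid":"{agentId}",'
                 '"msgtype":"text","text":{"content":"{msg}"},"safe":"0"}')

def fill_template(template: str, **values: str) -> str:
    for key, value in values.items():
        template = template.replace("{" + key + "}", value)
    return template

print(fill_template(team_send_msg, toParty="1", agentId="1000002", msg="task failed"))
```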

ambari_plugin/common-services/DOLPHIN/1.3.0/alerts.json (62)

@ -28,6 +28,33 @@
}
}
],
"DOLPHIN_LOGGER": [
{
"name": "dolphin_logger_port_check",
"label": "dolphin_logger_port_check",
"description": "dolphin_logger_port_check.",
"interval": 10,
"scope": "ANY",
"source": {
"type": "PORT",
"uri": "{{dolphin-common/loggerserver.rpc.port}}",
"default_port": 50051,
"reporting": {
"ok": {
"text": "TCP OK - {0:.3f}s response on port {1}"
},
"warning": {
"text": "TCP OK - {0:.3f}s response on port {1}",
"value": 1.5
},
"critical": {
"text": "Connection failed: {0} to {1}:{2}",
"value": 5.0
}
}
}
}
],
"DOLPHIN_MASTER": [
{
"name": "DOLPHIN_MASTER_CHECK",
@ -96,7 +123,7 @@
],
"DOLPHIN_ALERT": [
{
"name": "DOLPHIN_ALERT_CHECK",
"name": "DOLPHIN_DOLPHIN_ALERT_CHECK",
"label": "check dolphin scheduler alert status",
"description": "",
"interval":10,
@ -126,39 +153,6 @@
]
}
}
],
"DOLPHIN_LOGGER": [
{
"name": "DOLPHIN_LOGGER_CHECK",
"label": "check dolphin scheduler logger status",
"description": "",
"interval":10,
"scope": "HOST",
"enabled": true,
"source": {
"type": "SCRIPT",
"path": "DOLPHIN/1.3.0/package/alerts/alert_dolphin_scheduler_status.py",
"parameters": [
{
"name": "connection.timeout",
"display_name": "Connection Timeout",
"value": 5.0,
"type": "NUMERIC",
"description": "The maximum time before this alert is considered to be CRITICAL",
"units": "seconds",
"threshold": "CRITICAL"
},
{
"name": "alertName",
"display_name": "alertName",
"value": "DOLPHIN_LOGGER",
"type": "STRING",
"description": "alert name"
}
]
}
}
]
}
}
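
The reworked `alerts.json` gives DOLPHIN_LOGGER a built-in PORT alert against `{{dolphin-common/loggerserver.rpc.port}}` (default 50051) in place of the previous script-based check. For intuition only, a PORT-style probe is essentially a timed TCP connect; the sketch below is a hypothetical standalone equivalent, not Ambari's implementation (only the 50051 default and the 1.5 s / 5.0 s thresholds come from the JSON above):

```python
# Hypothetical TCP port health check in the spirit of the PORT alert above.
# The host is a placeholder; 50051 and the thresholds mirror alerts.json.
import socket
import time

def check_port(host: str, port: int, warning: float = 1.5, critical: float = 5.0) -> str:
    start = time.time()
    try:
        with socket.create_connection((host, port), timeout=critical):
            elapsed = time.time() - start
    except OSError as exc:
        return f"CRITICAL - Connection failed: {exc} to {host}:{port}"
    state = "OK" if elapsed < warning else "WARNING"
    return f"{state} - TCP {elapsed:.3f}s response on port {port}"

print(check_port("127.0.0.1", 50051))
```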

ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-alert.xml (6)

@ -21,12 +21,6 @@
<description>alert type is EMAIL/SMS</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>alert.template</name>
<value>html</value>
<description>alter msg template, default is html template</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mail.protocol</name>
<value>SMTP</value>

ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-datasource.xml (261)

@ -203,265 +203,4 @@
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.mapper-locations</name>
<value>classpath*:/org.apache.dolphinscheduler.dao.mapper/*.xml</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.typeEnumsPackage</name>
<value>org.apache.dolphinscheduler.*.enums</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.typeAliasesPackage</name>
<value>org.apache.dolphinscheduler.dao.entity</value>
<description>
Entity scan, where multiple packages are separated by a comma or semicolon
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.global-config.db-config.id-type</name>
<value>AUTO</value>
<value-attributes>
<type>value-list</type>
<entries>
<entry>
<value>AUTO</value>
<label>AUTO</label>
</entry>
<entry>
<value>INPUT</value>
<label>INPUT</label>
</entry>
<entry>
<value>ID_WORKER</value>
<label>ID_WORKER</label>
</entry>
<entry>
<value>UUID</value>
<label>UUID</label>
</entry>
</entries>
<selection-cardinality>1</selection-cardinality>
</value-attributes>
<description>
Primary key type AUTO:" database ID AUTO ",
INPUT:" user INPUT ID",
ID_WORKER:" global unique ID (numeric type unique ID)",
UUID:" global unique ID UUID";
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.global-config.db-config.field-strategy</name>
<value>NOT_NULL</value>
<value-attributes>
<type>value-list</type>
<entries>
<entry>
<value>IGNORED</value>
<label>IGNORED</label>
</entry>
<entry>
<value>NOT_NULL</value>
<label>NOT_NULL</label>
</entry>
<entry>
<value>NOT_EMPTY</value>
<label>NOT_EMPTY</label>
</entry>
</entries>
<selection-cardinality>1</selection-cardinality>
</value-attributes>
<description>
Field policy IGNORED:" ignore judgment ",
NOT_NULL:" not NULL judgment "),
NOT_EMPTY:" not NULL judgment"
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.global-config.db-config.column-underline</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.global-config.db-config.logic-delete-value</name>
<value>1</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.global-config.db-config.logic-not-delete-value</name>
<value>0</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.global-config.db-config.banner</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.configuration.map-underscore-to-camel-case</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.configuration.cache-enabled</name>
<value>false</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.configuration.call-setters-on-nulls</name>
<value>true</value>
<value-attributes>
<type>boolean</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>mybatis-plus.configuration.jdbc-type-for-null</name>
<value>null</value>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.exec.threads</name>
<value>100</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.exec.task.num</name>
<value>20</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.heartbeat.interval</name>
<value>10</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.task.commit.retryTimes</name>
<value>5</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.task.commit.interval</name>
<value>1000</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.max.cpuload.avg</name>
<value>100</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>master.reserved.memory</name>
<value>0.1</value>
<value-attributes>
<type>float</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>worker.exec.threads</name>
<value>100</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>worker.heartbeat.interval</name>
<value>10</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>worker.fetch.task.num</name>
<value>3</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>worker.max.cpuload.avg</name>
<value>100</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>worker.reserved.memory</name>
<value>0.1</value>
<value-attributes>
<type>float</type>
</value-attributes>
<description></description>
<on-ambari-upgrade add="true"/>
</property>
</configuration>

ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-worker.xml (9)

@ -33,15 +33,6 @@
<description>worker heartbeat interval</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>worker.fetch.task.num</name>
<value>3</value>
<value-attributes>
<type>int</type>
</value-attributes>
<description>submit the number of tasks at a time</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>worker.max.cpuload.avg</name>
<value>100</value>

ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-zookeeper.xml (8)

@ -15,14 +15,6 @@
~ limitations under the License.
-->
<configuration>
<property>
<name>dolphinscheduler.queue.impl</name>
<value>zookeeper</value>
<description>
Task queue implementation, default "zookeeper"
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>zookeeper.dolphinscheduler.root</name>
<value>/dolphinscheduler</value>

ambari_plugin/common-services/DOLPHIN/1.3.0/metainfo.xml (4)

@ -103,7 +103,7 @@
<osFamily>any</osFamily>
<packages>
<package>
<name>apache-dolphinscheduler-incubating-1.3.0*</name>
<name>apache-dolphinscheduler-incubating*</name>
</package>
</packages>
</osSpecific>
@ -134,4 +134,4 @@
</quickLinksConfigurations>
</service>
</services>
</metainfo>
</metainfo>

ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_alert_service.py (3)

@ -26,7 +26,8 @@ class DolphinAlertService(Script):
import params
env.set_params(params)
self.install_packages(env)
Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True)
Execute(('chmod', '-R', '777', params.dolphin_home))
Execute(('chown', '-R', params.dolphin_user + ":" + params.dolphin_group, params.dolphin_home))
def configure(self, env):
import params
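
In each of the five 1.3.0 service scripts, the single `Execute` that ran `chmod` as the DolphinScheduler user via sudo is replaced by a plain recursive `chmod` followed by an explicit `chown` to the DolphinScheduler user and group. Purely as a hypothetical standalone sketch of that sequence (not Ambari's `Execute` resource; path, user, and group are placeholders):

```python
# Illustrative equivalent of the two Execute calls above, outside Ambari.
# dolphin_home, dolphin_user, and dolphin_group are placeholder values.
import subprocess

dolphin_home = "/opt/soft/dolphinscheduler"
dolphin_user = "dolphin"
dolphin_group = "dolphin"

# Open up permissions recursively, then hand ownership to the service account.
subprocess.run(["chmod", "-R", "777", dolphin_home], check=True)
subprocess.run(["chown", "-R", f"{dolphin_user}:{dolphin_group}", dolphin_home], check=True)
```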

ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_api_service.py (3)

@ -26,7 +26,8 @@ class DolphinApiService(Script):
import params
env.set_params(params)
self.install_packages(env)
Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True)
Execute(('chmod', '-R', '777', params.dolphin_home))
Execute(('chown', '-R', params.dolphin_user + ":" + params.dolphin_group, params.dolphin_home))
def configure(self, env):
import params

ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_logger_service.py (4)

@ -26,8 +26,8 @@ class DolphinLoggerService(Script):
import params
env.set_params(params)
self.install_packages(env)
Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True)
Execute(('chmod', '-R', '777', params.dolphin_home))
Execute(('chown', '-R', params.dolphin_user + ":" + params.dolphin_group, params.dolphin_home))
def configure(self, env):
import params
params.pika_slave = True

ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_master_service.py (3)

@ -27,7 +27,8 @@ class DolphinMasterService(Script):
import params
env.set_params(params)
self.install_packages(env)
Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True)
Execute(('chmod', '-R', '777', params.dolphin_home))
Execute(('chown', '-R', params.dolphin_user + ":" + params.dolphin_group, params.dolphin_home))
def configure(self, env):
import params

ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_worker_service.py (3)

@ -26,7 +26,8 @@ class DolphinWorkerService(Script):
import params
env.set_params(params)
self.install_packages(env)
Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True)
Execute(('chmod', '-R', '777', params.dolphin_home))
Execute(('chown', '-R', params.dolphin_user + ":" + params.dolphin_group, params.dolphin_home))
def configure(self, env):
import params

ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/params.py (15)

@ -77,8 +77,8 @@ else:
dolphin_alert_map = {}
wechat_push_url = 'https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token'
wechat_token_url = 'https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret'
wechat_team_send_msg = '{\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}'
wechat_user_send_msg = '{\"touser\":\"$toUser\",\"agentid\":\"$agentId\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"$msg\"}}'
wechat_team_send_msg = '{\"toparty\":\"{toParty}\",\"agentid\":\"{agentId}\",\"msgtype\":\"text\",\"text\":{\"content\":\"{msg}\"},\"safe\":\"0\"}'
wechat_user_send_msg = '{\"touser\":\"{toUser}\",\"agentid\":\"{agentId}\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"{msg}\"}}'
dolphin_alert_config_map = config['configurations']['dolphin-alert']
@ -114,10 +114,6 @@ else:
dolphin_common_map_tmp = config['configurations']['dolphin-common']
data_basedir_path = dolphin_common_map_tmp['data.basedir.path']
process_exec_basepath = data_basedir_path + '/exec'
data_download_basedir_path = data_basedir_path + '/download'
dolphin_common_map['process.exec.basepath'] = process_exec_basepath
dolphin_common_map['data.download.basedir.path'] = data_download_basedir_path
dolphin_common_map['dolphinscheduler.env.path'] = dolphin_env_path
dolphin_common_map.update(config['configurations']['dolphin-common'])
@ -149,6 +145,11 @@ if len(zookeeperHosts) > 0 and "clientPort" in config['configurations']['zoo.cfg
zookeeperPort = ":" + clientPort + ","
dolphin_zookeeper_map['zookeeper.quorum'] = zookeeperPort.join(zookeeperHosts) + ":" + clientPort
dolphin_zookeeper_map.update(config['configurations']['dolphin-zookeeper'])
if 'spring.servlet.multipart.max-file-size' in dolphin_app_api_map:
file_size = dolphin_app_api_map['spring.servlet.multipart.max-file-size']
dolphin_app_api_map['spring.servlet.multipart.max-file-size'] = file_size + "MB"
if 'spring.servlet.multipart.max-request-size' in dolphin_app_api_map:
request_size = dolphin_app_api_map['spring.servlet.multipart.max-request-size']
dolphin_app_api_map['spring.servlet.multipart.max-request-size'] = request_size + "MB"

docker/build/Dockerfile (2)

@ -42,7 +42,7 @@ ADD ./apache-dolphinscheduler-incubating-${VERSION}-dolphinscheduler-bin.tar.gz
RUN mv /opt/apache-dolphinscheduler-incubating-${VERSION}-dolphinscheduler-bin/ /opt/dolphinscheduler/
ENV DOLPHINSCHEDULER_HOME /opt/dolphinscheduler
#4. install pg
#4. install database, if use mysql as your backend database, the `mysql-client` package should be installed
RUN apk add postgresql postgresql-contrib
#5. modify nginx
RUN echo "daemon off;" >> /etc/nginx/nginx.conf && \

docker/build/README.md (66)

@ -16,7 +16,7 @@ Official Website: https://dolphinscheduler.apache.org
#### You can start a dolphinscheduler instance
```
$ docker run -dit --name dolphinscheduler \
-e POSTGRESQL_USERNAME=test -e POSTGRESQL_PASSWORD=test -e POSTGRESQL_DATABASE=dolphinscheduler \
-e DATABASE_USERNAME=test -e DATABASE_PASSWORD=test -e DATABASE_DATABASE=dolphinscheduler \
-p 8888:8888 \
dolphinscheduler all
```
@ -25,14 +25,14 @@ The default postgres user `root`, postgres password `root` and database `dolphin
The default zookeeper is created in the `startup.sh`.
#### Or via Environment Variables **`POSTGRESQL_HOST`** **`POSTGRESQL_PORT`** **`POSTGRESQL_DATABASE`** **`ZOOKEEPER_QUORUM`**
#### Or via Environment Variables **`DATABASE_HOST`** **`DATABASE_PORT`** **`DATABASE_DATABASE`** **`ZOOKEEPER_QUORUM`**
You can specify **existing postgres service**. Example:
```
$ docker run -dit --name dolphinscheduler \
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
-e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
-p 8888:8888 \
dolphinscheduler all
```
@ -42,7 +42,7 @@ You can specify **existing zookeeper service**. Example:
```
$ docker run -dit --name dolphinscheduler \
-e ZOOKEEPER_QUORUM="l92.168.x.x:2181"
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" -e POSTGRESQL_DATABASE="dolphinscheduler" \
-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" -e DATABASE_DATABASE="dolphinscheduler" \
-p 8888:8888 \
dolphinscheduler all
```
@ -56,8 +56,8 @@ You can start a standalone dolphinscheduler server.
```
$ docker run -dit --name dolphinscheduler \
-e ZOOKEEPER_QUORUM="l92.168.x.x:2181"
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
-e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
dolphinscheduler master-server
```
@ -66,8 +66,8 @@ dolphinscheduler master-server
```
$ docker run -dit --name dolphinscheduler \
-e ZOOKEEPER_QUORUM="l92.168.x.x:2181"
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
-e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
dolphinscheduler worker-server
```
@ -75,8 +75,8 @@ dolphinscheduler worker-server
```
$ docker run -dit --name dolphinscheduler \
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
-e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
-p 12345:12345 \
dolphinscheduler api-server
```
@ -85,8 +85,8 @@ dolphinscheduler api-server
```
$ docker run -dit --name dolphinscheduler \
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
-e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
dolphinscheduler alert-server
```
@ -99,7 +99,7 @@ $ docker run -dit --name dolphinscheduler \
dolphinscheduler frontend
```
**Note**: You must be specify `POSTGRESQL_HOST` `POSTGRESQL_PORT` `POSTGRESQL_DATABASE` `POSTGRESQL_USERNAME` `POSTGRESQL_PASSWORD` `ZOOKEEPER_QUORUM` when start a standalone dolphinscheduler server.
**Note**: You must be specify `DATABASE_HOST` `DATABASE_PORT` `DATABASE_DATABASE` `DATABASE_USERNAME` `DATABASE_PASSWORD` `ZOOKEEPER_QUORUM` when start a standalone dolphinscheduler server.
## How to build a docker image
@ -124,33 +124,51 @@ Please read `./docker/build/hooks/build` `./docker/build/hooks/build.bat` script
The Dolphin Scheduler image uses several environment variables which are easy to miss. While none of the variables are required, they may significantly aid you in using the image.
**`POSTGRESQL_HOST`**
**`DATABASE_TYPE`**
This environment variable sets the host for PostgreSQL. The default value is `127.0.0.1`.
This environment variable sets the type for database. The default value is `postgresql`.
**Note**: You must be specify it when start a standalone dolphinscheduler server. Like `master-server`, `worker-server`, `api-server`, `alert-server`.
**`POSTGRESQL_PORT`**
**`DATABASE_DRIVER`**
This environment variable sets the port for PostgreSQL. The default value is `5432`.
This environment variable sets the type for database. The default value is `org.postgresql.Driver`.
**Note**: You must be specify it when start a standalone dolphinscheduler server. Like `master-server`, `worker-server`, `api-server`, `alert-server`.
**`DATABASE_HOST`**
This environment variable sets the host for database. The default value is `127.0.0.1`.
**Note**: You must be specify it when start a standalone dolphinscheduler server. Like `master-server`, `worker-server`, `api-server`, `alert-server`.
**`DATABASE_PORT`**
This environment variable sets the port for database. The default value is `5432`.
**Note**: You must be specify it when start a standalone dolphinscheduler server. Like `master-server`, `worker-server`, `api-server`, `alert-server`.
**`POSTGRESQL_USERNAME`**
**`DATABASE_USERNAME`**
This environment variable sets the username for database. The default value is `root`.
**Note**: You must be specify it when start a standalone dolphinscheduler server. Like `master-server`, `worker-server`, `api-server`, `alert-server`.
**`DATABASE_PASSWORD`**
This environment variable sets the username for PostgreSQL. The default value is `root`.
This environment variable sets the password for database. The default value is `root`.
**Note**: You must be specify it when start a standalone dolphinscheduler server. Like `master-server`, `worker-server`, `api-server`, `alert-server`.
**`POSTGRESQL_PASSWORD`**
**`DATABASE_DATABASE`**
This environment variable sets the password for PostgreSQL. The default value is `root`.
This environment variable sets the database for database. The default value is `dolphinscheduler`.
**Note**: You must be specify it when start a standalone dolphinscheduler server. Like `master-server`, `worker-server`, `api-server`, `alert-server`.
**`POSTGRESQL_DATABASE`**
**`DATABASE_PARAMS`**
This environment variable sets the database for PostgreSQL. The default value is `dolphinscheduler`.
This environment variable sets the database for database. The default value is `characterEncoding=utf8`.
**Note**: You must be specify it when start a standalone dolphinscheduler server. Like `master-server`, `worker-server`, `api-server`, `alert-server`.
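
Taken together, the renamed `DATABASE_*` variables describe a generic JDBC datasource rather than a PostgreSQL-specific one. Purely as a hypothetical illustration (the image's startup scripts, e.g. `startup-init-conf.sh`, do the actual templating), the documented defaults compose into a JDBC URL along these lines:

```python
# Hypothetical sketch: build a JDBC URL from the DATABASE_* variables
# documented above, falling back to the defaults the README lists.
import os

db_type = os.getenv("DATABASE_TYPE", "postgresql")
host = os.getenv("DATABASE_HOST", "127.0.0.1")
port = os.getenv("DATABASE_PORT", "5432")
database = os.getenv("DATABASE_DATABASE", "dolphinscheduler")
params = os.getenv("DATABASE_PARAMS", "characterEncoding=utf8")

jdbc_url = f"jdbc:{db_type}://{host}:{port}/{database}?{params}"
print(jdbc_url)  # jdbc:postgresql://127.0.0.1:5432/dolphinscheduler?characterEncoding=utf8
```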

docker/build/README_zh_CN.md (66)

@ -16,7 +16,7 @@ Official Website: https://dolphinscheduler.apache.org
#### 你可以运行一个dolphinscheduler实例
```
$ docker run -dit --name dolphinscheduler \
-e POSTGRESQL_USERNAME=test -e POSTGRESQL_PASSWORD=test -e POSTGRESQL_DATABASE=dolphinscheduler \
-e DATABASE_USERNAME=test -e DATABASE_PASSWORD=test -e DATABASE_DATABASE=dolphinscheduler \
-p 8888:8888 \
dolphinscheduler all
```
@ -25,14 +25,14 @@ dolphinscheduler all
同时,默认的`Zookeeper`也会在`startup.sh`脚本中被创建。
#### 或者通过环境变量 **`POSTGRESQL_HOST`** **`POSTGRESQL_PORT`** **`ZOOKEEPER_QUORUM`** 使用已存在的服务
#### 或者通过环境变量 **`DATABASE_HOST`** **`DATABASE_PORT`** **`ZOOKEEPER_QUORUM`** 使用已存在的服务
你可以指定一个已经存在的 **`Postgres`** 服务. 如下:
```
$ docker run -dit --name dolphinscheduler \
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
-e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
-p 8888:8888 \
dolphinscheduler all
```
@ -42,7 +42,7 @@ dolphinscheduler all
```
$ docker run -dit --name dolphinscheduler \
-e ZOOKEEPER_QUORUM="l92.168.x.x:2181"
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" -e POSTGRESQL_DATABASE="dolphinscheduler" \
-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" -e DATABASE_DATABASE="dolphinscheduler" \
-p 8888:8888 \
dolphinscheduler all
```
@ -56,8 +56,8 @@ dolphinscheduler all
```
$ docker run -dit --name dolphinscheduler \
-e ZOOKEEPER_QUORUM="l92.168.x.x:2181"
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
-e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
dolphinscheduler master-server
```
@ -66,8 +66,8 @@ dolphinscheduler master-server
```
$ docker run -dit --name dolphinscheduler \
-e ZOOKEEPER_QUORUM="l92.168.x.x:2181"
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
-e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
dolphinscheduler worker-server
```
@ -75,8 +75,8 @@ dolphinscheduler worker-server
```
$ docker run -dit --name dolphinscheduler \
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
-e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
-p 12345:12345 \
dolphinscheduler api-server
```
@ -85,8 +85,8 @@ dolphinscheduler api-server
```
$ docker run -dit --name dolphinscheduler \
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
-e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
dolphinscheduler alert-server
```
@ -99,7 +99,7 @@ $ docker run -dit --name dolphinscheduler \
dolphinscheduler frontend
```
**Note**: When you run only some of the dolphinscheduler services, you must specify these environment variables: `POSTGRESQL_HOST` `POSTGRESQL_PORT` `POSTGRESQL_DATABASE` `POSTGRESQL_USERNAME` `POSTGRESQL_PASSWORD` `ZOOKEEPER_QUORUM`
**Note**: When you run only some of the dolphinscheduler services, you must specify these environment variables: `DATABASE_HOST` `DATABASE_PORT` `DATABASE_DATABASE` `DATABASE_USERNAME` `DATABASE_PASSWORD` `ZOOKEEPER_QUORUM`
## How to build a docker image
@ -124,33 +124,51 @@ c:\incubator-dolphinscheduler>.\docker\build\hooks\build.bat
The Dolphin Scheduler image uses several environment variables that are easy to overlook. These variables are not required, but they help you configure the image more easily and define the corresponding service configuration to suit your needs.
**`POSTGRESQL_HOST`**
**`DATABASE_TYPE`**
Configure the `HOST` of `PostgreSQL`, default value `127.0.0.1`
Configure the `TYPE` of the `database`, default value `postgresql`
**Note**: You must specify this environment variable when running the `master-server`, `worker-server`, `api-server` or `alert-server` services of `dolphinscheduler`, so that you can better set up distributed services.
**`POSTGRESQL_PORT`**
**`DATABASE_DRIVER`**
Configure the `PORT` of `PostgreSQL`, default value `5432`
Configure the `DRIVER` of the `database`, default value `org.postgresql.Driver`
**Note**: You must specify this environment variable when running the `master-server`, `worker-server`, `api-server` or `alert-server` services of `dolphinscheduler`, so that you can better set up distributed services.
**`POSTGRESQL_USERNAME`**
**`DATABASE_HOST`**
Configure the `USERNAME` of `PostgreSQL`, default value `root`
Configure the `HOST` of the `database`, default value `127.0.0.1`
**Note**: You must specify this environment variable when running the `master-server`, `worker-server`, `api-server` or `alert-server` services of `dolphinscheduler`, so that you can better set up distributed services.
**`POSTGRESQL_PASSWORD`**
**`DATABASE_PORT`**
Configure the `PASSWORD` of `PostgreSQL`, default value `root`
Configure the `PORT` of the `database`, default value `5432`
**Note**: You must specify this environment variable when running the `master-server`, `worker-server`, `api-server` or `alert-server` services of `dolphinscheduler`, so that you can better set up distributed services.
**`POSTGRESQL_DATABASE`**
**`DATABASE_USERNAME`**
Configure the `DATABASE` of `PostgreSQL`, default value `dolphinscheduler`
Configure the `USERNAME` of the `database`, default value `root`
**Note**: You must specify this environment variable when running the `master-server`, `worker-server`, `api-server` or `alert-server` services of `dolphinscheduler`, so that you can better set up distributed services.
**`DATABASE_PASSWORD`**
Configure the `PASSWORD` of the `database`, default value `root`
**Note**: You must specify this environment variable when running the `master-server`, `worker-server`, `api-server` or `alert-server` services of `dolphinscheduler`, so that you can better set up distributed services.
**`DATABASE_DATABASE`**
Configure the `DATABASE` of the `database`, default value `dolphinscheduler`
**Note**: You must specify this environment variable when running the `master-server`, `worker-server`, `api-server` or `alert-server` services of `dolphinscheduler`, so that you can better set up distributed services.
**`DATABASE_PARAMS`**
Configure the `PARAMS` of the `database`, default value `characterEncoding=utf8`
**Note**: You must specify this environment variable when running the `master-server`, `worker-server`, `api-server` or `alert-server` services of `dolphinscheduler`, so that you can better set up distributed services.
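As a quick sketch of how these variables combine, the command below starts the `all` service against a hypothetical external MySQL instance; the host, port, credentials and driver value are placeholders, and it assumes a MySQL connector is available to the image.
```
$ docker run -dit --name dolphinscheduler \
-e DATABASE_TYPE="mysql" -e DATABASE_DRIVER="com.mysql.jdbc.Driver" \
-e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="3306" -e DATABASE_DATABASE="dolphinscheduler" \
-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
-e DATABASE_PARAMS="useUnicode=true&characterEncoding=UTF-8" \
-p 8888:8888 \
dolphinscheduler all
```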

14
docker/build/conf/dolphinscheduler/alert.properties.tpl

@ -35,16 +35,22 @@ mail.smtp.ssl.trust=${MAIL_SMTP_SSL_TRUST}
#xls file path,need create if not exist
xls.file.path=${XLS_FILE_PATH}
# plugins dir
plugin.dir=${ALERT_PLUGIN_DIR}
# Enterprise WeChat configuration
enterprise.wechat.enable=${ENTERPRISE_WECHAT_ENABLE}
enterprise.wechat.corp.id=${ENTERPRISE_WECHAT_CORP_ID}
enterprise.wechat.secret=${ENTERPRISE_WECHAT_SECRET}
enterprise.wechat.agent.id=${ENTERPRISE_WECHAT_AGENT_ID}
enterprise.wechat.users=${ENTERPRISE_WECHAT_USERS}
enterprise.wechat.token.url=https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret
enterprise.wechat.push.url=https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token
enterprise.wechat.team.send.msg={\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}
enterprise.wechat.user.send.msg={\"touser\":\"$toUser\",\"agentid\":\"$agentId\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"$msg\"}}
enterprise.wechat.token.url=https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid={corpId}&corpsecret={secret}
enterprise.wechat.push.url=https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token={token}
enterprise.wechat.team.send.msg={\"toparty\":\"{toParty}\",\"agentid\":\"{agentId}\",\"msgtype\":\"text\",\"text\":{\"content\":\"{msg}\"},\"safe\":\"0\"}
enterprise.wechat.user.send.msg={\"touser\":\"{toUser}\",\"agentid\":\"{agentId}\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"{msg}\"}}
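The `{corpId}`/`{secret}` placeholders above are substituted at runtime. As a purely manual sanity check outside the image, the token endpoint can be called directly once real credentials are filled in:
```
# CORP_ID and SECRET are placeholders for real Enterprise WeChat credentials
curl -s "https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=CORP_ID&corpsecret=SECRET"
```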

12
docker/build/conf/dolphinscheduler/common.properties.tpl

@ -25,13 +25,13 @@ dolphinscheduler.env.path=${DOLPHINSCHEDULER_ENV_PATH}
data.basedir.path=${DOLPHINSCHEDULER_DATA_BASEDIR_PATH}
# resource upload startup type : HDFS,S3,NONE
resource.storage.type=NONE
resource.storage.type=${RESOURCE_STORAGE_TYPE}
#============================================================================
# HDFS
#============================================================================
# resource store on HDFS/S3 path, resource files will be stored under this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and has read/write permissions. "/dolphinscheduler" is recommended
#resource.upload.path=/dolphinscheduler
resource.upload.path=${RESOURCE_UPLOAD_PATH}
# whether kerberos starts
#hadoop.security.authentication.startup.state=false
@ -58,16 +58,16 @@ kerberos.expire.time=7
# S3
#============================================================================
# if resource.storage.type=S3,the value like: s3a://dolphinscheduler ; if resource.storage.type=HDFS, When namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml to conf dir
fs.defaultFS=hdfs://mycluster:8020
fs.defaultFS=${FS_DEFAULT_FS}
# if resource.storage.type=S3,s3 endpoint
#fs.s3a.endpoint=http://192.168.199.91:9010
fs.s3a.endpoint=${FS_S3A_ENDPOINT}
# if resource.storage.type=S3,s3 access key
#fs.s3a.access.key=A3DXS30FO22544RE
fs.s3a.access.key=${FS_S3A_ACCESS_KEY}
# if resource.storage.type=S3,s3 secret key
#fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK
fs.s3a.secret.key=${FS_S3A_SECRET_KEY}
# if not use hadoop resourcemanager, please keep default value; if resourcemanager HA enable, please type the HA ips ; if resourcemanager is single, make this value empty TODO
yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx
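Because these keys are now templated, resource storage can be enabled purely from the container environment. A minimal sketch, assuming an S3-compatible endpoint and an already-created bucket (all values below are placeholders):
```
$ docker run -dit --name dolphinscheduler \
-e RESOURCE_STORAGE_TYPE="S3" -e RESOURCE_UPLOAD_PATH="/dolphinscheduler" \
-e FS_DEFAULT_FS="s3a://my-bucket" -e FS_S3A_ENDPOINT="http://192.168.x.x:9010" \
-e FS_S3A_ACCESS_KEY="xxxxxxx" -e FS_S3A_SECRET_KEY="xxxxxxx" \
-p 8888:8888 \
dolphinscheduler all
```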

15
docker/build/conf/dolphinscheduler/datasource.properties.tpl

@ -15,16 +15,11 @@
# limitations under the License.
#
# mysql
#spring.datasource.driver-class-name=com.mysql.jdbc.Driver
#spring.datasource.url=jdbc:mysql://192.168.xx.xx:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8
# postgre
spring.datasource.driver-class-name=org.postgresql.Driver
spring.datasource.url=jdbc:postgresql://${POSTGRESQL_HOST}:${POSTGRESQL_PORT}/${POSTGRESQL_DATABASE}?characterEncoding=utf8
spring.datasource.username=${POSTGRESQL_USERNAME}
spring.datasource.password=${POSTGRESQL_PASSWORD}
# db
spring.datasource.driver-class-name=${DATABASE_DRIVER}
spring.datasource.url=jdbc:${DATABASE_TYPE}://${DATABASE_HOST}:${DATABASE_PORT}/${DATABASE_DATABASE}?${DATABASE_PARAMS}
spring.datasource.username=${DATABASE_USERNAME}
spring.datasource.password=${DATABASE_PASSWORD}
## base spring data source configuration todo need to remove
#spring.datasource.type=com.alibaba.druid.pool.DruidDataSource
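With the defaults from `startup-init-conf.sh`, the templated URL above renders to the first line below; a MySQL override (hypothetical host, mirroring the commented example) would render roughly to the second:
```
# default (postgresql)
spring.datasource.url=jdbc:postgresql://127.0.0.1:5432/dolphinscheduler?characterEncoding=utf8
# mysql override (placeholder host and params)
spring.datasource.url=jdbc:mysql://192.168.x.x:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8
```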

2
docker/build/conf/dolphinscheduler/logback/logback-alert.xml

@ -46,7 +46,7 @@
<root level="INFO">
<appender-ref ref="STDOUT"/>
<appender-ref ref="APILOGFILE"/>
<appender-ref ref="ALERTLOGFILE"/>
</root>
</configuration>

2
docker/build/conf/dolphinscheduler/zookeeper.properties.tpl

@ -19,7 +19,7 @@
zookeeper.quorum=${ZOOKEEPER_QUORUM}
# dolphinscheduler root directory
#zookeeper.dolphinscheduler.root=/dolphinscheduler
zookeeper.dolphinscheduler.root=${ZOOKEEPER_ROOT}
# dolphinscheduler failover directory
#zookeeper.session.timeout=60000

23
docker/build/startup-init-conf.sh

@ -24,22 +24,33 @@ echo "init env variables"
#============================================================================
# Database Source
#============================================================================
export POSTGRESQL_HOST=${POSTGRESQL_HOST:-"127.0.0.1"}
export POSTGRESQL_PORT=${POSTGRESQL_PORT:-"5432"}
export POSTGRESQL_USERNAME=${POSTGRESQL_USERNAME:-"root"}
export POSTGRESQL_PASSWORD=${POSTGRESQL_PASSWORD:-"root"}
export POSTGRESQL_DATABASE=${POSTGRESQL_DATABASE:-"dolphinscheduler"}
export DATABASE_HOST=${DATABASE_HOST:-"127.0.0.1"}
export DATABASE_PORT=${DATABASE_PORT:-"5432"}
export DATABASE_USERNAME=${DATABASE_USERNAME:-"root"}
export DATABASE_PASSWORD=${DATABASE_PASSWORD:-"root"}
export DATABASE_DATABASE=${DATABASE_DATABASE:-"dolphinscheduler"}
export DATABASE_TYPE=${DATABASE_TYPE:-"postgresql"}
export DATABASE_DRIVER=${DATABASE_DRIVER:-"org.postgresql.Driver"}
export DATABASE_PARAMS=${DATABASE_PARAMS:-"characterEncoding=utf8"}
#============================================================================
# System
#============================================================================
export DOLPHINSCHEDULER_ENV_PATH=${DOLPHINSCHEDULER_ENV_PATH:-"/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh"}
export DOLPHINSCHEDULER_DATA_BASEDIR_PATH=${DOLPHINSCHEDULER_DATA_BASEDIR_PATH:-"/tmp/dolphinscheduler"}
export DOLPHINSCHEDULER_OPTS=${DOLPHINSCHEDULER_OPTS:-""}
export RESOURCE_STORAGE_TYPE=${RESOURCE_STORAGE_TYPE:-"NONE"}
export RESOURCE_UPLOAD_PATH=${RESOURCE_UPLOAD_PATH:-"/ds"}
export FS_DEFAULT_FS=${FS_DEFAULT_FS:-"s3a://xxxx"}
export FS_S3A_ENDPOINT=${FS_S3A_ENDPOINT:-"s3.xxx.amazonaws.com"}
export FS_S3A_ACCESS_KEY=${FS_S3A_ACCESS_KEY:-"xxxxxxx"}
export FS_S3A_SECRET_KEY=${FS_S3A_SECRET_KEY:-"xxxxxxx"}
#============================================================================
# Zookeeper
#============================================================================
export ZOOKEEPER_QUORUM=${ZOOKEEPER_QUORUM:-"127.0.0.1:2181"}
export ZOOKEEPER_ROOT=${ZOOKEEPER_ROOT:-"/dolphinscheduler"}
#============================================================================
# Master Server
@ -67,6 +78,8 @@ export WORKER_GROUP=${WORKER_GROUP:-"default"}
#============================================================================
# Alert Server
#============================================================================
# alert plugin dir
export ALERT_PLUGIN_DIR=${ALERT_PLUGIN_DIR:-"/opt/dolphinscheduler"}
# XLS FILE
export XLS_FILE_PATH=${XLS_FILE_PATH:-"/tmp/xls"}
# mail

40
docker/build/startup.sh

@ -22,24 +22,32 @@ DOLPHINSCHEDULER_BIN=${DOLPHINSCHEDULER_HOME}/bin
DOLPHINSCHEDULER_SCRIPT=${DOLPHINSCHEDULER_HOME}/script
DOLPHINSCHEDULER_LOGS=${DOLPHINSCHEDULER_HOME}/logs
# start postgresql
initPostgreSQL() {
echo "test postgresql service"
while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
# start database
initDatabase() {
echo "test ${DATABASE_TYPE} service"
while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do
counter=$((counter+1))
if [ $counter == 30 ]; then
echo "Error: Couldn't connect to postgresql."
echo "Error: Couldn't connect to ${DATABASE_TYPE}."
exit 1
fi
echo "Trying to connect to postgresql at ${POSTGRESQL_HOST}:${POSTGRESQL_PORT}. Attempt $counter."
echo "Trying to connect to ${DATABASE_TYPE} at ${DATABASE_HOST}:${DATABASE_PORT}. Attempt $counter."
sleep 5
done
echo "connect postgresql service"
v=$(sudo -u postgres PGPASSWORD=${POSTGRESQL_PASSWORD} psql -h ${POSTGRESQL_HOST} -p ${POSTGRESQL_PORT} -U ${POSTGRESQL_USERNAME} -d dolphinscheduler -tAc "select 1")
if [ "$(echo '${v}' | grep 'FATAL' | wc -l)" -eq 1 ]; then
echo "Error: Can't connect to database...${v}"
exit 1
echo "connect ${DATABASE_TYPE} service"
if [ ${DATABASE_TYPE} = "mysql" ]; then
v=$(mysql -h${DATABASE_HOST} -P${DATABASE_PORT} -u${DATABASE_USERNAME} --password=${DATABASE_PASSWORD} -D ${DATABASE_DATABASE} -e "select 1" 2>&1)
if [ "$(echo '${v}' | grep 'ERROR' | wc -l)" -eq 1 ]; then
echo "Error: Can't connect to database...${v}"
exit 1
fi
else
v=$(sudo -u postgres PGPASSWORD=${DATABASE_PASSWORD} psql -h ${DATABASE_HOST} -p ${DATABASE_PORT} -U ${DATABASE_USERNAME} -d ${DATABASE_DATABASE} -tAc "select 1")
if [ "$(echo '${v}' | grep 'FATAL' | wc -l)" -eq 1 ]; then
echo "Error: Can't connect to database...${v}"
exit 1
fi
fi
echo "import sql data"
@ -123,7 +131,7 @@ LOGFILE=/var/log/nginx/access.log
case "$1" in
(all)
initZK
initPostgreSQL
initDatabase
initMasterServer
initWorkerServer
initApiServer
@ -134,25 +142,25 @@ case "$1" in
;;
(master-server)
initZK
initPostgreSQL
initDatabase
initMasterServer
LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-master.log
;;
(worker-server)
initZK
initPostgreSQL
initDatabase
initWorkerServer
initLoggerServer
LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-worker.log
;;
(api-server)
initZK
initPostgreSQL
initDatabase
initApiServer
LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-api-server.log
;;
(alert-server)
initPostgreSQL
initDatabase
initAlertServer
LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-alert.log
;;

53
docker/docker-swarm/docker-compose.yml

@ -56,11 +56,11 @@ services:
- 12345:12345
environment:
TZ: Asia/Shanghai
POSTGRESQL_HOST: dolphinscheduler-postgresql
POSTGRESQL_PORT: 5432
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
DATABASE_HOST: dolphinscheduler-postgresql
DATABASE_PORT: 5432
DATABASE_USERNAME: root
DATABASE_PASSWORD: root
DATABASE_DATABASE: dolphinscheduler
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "ApiApplicationServer"]
@ -72,7 +72,7 @@ services:
- dolphinscheduler-postgresql
- dolphinscheduler-zookeeper
volumes:
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
- ./dolphinscheduler-logs:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler
@ -95,7 +95,7 @@ services:
depends_on:
- dolphinscheduler-api
volumes:
- dolphinscheduler-logs:/var/log/nginx
- ./dolphinscheduler-logs:/var/log/nginx
networks:
- dolphinscheduler
@ -119,11 +119,11 @@ services:
ENTERPRISE_WECHAT_SECRET: ""
ENTERPRISE_WECHAT_AGENT_ID: ""
ENTERPRISE_WECHAT_USERS: ""
POSTGRESQL_HOST: dolphinscheduler-postgresql
POSTGRESQL_PORT: 5432
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
DATABASE_HOST: dolphinscheduler-postgresql
DATABASE_PORT: 5432
DATABASE_USERNAME: root
DATABASE_PASSWORD: root
DATABASE_DATABASE: dolphinscheduler
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "AlertServer"]
interval: 30s
@ -133,7 +133,7 @@ services:
depends_on:
- dolphinscheduler-postgresql
volumes:
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
- ./dolphinscheduler-logs:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler
@ -152,11 +152,11 @@ services:
MASTER_TASK_COMMIT_INTERVAL: "1000"
MASTER_MAX_CPULOAD_AVG: "100"
MASTER_RESERVED_MEMORY: "0.1"
POSTGRESQL_HOST: dolphinscheduler-postgresql
POSTGRESQL_PORT: 5432
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
DATABASE_HOST: dolphinscheduler-postgresql
DATABASE_PORT: 5432
DATABASE_USERNAME: root
DATABASE_PASSWORD: root
DATABASE_DATABASE: dolphinscheduler
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "MasterServer"]
@ -168,7 +168,7 @@ services:
- dolphinscheduler-postgresql
- dolphinscheduler-zookeeper
volumes:
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
- ./dolphinscheduler-logs:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler
@ -188,11 +188,11 @@ services:
WORKER_RESERVED_MEMORY: "0.1"
WORKER_GROUP: "default"
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
POSTGRESQL_HOST: dolphinscheduler-postgresql
POSTGRESQL_PORT: 5432
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
DATABASE_HOST: dolphinscheduler-postgresql
DATABASE_PORT: 5432
DATABASE_USERNAME: root
DATABASE_PASSWORD: root
DATABASE_DATABASE: dolphinscheduler
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "WorkerServer"]
@ -210,9 +210,7 @@ services:
- type: volume
source: dolphinscheduler-worker-data
target: /tmp/dolphinscheduler
- type: volume
source: dolphinscheduler-logs
target: /opt/dolphinscheduler/logs
- ./dolphinscheduler-logs:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler
@ -225,7 +223,6 @@ volumes:
dolphinscheduler-postgresql-initdb:
dolphinscheduler-zookeeper:
dolphinscheduler-worker-data:
dolphinscheduler-logs:
configs:
dolphinscheduler-worker-task-env:
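Note that the named `dolphinscheduler-logs` volume is replaced by a `./dolphinscheduler-logs` bind mount, so logs land in a directory next to the compose file. Creating it up front avoids Docker creating it as root; a small sketch, assuming the stack is started from `docker/docker-swarm`:
```
$ mkdir -p ./dolphinscheduler-logs
$ docker-compose up -d
```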

52
docker/docker-swarm/docker-stack.yml

@ -20,13 +20,13 @@ services:
dolphinscheduler-postgresql:
image: bitnami/postgresql:latest
ports:
- 5432:5432
environment:
TZ: Asia/Shanghai
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
ports:
- 5432:5432
volumes:
- dolphinscheduler-postgresql:/bitnami/postgresql
networks:
@ -37,12 +37,12 @@ services:
dolphinscheduler-zookeeper:
image: bitnami/zookeeper:latest
ports:
- 2181:2181
environment:
TZ: Asia/Shanghai
ALLOW_ANONYMOUS_LOGIN: "yes"
ZOO_4LW_COMMANDS_WHITELIST: srvr,ruok,wchs,cons
ports:
- 2181:2181
volumes:
- dolphinscheduler-zookeeper:/bitnami/zookeeper
networks:
@ -54,16 +54,16 @@ services:
dolphinscheduler-api:
image: apache/dolphinscheduler:latest
command: ["api-server"]
ports:
- 12345:12345
environment:
TZ: Asia/Shanghai
POSTGRESQL_HOST: dolphinscheduler-postgresql
POSTGRESQL_PORT: 5432
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
DATABASE_HOST: dolphinscheduler-postgresql
DATABASE_PORT: 5432
DATABASE_USERNAME: root
DATABASE_PASSWORD: root
DATABASE_DATABASE: dolphinscheduler
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
ports:
- 12345:12345
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "ApiApplicationServer"]
interval: 30
@ -120,11 +120,11 @@ services:
ENTERPRISE_WECHAT_SECRET: ""
ENTERPRISE_WECHAT_AGENT_ID: ""
ENTERPRISE_WECHAT_USERS: ""
POSTGRESQL_HOST: dolphinscheduler-postgresql
POSTGRESQL_PORT: 5432
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
DATABASE_HOST: dolphinscheduler-postgresql
DATABASE_PORT: 5432
DATABASE_USERNAME: root
DATABASE_PASSWORD: root
DATABASE_DATABASE: dolphinscheduler
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "AlertServer"]
interval: 30
@ -153,11 +153,11 @@ services:
MASTER_TASK_COMMIT_INTERVAL: "1000"
MASTER_MAX_CPULOAD_AVG: "100"
MASTER_RESERVED_MEMORY: "0.1"
POSTGRESQL_HOST: dolphinscheduler-postgresql
POSTGRESQL_PORT: 5432
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
DATABASE_HOST: dolphinscheduler-postgresql
DATABASE_PORT: 5432
DATABASE_USERNAME: root
DATABASE_PASSWORD: root
DATABASE_DATABASE: dolphinscheduler
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "MasterServer"]
@ -188,11 +188,11 @@ services:
WORKER_RESERVED_MEMORY: "0.1"
WORKER_GROUP: "default"
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
POSTGRESQL_HOST: dolphinscheduler-postgresql
POSTGRESQL_PORT: 5432
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
DATABASE_HOST: dolphinscheduler-postgresql
DATABASE_PORT: 5432
DATABASE_USERNAME: root
DATABASE_PASSWORD: root
DATABASE_DATABASE: dolphinscheduler
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "WorkerServer"]

2
docker/kubernetes/dolphinscheduler/Chart.yaml

@ -49,4 +49,4 @@ dependencies:
- name: zookeeper
version: 5.x.x
repository: https://charts.bitnami.com/bitnami
condition: redis.enabled
condition: zookeeper.enabled
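Since the chart dependencies are declared in `Chart.yaml` (and `requirements.yaml` is removed below), the bundled `postgresql` and `zookeeper` charts have to be re-fetched before installing; a minimal sketch:
```
$ cd docker/kubernetes/dolphinscheduler
$ helm dependency update .
```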

42
docker/kubernetes/dolphinscheduler/README.md

@ -46,8 +46,8 @@ The following tables lists the configurable parameters of the Dolphins Scheduler
| `image.repository` | Docker image repository for the Dolphins Scheduler | `dolphinscheduler` |
| `image.tag` | Docker image version for the Dolphins Scheduler | `1.2.1` |
| `image.imagePullPolicy` | Image pull policy. One of Always, Never, IfNotPresent | `IfNotPresent` |
| `imagePullSecrets` | ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images | `[]` |
| | | |
| `image.pullSecrets` | PullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images | `[]` |
| | | |
| `postgresql.enabled` | If not exists external PostgreSQL, by default, the Dolphins Scheduler will use a internal PostgreSQL | `true` |
| `postgresql.postgresqlUsername` | The username for internal PostgreSQL | `root` |
| `postgresql.postgresqlPassword` | The password for internal PostgreSQL | `root` |
@ -55,12 +55,15 @@ The following tables lists the configurable parameters of the Dolphins Scheduler
| `postgresql.persistence.enabled` | Set `postgresql.persistence.enabled` to `true` to mount a new volume for internal PostgreSQL | `false` |
| `postgresql.persistence.size` | `PersistentVolumeClaim` Size | `20Gi` |
| `postgresql.persistence.storageClass` | PostgreSQL data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `externalDatabase.host` | If exists external PostgreSQL, and set `postgresql.enable` value to false. Dolphins Scheduler's database host will use it. | `localhost` |
| `externalDatabase.type` | If exists external PostgreSQL, and set `postgresql.enable` value to false. Dolphins Scheduler's database type will use it. | `postgresql` |
| `externalDatabase.driver` | If exists external PostgreSQL, and set `postgresql.enable` value to false. Dolphins Scheduler's database driver will use it. | `org.postgresql.Driver` |
| `externalDatabase.host` | If exists external PostgreSQL, and set `postgresql.enable` value to false. Dolphins Scheduler's database host will use it. | `localhost` |
| `externalDatabase.port` | If exists external PostgreSQL, and set `postgresql.enable` value to false. Dolphins Scheduler's database port will use it. | `5432` |
| `externalDatabase.username` | If exists external PostgreSQL, and set `postgresql.enable` value to false. Dolphins Scheduler's database username will use it. | `root` |
| `externalDatabase.password` | If exists external PostgreSQL, and set `postgresql.enable` value to false. Dolphins Scheduler's database password will use it. | `root` |
| `externalDatabase.database` | If exists external PostgreSQL, and set `postgresql.enable` value to false. Dolphins Scheduler's database database will use it. | `dolphinscheduler` |
| | | |
| `externalDatabase.params` | If exists external PostgreSQL, and set `postgresql.enable` value to false. Dolphins Scheduler's database params will use it. | `characterEncoding=utf8` |
| | | |
| `zookeeper.enabled` | If not exists external Zookeeper, by default, the Dolphin Scheduler will use a internal Zookeeper | `true` |
| `zookeeper.taskQueue` | Specify task queue for `master` and `worker` | `zookeeper` |
| `zookeeper.persistence.enabled` | Set `zookeeper.persistence.enabled` to `true` to mount a new volume for internal Zookeeper | `false` |
@ -68,12 +71,25 @@ The following tables lists the configurable parameters of the Dolphins Scheduler
| `zookeeper.persistence.storageClass` | Zookeeper data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `externalZookeeper.taskQueue` | If exists external Zookeeper, and set `zookeeper.enable` value to false. Specify task queue for `master` and `worker` | `zookeeper` |
| `externalZookeeper.zookeeperQuorum` | If exists external Zookeeper, and set `zookeeper.enable` value to false. Specify Zookeeper quorum | `127.0.0.1:2181` |
| | | |
| `externalZookeeper.zookeeperRoot` | If exists external Zookeeper, and set `zookeeper.enable` value to false. Specify Zookeeper root path for `master` and `worker` | `/dolphinscheduler` |
| | | |
| `common.configmap.DOLPHINSCHEDULER_ENV_PATH` | Extra env file path. | `/tmp/dolphinscheduler/env` |
| `common.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH` | File uploaded path of DS. | `/tmp/dolphinscheduler/files` |
| `common.configmap.RESOURCE_STORAGE_TYPE` | Resource storage type; supported types are: S3, HDFS, NONE. | `NONE` |
| `common.configmap.RESOURCE_UPLOAD_PATH` | The base path of resource. | `/ds` |
| `common.configmap.FS_DEFAULT_FS` | The default filesystem of the resource storage; for S3 it is the `s3a://` prefix plus the bucket name. | `s3a://xxxx` |
| `common.configmap.FS_S3A_ENDPOINT` | If the resource type is `S3`, you should fill in this field; it is the endpoint of S3. | `s3.xxx.amazonaws.com` |
| `common.configmap.FS_S3A_ACCESS_KEY` | The access key for your s3 bucket. | `xxxxxxx` |
| `common.configmap.FS_S3A_SECRET_KEY` | The secret key for your s3 bucket. | `xxxxxxx` |
| `master.podManagementPolicy` | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down | `Parallel` |
| | | |
| `master.replicas` | Replicas is the desired number of replicas of the given Template | `3` |
| `master.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `master.tolerations` | If specified, the pod's tolerations | `{}` |
| `master.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `master.jvmOptions` | The JVM options for master server. | `""` |
| `master.resources` | The `resource` limit and request config for master server. | `{}` |
| `master.annotations` | The `annotations` for master server. | `{}` |
| `master.configmap.MASTER_EXEC_THREADS` | Master execute thread num | `100` |
| `master.configmap.MASTER_EXEC_TASK_NUM` | Master execute task number in parallel | `20` |
| `master.configmap.MASTER_HEARTBEAT_INTERVAL` | Master heartbeat interval | `10` |
@ -97,12 +113,15 @@ The following tables lists the configurable parameters of the Dolphins Scheduler
| `master.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `master.persistentVolumeClaim.storageClassName` | `Master` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `master.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| | | |
| `worker.podManagementPolicy` | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down | `Parallel` |
| `worker.replicas` | Replicas is the desired number of replicas of the given Template | `3` |
| `worker.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `worker.tolerations` | If specified, the pod's tolerations | `{}` |
| `worker.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `worker.jvmOptions` | The JVM options for worker server. | `""` |
| `worker.resources` | The `resource` limit and request config for worker server. | `{}` |
| `worker.annotations` | The `annotations` for worker server. | `{}` |
| `worker.configmap.WORKER_EXEC_THREADS` | Worker execute thread num | `100` |
| `worker.configmap.WORKER_HEARTBEAT_INTERVAL` | Worker heartbeat interval | `10` |
| `worker.configmap.WORKER_FETCH_TASK_NUM` | Submit the number of tasks at a time | `3` |
@ -131,7 +150,7 @@ The following tables lists the configurable parameters of the Dolphins Scheduler
| `worker.persistentVolumeClaim.logsPersistentVolume.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `worker.persistentVolumeClaim.logsPersistentVolume.storageClassName` | `Worker` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `worker.persistentVolumeClaim.logsPersistentVolume.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| | | |
| `alert.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
| `alert.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
| `alert.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
@ -139,6 +158,10 @@ The following tables lists the configurable parameters of the Dolphins Scheduler
| `alert.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `alert.tolerations` | If specified, the pod's tolerations | `{}` |
| `alert.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `alert.jvmOptions` | The JVM options for alert server. | `""` |
| `alert.resources` | The `resource` limit and request config for alert server. | `{}` |
| `alert.annotations` | The `annotations` for alert server. | `{}` |
| `alert.configmap.ALERT_PLUGIN_DIR` | Alert plugin path. | `/opt/dolphinscheduler/alert/plugin` |
| `alert.configmap.XLS_FILE_PATH` | XLS file path | `/tmp/xls` |
| `alert.configmap.MAIL_SERVER_HOST` | Mail `SERVER HOST ` | `nil` |
| `alert.configmap.MAIL_SERVER_PORT` | Mail `SERVER PORT` | `nil` |
@ -177,6 +200,9 @@ The following tables lists the configurable parameters of the Dolphins Scheduler
| `api.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `api.tolerations` | If specified, the pod's tolerations | `{}` |
| `api.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `api.jvmOptions` | The JVM options for api server. | `""` |
| `api.resources` | The `resource` limit and request config for api server. | `{}` |
| `api.annotations` | The `annotations` for api server. | `{}` |
| `api.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `api.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `api.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
@ -201,6 +227,8 @@ The following tables lists the configurable parameters of the Dolphins Scheduler
| `frontend.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `frontend.tolerations` | If specified, the pod's tolerations | `{}` |
| `frontend.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `frontend.resources` | The `resource` limit and request config for frontend server. | `{}` |
| `frontend.annotations` | The `annotations` for frontend server. | `{}` |
| `frontend.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `frontend.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `frontend.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
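To illustrate the new parameters, a hypothetical `values.yaml` override that points the chart at an external MySQL database and pins worker resources could look like the sketch below (hosts, credentials and sizes are placeholders):
```
postgresql:
  enabled: false
externalDatabase:
  type: "mysql"
  driver: "com.mysql.jdbc.Driver"
  host: "192.168.x.x"
  port: "3306"
  username: "test"
  password: "test"
  database: "dolphinscheduler"
  params: "useUnicode=true&characterEncoding=UTF-8"
worker:
  jvmOptions: "-Xms1g -Xmx1g"
  resources:
    limits:
      memory: "2Gi"
      cpu: "1"
    requests:
      memory: "1Gi"
      cpu: "500m"
```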

25
docker/kubernetes/dolphinscheduler/requirements.yaml

@ -1,25 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
dependencies:
- name: postgresql
version: 8.x.x
repository: https://charts.bitnami.com/bitnami
condition: postgresql.enabled
- name: zookeeper
version: 5.x.x
repository: https://charts.bitnami.com/bitnami
condition: redis.enabled

13
docker/kubernetes/dolphinscheduler/templates/NOTES.txt

@ -29,16 +29,3 @@
kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "dolphinscheduler.fullname" . }}-frontend 8888:8888
{{- end }}
2. Get the Dolphinscheduler URL by running:
{{- if .Values.ingress.enabled }}
export HOSTNAME=$(kubectl get ingress --namespace {{ .Release.Namespace }} {{ template "dolphinscheduler.fullname" . }} -o jsonpath='{.spec.rules[0].host}')
echo "Dolphinscheduler URL: http://$HOSTNAME/"
{{- else }}
kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "dolphinscheduler.fullname" . }}-frontend 8888:8888
{{- end }}

8
docker/kubernetes/dolphinscheduler/templates/_helpers.tpl

@ -96,6 +96,14 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
{{- printf "%s/%s:%s" (include "dolphinscheduler.image.registry" .) .Values.image.repository .Values.image.tag -}}
{{- end -}}
{{/*
Create the default image pull secrets.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dolphinscheduler.image.pullSecrets" -}}
{{- default nil .Values.image.pullSecrets -}}
{{- end -}}
{{/*
Create a default fully qualified postgresql name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).

1
docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-alert.yaml

@ -24,6 +24,7 @@ metadata:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
data:
ALERT_PLUGIN_DIR: {{ .Values.alert.configmap.ALERT_PLUGIN_DIR | quote }}
XLS_FILE_PATH: {{ .Values.alert.configmap.XLS_FILE_PATH | quote }}
MAIL_SERVER_HOST: {{ .Values.alert.configmap.MAIL_SERVER_HOST | quote }}
MAIL_SERVER_PORT: {{ .Values.alert.configmap.MAIL_SERVER_PORT | quote }}

35
docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-common.yaml

@ -0,0 +1,35 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{{- if .Values.common.configmap }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-common
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-common
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
data:
DOLPHINSCHEDULER_ENV_PATH: {{ .Values.common.configmap.DOLPHINSCHEDULER_ENV_PATH | quote }}
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: {{ .Values.common.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH | quote }}
RESOURCE_STORAGE_TYPE: {{ .Values.common.configmap.RESOURCE_STORAGE_TYPE | quote }}
RESOURCE_UPLOAD_PATH: {{ .Values.common.configmap.RESOURCE_UPLOAD_PATH | quote }}
FS_DEFAULT_FS: {{ .Values.common.configmap.FS_DEFAULT_FS | quote }}
FS_S3A_ENDPOINT: {{ .Values.common.configmap.FS_S3A_ENDPOINT | quote }}
FS_S3A_ACCESS_KEY: {{ .Values.common.configmap.FS_S3A_ACCESS_KEY | quote }}
FS_S3A_SECRET_KEY: {{ .Values.common.configmap.FS_S3A_SECRET_KEY | quote }}
{{- end }}
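After installation, the rendered ConfigMap can be inspected to confirm these values; its name follows `{{ include "dolphinscheduler.fullname" . }}-common`, so for a release named `dolphinscheduler` it would look something like:
```
$ kubectl get configmap dolphinscheduler-common -o yaml
```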

98
docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml

@ -43,6 +43,10 @@ spec:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: alert
{{- if .Values.alert.annotations }}
annotations:
{{- toYaml .Values.alert.annotations | nindent 8 }}
{{- end }}
spec:
{{- if .Values.alert.affinity }}
affinity: {{- toYaml .Values.alert.affinity | nindent 8 }}
@ -54,34 +58,38 @@ spec:
tolerations: {{- toYaml . | nindent 8 }}
{{- end }}
initContainers:
- name: init-postgresql
- name: init-database
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do
counter=$((counter+1))
if [ $counter == 5 ]; then
echo "Error: Couldn't connect to postgresql."
echo "Error: Couldn't connect to database."
exit 1
fi
echo "Trying to connect to postgresql at ${POSTGRESQL_HOST}:${POSTGRESQL_PORT}. Attempt $counter."
echo "Trying to connect to database at ${DATABASE_HOST}:${DATABASE_PORT}. Attempt $counter."
sleep 60
done
env:
- name: POSTGRESQL_HOST
- name: DATABASE_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
- name: DATABASE_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
- name: {{ include "dolphinscheduler.image.pullSecrets" . }}
{{- end }}
containers:
- name: {{ include "dolphinscheduler.fullname" . }}-alert
image: {{ include "dolphinscheduler.image.repository" . | quote }}
@ -89,8 +97,15 @@ spec:
- "alert-server"
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: DOLPHINSCHEDULER_OPTS
value: {{ default "" .Values.alert.jvmOptions }}
- name: TZ
value: {{ .Values.timezone }}
- name: ALERT_PLUGIN_DIR
valueFrom:
configMapKeyRef:
key: ALERT_PLUGIN_DIR
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: XLS_FILE_PATH
valueFrom:
configMapKeyRef:
@ -161,25 +176,37 @@ spec:
configMapKeyRef:
key: ENTERPRISE_WECHAT_USERS
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: POSTGRESQL_HOST
- name: DATABASE_TYPE
{{- if .Values.postgresql.enabled }}
value: "postgresql"
{{- else }}
value: {{ .Values.externalDatabase.type | quote }}
{{- end }}
- name: DATABASE_DRIVER
{{- if .Values.postgresql.enabled }}
value: "org.postgresql.Driver"
{{- else }}
value: {{ .Values.externalDatabase.driver | quote }}
{{- end }}
- name: DATABASE_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
- name: DATABASE_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
- name: POSTGRESQL_USERNAME
- name: DATABASE_USERNAME
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlUsername }}
{{- else }}
value: {{ .Values.externalDatabase.username | quote }}
{{- end }}
- name: POSTGRESQL_PASSWORD
- name: DATABASE_PASSWORD
valueFrom:
secretKeyRef:
{{- if .Values.postgresql.enabled }}
@ -189,12 +216,57 @@ spec:
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
key: db-password
{{- end }}
- name: POSTGRESQL_DATABASE
- name: DATABASE_DATABASE
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlDatabase }}
{{- else }}
value: {{ .Values.externalDatabase.database | quote }}
{{- end }}
- name: DATABASE_PARAMS
{{- if .Values.postgresql.enabled }}
value: "characterEncoding=utf8"
{{- else }}
value: {{ .Values.externalDatabase.params | quote }}
{{- end }}
- name: RESOURCE_STORAGE_TYPE
valueFrom:
configMapKeyRef:
key: RESOURCE_STORAGE_TYPE
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: RESOURCE_UPLOAD_PATH
valueFrom:
configMapKeyRef:
key: RESOURCE_UPLOAD_PATH
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_DEFAULT_FS
valueFrom:
configMapKeyRef:
key: FS_DEFAULT_FS
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_ENDPOINT
valueFrom:
configMapKeyRef:
key: FS_S3A_ENDPOINT
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_ACCESS_KEY
valueFrom:
configMapKeyRef:
key: FS_S3A_ACCESS_KEY
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_SECRET_KEY
valueFrom:
configMapKeyRef:
key: FS_S3A_SECRET_KEY
name: {{ include "dolphinscheduler.fullname" . }}-common
{{- if .Values.alert.resources }}
resources:
limits:
memory: {{ .Values.alert.resources.limits.memory }}
cpu: {{ .Values.alert.resources.limits.cpu }}
requests:
memory: {{ .Values.alert.resources.requests.memory }}
cpu: {{ .Values.alert.resources.requests.cpu }}
{{- end }}
{{- if .Values.alert.livenessProbe.enabled }}
livenessProbe:
exec:

93
docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml

@ -43,6 +43,10 @@ spec:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: api
{{- if .Values.alert.annotations }}
annotations:
{{- toYaml .Values.alert.annotations | nindent 8 }}
{{- end }}
spec:
{{- if .Values.api.affinity }}
affinity: {{- toYaml .Values.api.affinity | nindent 8 }}
@ -54,34 +58,38 @@ spec:
tolerations: {{- toYaml . | nindent 8 }}
{{- end }}
initContainers:
- name: init-postgresql
- name: init-database
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do
counter=$((counter+1))
if [ $counter == 5 ]; then
echo "Error: Couldn't connect to postgresql."
echo "Error: Couldn't connect to database."
exit 1
fi
echo "Trying to connect to postgresql at ${POSTGRESQL_HOST}:${POSTGRESQL_PORT}. Attempt $counter."
echo "Trying to connect to database at ${DATABASE_HOST}:${DATABASE_PORT}. Attempt $counter."
sleep 60
done
env:
- name: POSTGRESQL_HOST
- name: DATABASE_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
- name: DATABASE_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
- name: {{ include "dolphinscheduler.image.pullSecrets" . }}
{{- end }}
containers:
- name: {{ include "dolphinscheduler.fullname" . }}-api
image: {{ include "dolphinscheduler.image.repository" . | quote }}
@ -92,27 +100,41 @@ spec:
name: tcp-port
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: DOLPHINSCHEDULER_OPTS
value: {{ default "" .Values.api.jvmOptions }}
- name: TZ
value: {{ .Values.timezone }}
- name: POSTGRESQL_HOST
- name: DATABASE_TYPE
{{- if .Values.postgresql.enabled }}
value: "postgresql"
{{- else }}
value: {{ .Values.externalDatabase.type | quote }}
{{- end }}
- name: DATABASE_DRIVER
{{- if .Values.postgresql.enabled }}
value: "org.postgresql.Driver"
{{- else }}
value: {{ .Values.externalDatabase.driver | quote }}
{{- end }}
- name: DATABASE_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
- name: DATABASE_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
- name: POSTGRESQL_USERNAME
- name: DATABASE_USERNAME
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlUsername }}
{{- else }}
value: {{ .Values.externalDatabase.username | quote }}
{{- end }}
- name: POSTGRESQL_PASSWORD
- name: DATABASE_PASSWORD
valueFrom:
secretKeyRef:
{{- if .Values.postgresql.enabled }}
@ -122,18 +144,63 @@ spec:
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
key: db-password
{{- end }}
- name: POSTGRESQL_DATABASE
- name: DATABASE_DATABASE
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlDatabase }}
{{- else }}
value: {{ .Values.externalDatabase.database | quote }}
{{- end }}
- name: DATABASE_PARAMS
{{- if .Values.postgresql.enabled }}
value: "characterEncoding=utf8"
{{- else }}
value: {{ .Values.externalDatabase.params | quote }}
{{- end }}
- name: ZOOKEEPER_QUORUM
{{- if .Values.zookeeper.enabled }}
value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"
{{- else }}
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
{{- end }}
- name: RESOURCE_STORAGE_TYPE
valueFrom:
configMapKeyRef:
key: RESOURCE_STORAGE_TYPE
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: RESOURCE_UPLOAD_PATH
valueFrom:
configMapKeyRef:
key: RESOURCE_UPLOAD_PATH
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_DEFAULT_FS
valueFrom:
configMapKeyRef:
key: FS_DEFAULT_FS
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_ENDPOINT
valueFrom:
configMapKeyRef:
key: FS_S3A_ENDPOINT
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_ACCESS_KEY
valueFrom:
configMapKeyRef:
key: FS_S3A_ACCESS_KEY
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_SECRET_KEY
valueFrom:
configMapKeyRef:
key: FS_S3A_SECRET_KEY
name: {{ include "dolphinscheduler.fullname" . }}-common
{{- if .Values.api.resources }}
resources:
limits:
memory: {{ .Values.api.resources.limits.memory | quote }}
cpu: {{ .Values.api.resources.limits.cpu | quote }}
requests:
memory: {{ .Values.api.resources.requests.memory | quote }}
cpu: {{ .Values.api.resources.requests.cpu | quote }}
{{- end }}
{{- if .Values.api.livenessProbe.enabled }}
livenessProbe:
tcpSocket:

17
docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-frontend.yaml

@ -43,6 +43,10 @@ spec:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: frontend
{{- if .Values.alert.annotations }}
annotations:
{{- toYaml .Values.alert.annotations | nindent 8 }}
{{- end }}
spec:
{{- if .Values.frontend.affinity }}
affinity: {{- toYaml .Values.frontend.affinity | nindent 8 }}
@ -53,6 +57,10 @@ spec:
{{- if .Values.frontend.tolerations }}
tolerations: {{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
- name: {{ include "dolphinscheduler.image.pullSecrets" . }}
{{- end }}
containers:
- name: {{ include "dolphinscheduler.fullname" . }}-frontend
image: {{ include "dolphinscheduler.image.repository" . | quote }}
@ -69,6 +77,15 @@ spec:
value: '{{ include "dolphinscheduler.fullname" . }}-api'
- name: FRONTEND_API_SERVER_PORT
value: "12345"
{{- if .Values.frontend.resources }}
resources:
limits:
memory: {{ .Values.frontend.resources.limits.memory | quote }}
cpu: {{ .Values.frontend.resources.limits.cpu | quote }}
requests:
memory: {{ .Values.frontend.resources.requests.memory | quote }}
cpu: {{ .Values.frontend.resources.requests.cpu | quote }}
{{- end }}
{{- if .Values.frontend.livenessProbe.enabled }}
livenessProbe:
tcpSocket:

93
docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml

@ -40,6 +40,10 @@ spec:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: master
{{- if .Values.alert.annotations }}
annotations:
{{- toYaml .Values.alert.annotations | nindent 8 }}
{{- end }}
spec:
{{- if .Values.master.affinity }}
affinity: {{- toYaml .Values.master.affinity | nindent 8 }}
@ -75,34 +79,38 @@ spec:
{{- else }}
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
{{- end }}
- name: init-postgresql
- name: init-database
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do
counter=$((counter+1))
if [ $counter == 5 ]; then
echo "Error: Couldn't connect to postgresql."
echo "Error: Couldn't connect to database."
exit 1
fi
echo "Trying to connect to postgresql at ${POSTGRESQL_HOST}:${POSTGRESQL_PORT}. Attempt $counter."
echo "Trying to connect to database at ${DATABASE_HOST}:${DATABASE_PORT}. Attempt $counter."
sleep 60
done
env:
- name: POSTGRESQL_HOST
- name: DATABASE_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
- name: DATABASE_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
- name: {{ include "dolphinscheduler.image.pullSecrets" . }}
{{- end }}
containers:
- name: {{ include "dolphinscheduler.fullname" . }}-master
image: {{ include "dolphinscheduler.image.repository" . | quote }}
@ -113,6 +121,8 @@ spec:
name: "master-port"
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: DOLPHINSCHEDULER_OPTS
value: {{ default "" .Values.master.jvmOptions }}
- name: TZ
value: {{ .Values.timezone }}
- name: MASTER_EXEC_THREADS
@ -160,25 +170,37 @@ spec:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-master
key: DOLPHINSCHEDULER_DATA_BASEDIR_PATH
- name: POSTGRESQL_HOST
- name: DATABASE_TYPE
{{- if .Values.postgresql.enabled }}
value: "postgresql"
{{- else }}
value: {{ .Values.externalDatabase.type | quote }}
{{- end }}
- name: DATABASE_DRIVER
{{- if .Values.postgresql.enabled }}
value: "org.postgresql.Driver"
{{- else }}
value: {{ .Values.externalDatabase.driver | quote }}
{{- end }}
- name: DATABASE_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
- name: DATABASE_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
- name: POSTGRESQL_USERNAME
- name: DATABASE_USERNAME
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlUsername }}
{{- else }}
value: {{ .Values.externalDatabase.username | quote }}
{{- end }}
- name: POSTGRESQL_PASSWORD
- name: DATABASE_PASSWORD
valueFrom:
secretKeyRef:
{{- if .Values.postgresql.enabled }}
@ -188,18 +210,63 @@ spec:
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
key: db-password
{{- end }}
- name: POSTGRESQL_DATABASE
- name: DATABASE_DATABASE
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlDatabase }}
{{- else }}
value: {{ .Values.externalDatabase.database | quote }}
{{- end }}
- name: DATABASE_PARAMS
{{- if .Values.postgresql.enabled }}
value: "characterEncoding=utf8"
{{- else }}
value: {{ .Values.externalDatabase.params | quote }}
{{- end }}
- name: ZOOKEEPER_QUORUM
{{- if .Values.zookeeper.enabled }}
value: {{ template "dolphinscheduler.zookeeper.quorum" . }}
{{- else }}
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
{{- end }}
- name: RESOURCE_STORAGE_TYPE
valueFrom:
configMapKeyRef:
key: RESOURCE_STORAGE_TYPE
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: RESOURCE_UPLOAD_PATH
valueFrom:
configMapKeyRef:
key: RESOURCE_UPLOAD_PATH
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_DEFAULT_FS
valueFrom:
configMapKeyRef:
key: FS_DEFAULT_FS
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_ENDPOINT
valueFrom:
configMapKeyRef:
key: FS_S3A_ENDPOINT
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_ACCESS_KEY
valueFrom:
configMapKeyRef:
key: FS_S3A_ACCESS_KEY
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_SECRET_KEY
valueFrom:
configMapKeyRef:
key: FS_S3A_SECRET_KEY
name: {{ include "dolphinscheduler.fullname" . }}-common
{{- if .Values.master.resources }}
resources:
limits:
memory: {{ .Values.master.resources.limits.memory | quote }}
cpu: {{ .Values.master.resources.limits.cpu | quote }}
requests:
memory: {{ .Values.master.resources.requests.memory | quote }}
cpu: {{ .Values.master.resources.requests.cpu | quote }}
{{- end }}
{{- if .Values.master.livenessProbe.enabled }}
livenessProbe:
exec:

163
docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml

@ -40,6 +40,10 @@ spec:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: worker
{{- if .Values.alert.annotations }}
annotations:
{{- toYaml .Values.alert.annotations | nindent 8 }}
{{- end }}
spec:
{{- if .Values.worker.affinity }}
affinity: {{- toYaml .Values.worker.affinity | nindent 8 }}
@ -75,34 +79,38 @@ spec:
{{- else }}
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
{{- end }}
- name: init-postgresql
- name: init-database
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do
counter=$((counter+1))
if [ $counter == 5 ]; then
echo "Error: Couldn't connect to postgresql."
echo "Error: Couldn't connect to database."
exit 1
fi
echo "Trying to connect to postgresql at ${POSTGRESQL_HOST}:${POSTGRESQL_PORT}. Attempt $counter."
echo "Trying to connect to database at ${DATABASE_HOST}:${DATABASE_PORT}. Attempt $counter."
sleep 60
done
env:
- name: POSTGRESQL_HOST
- name: DATABASE_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
- name: DATABASE_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
- name: {{ include "dolphinscheduler.image.pullSecrets" . }}
{{- end }}
containers:
- name: {{ include "dolphinscheduler.fullname" . }}-worker
image: {{ include "dolphinscheduler.image.repository" . | quote }}
@ -115,6 +123,8 @@ spec:
name: "logs-port"
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: DOLPHINSCHEDULER_OPTS
value: {{ default "" .Values.worker.jvmOptions }}
- name: TZ
value: {{ .Values.timezone }}
- name: WORKER_EXEC_THREADS
@ -157,25 +167,37 @@ spec:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-master
key: DOLPHINSCHEDULER_DATA_BASEDIR_PATH
- name: POSTGRESQL_HOST
- name: DATABASE_TYPE
{{- if .Values.postgresql.enabled }}
value: "postgresql"
{{- else }}
value: {{ .Values.externalDatabase.type | quote }}
{{- end }}
- name: DATABASE_DRIVER
{{- if .Values.postgresql.enabled }}
value: "org.postgresql.Driver"
{{- else }}
value: {{ .Values.externalDatabase.driver | quote }}
{{- end }}
- name: DATABASE_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
- name: DATABASE_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
- name: POSTGRESQL_USERNAME
- name: DATABASE_USERNAME
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlUsername }}
{{- else }}
value: {{ .Values.externalDatabase.username | quote }}
{{- end }}
- name: POSTGRESQL_PASSWORD
- name: DATABASE_PASSWORD
valueFrom:
secretKeyRef:
{{- if .Values.postgresql.enabled }}
@ -185,18 +207,133 @@ spec:
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
key: db-password
{{- end }}
- name: POSTGRESQL_DATABASE
- name: DATABASE_DATABASE
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlDatabase }}
{{- else }}
value: {{ .Values.externalDatabase.database | quote }}
{{- end }}
- name: DATABASE_PARAMS
{{- if .Values.postgresql.enabled }}
value: "characterEncoding=utf8"
{{- else }}
value: {{ .Values.externalDatabase.params | quote }}
{{- end }}
- name: ZOOKEEPER_QUORUM
{{- if .Values.zookeeper.enabled }}
value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"
{{- else }}
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
{{- end }}
- name: RESOURCE_STORAGE_TYPE
valueFrom:
configMapKeyRef:
key: RESOURCE_STORAGE_TYPE
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: RESOURCE_UPLOAD_PATH
valueFrom:
configMapKeyRef:
key: RESOURCE_UPLOAD_PATH
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_DEFAULT_FS
valueFrom:
configMapKeyRef:
key: FS_DEFAULT_FS
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_ENDPOINT
valueFrom:
configMapKeyRef:
key: FS_S3A_ENDPOINT
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_ACCESS_KEY
valueFrom:
configMapKeyRef:
key: FS_S3A_ACCESS_KEY
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_SECRET_KEY
valueFrom:
configMapKeyRef:
key: FS_S3A_SECRET_KEY
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: XLS_FILE_PATH
valueFrom:
configMapKeyRef:
key: XLS_FILE_PATH
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SERVER_HOST
valueFrom:
configMapKeyRef:
key: MAIL_SERVER_HOST
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SERVER_PORT
valueFrom:
configMapKeyRef:
key: MAIL_SERVER_PORT
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SENDER
valueFrom:
configMapKeyRef:
key: MAIL_SENDER
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_USER
valueFrom:
configMapKeyRef:
key: MAIL_USER
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_PASSWD
valueFrom:
configMapKeyRef:
key: MAIL_PASSWD
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SMTP_STARTTLS_ENABLE
valueFrom:
configMapKeyRef:
key: MAIL_SMTP_STARTTLS_ENABLE
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SMTP_SSL_ENABLE
valueFrom:
configMapKeyRef:
key: MAIL_SMTP_SSL_ENABLE
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SMTP_SSL_TRUST
valueFrom:
configMapKeyRef:
key: MAIL_SMTP_SSL_TRUST
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: ENTERPRISE_WECHAT_ENABLE
valueFrom:
configMapKeyRef:
key: ENTERPRISE_WECHAT_ENABLE
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: ENTERPRISE_WECHAT_CORP_ID
valueFrom:
configMapKeyRef:
key: ENTERPRISE_WECHAT_CORP_ID
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: ENTERPRISE_WECHAT_SECRET
valueFrom:
configMapKeyRef:
key: ENTERPRISE_WECHAT_SECRET
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: ENTERPRISE_WECHAT_AGENT_ID
valueFrom:
configMapKeyRef:
key: ENTERPRISE_WECHAT_AGENT_ID
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: ENTERPRISE_WECHAT_USERS
valueFrom:
configMapKeyRef:
key: ENTERPRISE_WECHAT_USERS
name: {{ include "dolphinscheduler.fullname" . }}-alert
{{- if .Values.worker.resources }}
resources:
limits:
memory: {{ .Values.worker.resources.limits.memory | quote }}
cpu: {{ .Values.worker.resources.limits.cpu | quote }}
requests:
memory: {{ .Values.worker.resources.requests.memory | quote }}
cpu: {{ .Values.worker.resources.requests.cpu | quote }}
{{- end }}
{{- if .Values.worker.livenessProbe.enabled }}
livenessProbe:
exec:

87
docker/kubernetes/dolphinscheduler/values.yaml

@ -29,10 +29,9 @@ image:
repository: "dolphinscheduler"
tag: "latest"
pullPolicy: "IfNotPresent"
pullSecrets: []
imagePullSecrets: []
# If not exists external postgresql, by default, Dolphinscheduler's database will use it.
# If no external database is configured, DolphinScheduler will use this bundled PostgreSQL by default.
postgresql:
enabled: true
postgresqlUsername: "root"
@ -43,21 +42,24 @@ postgresql:
size: "20Gi"
storageClass: "-"
# If exists external postgresql, and set postgresql.enable value to false.
# If postgresql.enable is false, Dolphinscheduler's database will use it.
# If an external database exists, set postgresql.enabled to false;
# the external database will then be used, otherwise the bundled PostgreSQL will be used.
externalDatabase:
type: "postgresql"
driver: "org.postgresql.Driver"
host: "localhost"
port: "5432"
username: "root"
password: "root"
database: "dolphinscheduler"
# multiple params should be joined with the & character
params: "characterEncoding=utf8"
# If no external ZooKeeper is configured, DolphinScheduler will use this bundled ZooKeeper by default.
zookeeper:
enabled: true
taskQueue: "zookeeper"
config:
ZOO_4LW_COMMANDS_WHITELIST: srvr,ruok,wchs,cons
config: null
service:
port: "2181"
persistence:
@ -70,6 +72,18 @@ zookeeper:
externalZookeeper:
taskQueue: "zookeeper"
zookeeperQuorum: "127.0.0.1:2181"
zookeeperRoot: "/dolphinscheduler"
common:
configmap:
DOLPHINSCHEDULER_ENV_PATH: "/tmp/dolphinscheduler/env"
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler/files"
RESOURCE_STORAGE_TYPE: "NONE"
RESOURCE_UPLOAD_PATH: "/ds"
FS_DEFAULT_FS: "s3a://xxxx"
FS_S3A_ENDPOINT: "s3.xxx.amazonaws.com"
FS_S3A_ACCESS_KEY: "xxxxxxx"
FS_S3A_SECRET_KEY: "xxxxxxx"
master:
podManagementPolicy: "Parallel"
@ -85,6 +99,18 @@ master:
# If specified, the pod's scheduling constraints.
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
# The jvm options for java instance startup
jvmOptions: ""
resources: {}
# limits:
# memory: "18Gi"
# cpu: "4"
# requests:
# memory: "2Gi"
# cpu: "500m"
# You can use annotations to attach arbitrary non-identifying metadata to objects.
# Clients such as tools and libraries can retrieve this metadata.
annotations: {}
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
configmap:
@ -137,6 +163,18 @@ worker:
# If specified, the pod's scheduling constraints.
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
# The jvm options for java instance startup
jvmOptions: ""
resources: {}
# limits:
# memory: "18Gi"
# cpu: "4"
# requests:
# memory: "2Gi"
# cpu: "500m"
# You can use annotations to attach arbitrary non-identifying metadata to objects.
# Clients such as tools and libraries can retrieve this metadata.
annotations: {}
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
livenessProbe:
@ -213,9 +251,22 @@ alert:
# If specified, the pod's scheduling constraints.
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
# The jvm options for java instance startup
jvmOptions: ""
resources: {}
# limits:
# memory: "4Gi"
# cpu: "1"
# requests:
# memory: "2Gi"
# cpu: "500m"
# You can use annotations to attach arbitrary non-identifying metadata to objects.
# Clients such as tools and libraries can retrieve this metadata.
annotations: {}
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
configmap:
ALERT_PLUGIN_DIR: "/opt/dolphinscheduler/alert/plugin"
XLS_FILE_PATH: "/tmp/xls"
MAIL_SERVER_HOST: ""
MAIL_SERVER_PORT: ""
@ -275,6 +326,18 @@ api:
# If specified, the pod's scheduling constraints.
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
# The jvm options for java instance startup
jvmOptions: ""
resources: {}
# limits:
# memory: "4Gi"
# cpu: "2"
# requests:
# memory: "2Gi"
# cpu: "500m"
# You can use annotations to attach arbitrary non-identifying metadata to objects.
# Clients such as tools and libraries can retrieve this metadata.
annotations: {}
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
livenessProbe:
@ -322,6 +385,16 @@ frontend:
# If specified, the pod's scheduling constraints.
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
resources: {}
# limits:
# memory: "256Mi"
# cpu: "1"
# requests:
# memory: "256Mi"
# cpu: "500m"
# You can use annotations to attach arbitrary non-identifying metadata to objects.
# Clients such as tools and libraries can retrieve this metadata.
annotations: {}
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
livenessProbe:

2
dolphinscheduler-alert/pom.xml

@ -21,7 +21,7 @@
<parent>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler</artifactId>
<version>1.2.1-SNAPSHOT</version>
<version>1.3.2-SNAPSHOT</version>
</parent>
<artifactId>dolphinscheduler-alert</artifactId>
<name>${project.artifactId}</name>

15
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/manager/EmailManager.java

@ -27,28 +27,29 @@ import java.util.Map;
public class EmailManager {
/**
* email send
* @param receviersList the receiver list
* @param receviersCcList the cc List
* @param receiversList the receiver list
* @param receiversCcList the cc List
* @param title the title
* @param content the content
* @param showType the showType
* @return the send result
*/
public Map<String,Object> send(List<String> receviersList,List<String> receviersCcList,String title,String content,String showType){
public Map<String,Object> send(List<String> receiversList,List<String> receiversCcList,String title,String content,String showType){
return MailUtils.sendMails(receviersList, receviersCcList, title, content, showType);
return MailUtils.sendMails(receiversList, receiversCcList, title, content, showType);
}
/**
* msg send
* @param receviersList the receiver list
* @param receiversList the receiver list
* @param title the title
* @param content the content
* @param showType the showType
* @return the send result
*/
public Map<String,Object> send(List<String> receviersList,String title,String content,String showType){
public Map<String,Object> send(List<String> receiversList,String title,String content,String showType){
return MailUtils.sendMails(receviersList,title, content, showType);
return MailUtils.sendMails(receiversList,title, content, showType);
}
}

14
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/plugin/EmailAlertPlugin.java

@ -71,32 +71,32 @@ public class EmailAlertPlugin implements AlertPlugin {
AlertData alert = info.getAlertData();
List<String> receviersList = (List<String>) info.getProp(Constants.PLUGIN_DEFAULT_EMAIL_RECEIVERS);
List<String> receiversList = (List<String>) info.getProp(Constants.PLUGIN_DEFAULT_EMAIL_RECEIVERS);
// receiving group list
// custom receiver
String receivers = alert.getReceivers();
if (StringUtils.isNotEmpty(receivers)) {
String[] splits = receivers.split(",");
receviersList.addAll(Arrays.asList(splits));
receiversList.addAll(Arrays.asList(splits));
}
List<String> receviersCcList = new ArrayList<>();
List<String> receiversCcList = new ArrayList<>();
// custom CC recipients
String receiversCc = alert.getReceiversCc();
if (StringUtils.isNotEmpty(receiversCc)) {
String[] splits = receiversCc.split(",");
receviersCcList.addAll(Arrays.asList(splits));
receiversCcList.addAll(Arrays.asList(splits));
}
if (CollectionUtils.isEmpty(receviersList) && CollectionUtils.isEmpty(receviersCcList)) {
if (CollectionUtils.isEmpty(receiversList) && CollectionUtils.isEmpty(receiversCcList)) {
logger.warn("alert send error : At least one receiver address required");
retMaps.put(Constants.STATUS, "false");
retMaps.put(Constants.MESSAGE, "execution failure,At least one receiver address required.");
return retMaps;
}
retMaps = emailManager.send(receviersList, receviersCcList, alert.getTitle(), alert.getContent(),
retMaps = emailManager.send(receiversList, receiversCcList, alert.getTitle(), alert.getContent(),
alert.getShowType());
//send flag
@ -124,7 +124,7 @@ public class EmailAlertPlugin implements AlertPlugin {
logger.error(e.getMessage(), e);
}
}
if (DingTalkUtils.isEnableDingTalk) {
logger.info("Ding Talk is enable.");
dingTalkManager.send(info);

10
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/runner/AlertSender.java

@ -60,9 +60,9 @@ public class AlertSender {
users = alertDao.listUserByAlertgroupId(alert.getAlertGroupId());
// receiving group list
List<String> receviersList = new ArrayList<>();
List<String> receiversList = new ArrayList<>();
for (User user : users) {
receviersList.add(user.getEmail());
receiversList.add(user.getEmail());
}
AlertData alertData = new AlertData();
@ -78,17 +78,17 @@ public class AlertSender {
AlertInfo alertInfo = new AlertInfo();
alertInfo.setAlertData(alertData);
alertInfo.addProp("receivers", receviersList);
alertInfo.addProp("receivers", receiversList);
AlertPlugin emailPlugin = pluginManager.findOne(Constants.PLUGIN_DEFAULT_EMAIL_ID);
retMaps = emailPlugin.process(alertInfo);
if (retMaps == null) {
alertDao.updateAlert(AlertStatus.EXECUTION_FAILURE, "alert send error", alert.getId());
logger.info("alert send error : return value is null");
logger.error("alert send error : return value is null");
} else if (!Boolean.parseBoolean(String.valueOf(retMaps.get(Constants.STATUS)))) {
alertDao.updateAlert(AlertStatus.EXECUTION_FAILURE, String.valueOf(retMaps.get(Constants.MESSAGE)), alert.getId());
logger.info("alert send error : {}", retMaps.get(Constants.MESSAGE));
logger.error("alert send error : {}", retMaps.get(Constants.MESSAGE));
} else {
alertDao.updateAlert(AlertStatus.EXECUTION_SUCCESS, (String) retMaps.get(Constants.MESSAGE), alert.getId());
logger.info("alert send success");

19
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/template/impl/DefaultHTMLTemplate.java

@ -37,6 +37,7 @@ public class DefaultHTMLTemplate implements AlertTemplate {
public static final Logger logger = LoggerFactory.getLogger(DefaultHTMLTemplate.class);
@Override
public String getMessageFromTemplate(String content, ShowType showType,boolean showAll) {
@ -135,21 +136,7 @@ public class DefaultHTMLTemplate implements AlertTemplate {
checkNotNull(content);
String htmlTableThead = StringUtils.isEmpty(title) ? "" : String.format("<thead>%s</thead>\n",title);
return "<html>\n" +
" <head>\n" +
" <title>dolphinscheduler</title>\n" +
" <meta name='Keywords' content=''>\n" +
" <meta name='Description' content=''>\n" +
" <style type=\"text/css\">\n" +
" table {margin-top:0px;padding-top:0px;border:1px solid;font-size: 14px;color: #333333;border-width: 1px;border-color: #666666;border-collapse: collapse;}\n" +
" table th {border-width: 1px;padding: 8px;border-style: solid;border-color: #666666;background-color: #dedede;text-align: right;}\n" +
" table td {border-width: 1px;padding: 8px;border-style: solid;border-color: #666666;background-color: #ffffff;text-align: right;}\n" +
" </style>\n" +
" </head>\n" +
" <body style=\"margin:0;padding:0\">\n" +
" <table border=\"1px\" cellpadding=\"5px\" cellspacing=\"-10px\">\n" + htmlTableThead + content +
" </table>\n" +
" </body>\n" +
"</html>";
return Constants.HTML_HEADER_PREFIX +htmlTableThead + content + Constants.TABLE_BODY_HTML_TAIL;
}
}

4
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/Constants.java

@ -174,6 +174,10 @@ public class Constants {
public static final String DINGTALK_ENABLE = "dingtalk.isEnable";
public static final String HTML_HEADER_PREFIX = "<!DOCTYPE HTML PUBLIC '-//W3C//DTD HTML 4.01 Transitional//EN' 'http://www.w3.org/TR/html4/loose.dtd'><html><head><title>dolphinscheduler</title><meta name='Keywords' content=''><meta name='Description' content=''><style type=\"text/css\">table {margin-top:0px;padding-top:0px;border:1px solid;font-size: 14px;color: #333333;border-width: 1px;border-color: #666666;border-collapse: collapse;}table th {border-width: 1px;padding: 8px;border-style: solid;border-color: #666666;background-color: #dedede;text-align: left;}table td {border-width: 1px;padding: 8px;border-style: solid;border-color: #666666;background-color: #ffffff;text-align: left;}</style></head><body style=\"margin:0;padding:0\"><table border=\"1px\" cellpadding=\"5px\" cellspacing=\"-10px\"> ";
public static final String TABLE_BODY_HTML_TAIL = "</table></body></html>";
/**
* plugin config
*/
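The refactor in DefaultHTMLTemplate replaces the inline HTML string with the shared HTML_HEADER_PREFIX and TABLE_BODY_HTML_TAIL constants added here. A minimal standalone sketch of how an alert table is now assembled; the two constants below are simplified stand-ins for the real ones (which carry the full inline CSS shown above), and the thead/row values are made-up sample data:

public class HtmlTemplateDemo {
    // Simplified stand-ins for Constants.HTML_HEADER_PREFIX / TABLE_BODY_HTML_TAIL.
    private static final String HTML_HEADER_PREFIX = "<html><head><title>dolphinscheduler</title></head><body><table>";
    private static final String TABLE_BODY_HTML_TAIL = "</table></body></html>";

    public static void main(String[] args) {
        // Sample data only; in DolphinScheduler the thead and rows are generated from the alert content.
        String htmlTableThead = "<thead><tr><th>mysql service name</th><th>port</th></tr></thead>";
        String content = "<tr><td>mysql200</td><td>3306</td></tr>";
        // Same concatenation shape as the refactored getMessageFromTemplate(...)
        String html = HTML_HEADER_PREFIX + htmlTableThead + content + TABLE_BODY_HTML_TAIL;
        System.out.println(html);
    }
}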

30
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/EnterpriseWeChatUtils.java

@ -48,8 +48,8 @@ public class EnterpriseWeChatUtils {
private static final String ENTERPRISE_WE_CHAT_TOKEN_URL = PropertyUtils.getString(Constants.ENTERPRISE_WECHAT_TOKEN_URL);
private static final String ENTERPRISE_WE_CHAT_TOKEN_URL_REPLACE = ENTERPRISE_WE_CHAT_TOKEN_URL == null ? null : ENTERPRISE_WE_CHAT_TOKEN_URL
.replaceAll("\\$corpId", ENTERPRISE_WE_CHAT_CORP_ID)
.replaceAll("\\$secret", ENTERPRISE_WE_CHAT_SECRET);
.replaceAll("\\{corpId\\}", ENTERPRISE_WE_CHAT_CORP_ID)
.replaceAll("\\{secret\\}", ENTERPRISE_WE_CHAT_SECRET);
private static final String ENTERPRISE_WE_CHAT_PUSH_URL = PropertyUtils.getString(Constants.ENTERPRISE_WECHAT_PUSH_URL);
@ -120,9 +120,9 @@ public class EnterpriseWeChatUtils {
* @return Enterprise WeChat send message
*/
public static String makeTeamSendMsg(String toParty, String agentId, String msg) {
return ENTERPRISE_WE_CHAT_TEAM_SEND_MSG.replaceAll("\\$toParty", toParty)
.replaceAll("\\$agentId", agentId)
.replaceAll("\\$msg", msg);
return ENTERPRISE_WE_CHAT_TEAM_SEND_MSG.replaceAll("\\{toParty\\}", toParty)
.replaceAll("\\{agentId\\}", agentId)
.replaceAll("\\{msg\\}", msg);
}
/**
@ -135,9 +135,9 @@ public class EnterpriseWeChatUtils {
*/
public static String makeTeamSendMsg(Collection<String> toParty, String agentId, String msg) {
String listParty = FuncUtils.mkString(toParty, "|");
return ENTERPRISE_WE_CHAT_TEAM_SEND_MSG.replaceAll("\\$toParty", listParty)
.replaceAll("\\$agentId", agentId)
.replaceAll("\\$msg", msg);
return ENTERPRISE_WE_CHAT_TEAM_SEND_MSG.replaceAll("\\{toParty\\}", listParty)
.replaceAll("\\{agentId\\}", agentId)
.replaceAll("\\{msg\\}", msg);
}
/**
@ -149,9 +149,9 @@ public class EnterpriseWeChatUtils {
* @return Enterprise WeChat send message
*/
public static String makeUserSendMsg(String toUser, String agentId, String msg) {
return ENTERPRISE_WE_CHAT_USER_SEND_MSG.replaceAll("\\$toUser", toUser)
.replaceAll("\\$agentId", agentId)
.replaceAll("\\$msg", msg);
return ENTERPRISE_WE_CHAT_USER_SEND_MSG.replaceAll("\\{toUser\\}", toUser)
.replaceAll("\\{agentId\\}", agentId)
.replaceAll("\\{msg\\}", msg);
}
/**
@ -164,9 +164,9 @@ public class EnterpriseWeChatUtils {
*/
public static String makeUserSendMsg(Collection<String> toUser, String agentId, String msg) {
String listUser = FuncUtils.mkString(toUser, "|");
return ENTERPRISE_WE_CHAT_USER_SEND_MSG.replaceAll("\\$toUser", listUser)
.replaceAll("\\$agentId", agentId)
.replaceAll("\\$msg", msg);
return ENTERPRISE_WE_CHAT_USER_SEND_MSG.replaceAll("\\{toUser\\}", listUser)
.replaceAll("\\{agentId\\}", agentId)
.replaceAll("\\{msg\\}", msg);
}
/**
@ -179,7 +179,7 @@ public class EnterpriseWeChatUtils {
* @throws IOException the IOException
*/
public static String sendEnterpriseWeChat(String charset, String data, String token) throws IOException {
String enterpriseWeChatPushUrlReplace = ENTERPRISE_WE_CHAT_PUSH_URL.replaceAll("\\$token", token);
String enterpriseWeChatPushUrlReplace = ENTERPRISE_WE_CHAT_PUSH_URL.replaceAll("\\{token\\}", token);
CloseableHttpClient httpClient = HttpClients.createDefault();
try {
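The placeholder style in the WeChat URL and message templates changes from $corpId to {corpId}. Because String.replaceAll() treats its first argument as a regular expression, the braces are escaped in the calls above. A standalone sketch of the substitution; the corp id and secret values are made up for illustration:

public class WeChatUrlDemo {
    public static void main(String[] args) {
        // Literal braces in the {placeholder} style must be escaped in the regex.
        String template = "https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid={corpId}&corpsecret={secret}";
        String url = template
                .replaceAll("\\{corpId\\}", "my-corp-id")
                .replaceAll("\\{secret\\}", "my-secret");
        System.out.println(url);
        // https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=my-corp-id&corpsecret=my-secret
    }
}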

5
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/MailUtils.java

@ -65,6 +65,10 @@ public class MailUtils {
public static final AlertTemplate alertTemplate = AlertTemplateFactory.getMessageTemplate();
// Avoid garbled Chinese file names in Excel attachments
static {
System.setProperty("mail.mime.splitlongparameters","false");
}
/**
* send mail to receivers
@ -341,4 +345,5 @@ public class MailUtils {
retMap.put(Constants.MESSAGE, "Send email to {" + String.join(",", receivers) + "} failed," + e.toString());
}
}
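The new static block disables JavaMail's splitting of long encoded parameters, so non-ASCII attachment names (for example Chinese Excel file names) are kept in a single MIME header parameter instead of being split across segments that some mail clients render as garbled text. A minimal sketch showing where the property has to be set, i.e. before any message is built:

public class MailPropertyDemo {
    public static void main(String[] args) {
        // "false" keeps an encoded attachment file name in a single header parameter.
        System.setProperty("mail.mime.splitlongparameters", "false");
        System.out.println(System.getProperty("mail.mime.splitlongparameters"));
    }
}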

8
dolphinscheduler-alert/src/main/resources/alert.properties

@ -41,10 +41,10 @@ enterprise.wechat.enable=false
#enterprise.wechat.secret=xxxxxxx
#enterprise.wechat.agent.id=xxxxxxx
#enterprise.wechat.users=xxxxxxx
#enterprise.wechat.token.url=https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret
#enterprise.wechat.push.url=https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token
#enterprise.wechat.team.send.msg={\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}
#enterprise.wechat.user.send.msg={\"touser\":\"$toUser\",\"agentid\":\"$agentId\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"$msg\"}}
#enterprise.wechat.token.url=https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid={corpId}&corpsecret={secret}
#enterprise.wechat.push.url=https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token={token}
#enterprise.wechat.team.send.msg={\"toparty\":\"{toParty}\",\"agentid\":\"{agentId}\",\"msgtype\":\"text\",\"text\":{\"content\":\"{msg}\"},\"safe\":\"0\"}
#enterprise.wechat.user.send.msg={\"touser\":\"{toUser}\",\"agentid\":\"{agentId}\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"{msg}\"}}
plugin.dir=/Users/xx/your/path/to/plugin/dir

2
dolphinscheduler-alert/src/main/resources/logback-alert.xml

@ -46,7 +46,7 @@
<root level="INFO">
<appender-ref ref="STDOUT"/>
<appender-ref ref="APILOGFILE"/>
<appender-ref ref="ALERTLOGFILE"/>
</root>
</configuration>

43
dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/template/impl/DefaultHTMLTemplateTest.java

@ -17,6 +17,7 @@
package org.apache.dolphinscheduler.alert.template.impl;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.alert.utils.Constants;
import org.apache.dolphinscheduler.common.enums.ShowType;
import org.junit.Test;
import org.slf4j.Logger;
@ -60,16 +61,16 @@ public class DefaultHTMLTemplateTest{
LinkedHashMap<String, Object> map1 = new LinkedHashMap<>();
map1.put("mysql service name","mysql200");
map1.put("mysql address","192.168.xx.xx");
map1.put("database client connections","190");
map1.put("port","3306");
map1.put("no index of number","80");
map1.put("database client connections","190");
LinkedHashMap<String, Object> map2 = new LinkedHashMap<>();
map2.put("mysql service name","mysql210");
map2.put("mysql address","192.168.xx.xx");
map2.put("database client connections","90");
map2.put("port","3306");
map2.put("no index of number","10");
map2.put("database client connections","90");
List<LinkedHashMap<String, Object>> maps = new ArrayList<>();
maps.add(0,map1);
@ -82,42 +83,14 @@ public class DefaultHTMLTemplateTest{
private String generateMockTableTypeResultByHand(){
return "<html>\n" +
" <head>\n" +
" <title>dolphinscheduler</title>\n" +
" <meta name='Keywords' content=''>\n" +
" <meta name='Description' content=''>\n" +
" <style type=\"text/css\">\n" +
" table {margin-top:0px;padding-top:0px;border:1px solid;font-size: 14px;color: #333333;border-width: 1px;border-color: #666666;border-collapse: collapse;}\n" +
" table th {border-width: 1px;padding: 8px;border-style: solid;border-color: #666666;background-color: #dedede;text-align: right;}\n" +
" table td {border-width: 1px;padding: 8px;border-style: solid;border-color: #666666;background-color: #ffffff;text-align: right;}\n" +
" </style>\n" +
" </head>\n" +
" <body style=\"margin:0;padding:0\">\n" +
" <table border=\"1px\" cellpadding=\"5px\" cellspacing=\"-10px\">\n" +
"<thead><tr><th>mysql service name</th><th>mysql address</th><th>port</th><th>no index of number</th><th>database client connections</th></tr></thead>\n" +
"<tr><td>mysql200</td><td>192.168.xx.xx</td><td>3306</td><td>80</td><td>190</td></tr><tr><td>mysql210</td><td>192.168.xx.xx</td><td>3306</td><td>10</td><td>90</td></tr> </table>\n" +
" </body>\n" +
"</html>";
return Constants.HTML_HEADER_PREFIX +
"<thead><tr><th>mysql service name</th><th>mysql address</th><th>database client connections</th><th>port</th><th>no index of number</th></tr></thead>\n" +
"<tr><td>mysql200</td><td>192.168.xx.xx</td><td>190</td><td>3306</td><td>80</td></tr><tr><td>mysql210</td><td>192.168.xx.xx</td><td>90</td><td>3306</td><td>10</td></tr>" + Constants.TABLE_BODY_HTML_TAIL;
}
private String generateMockTextTypeResultByHand(){
return "<html>\n" +
" <head>\n" +
" <title>dolphinscheduler</title>\n" +
" <meta name='Keywords' content=''>\n" +
" <meta name='Description' content=''>\n" +
" <style type=\"text/css\">\n" +
" table {margin-top:0px;padding-top:0px;border:1px solid;font-size: 14px;color: #333333;border-width: 1px;border-color: #666666;border-collapse: collapse;}\n" +
" table th {border-width: 1px;padding: 8px;border-style: solid;border-color: #666666;background-color: #dedede;text-align: right;}\n" +
" table td {border-width: 1px;padding: 8px;border-style: solid;border-color: #666666;background-color: #ffffff;text-align: right;}\n" +
" </style>\n" +
" </head>\n" +
" <body style=\"margin:0;padding:0\">\n" +
" <table border=\"1px\" cellpadding=\"5px\" cellspacing=\"-10px\">\n" +
"<tr><td>{\"mysql service name\":\"mysql200\",\"mysql address\":\"192.168.xx.xx\",\"port\":\"3306\",\"no index of number\":\"80\",\"database client connections\":\"190\"}</td></tr><tr><td>{\"mysql service name\":\"mysql210\",\"mysql address\":\"192.168.xx.xx\",\"port\":\"3306\",\"no index of number\":\"10\",\"database client connections\":\"90\"}</td></tr> </table>\n" +
" </body>\n" +
"</html>";
return Constants.HTML_HEADER_PREFIX + "<tr><td>{\"mysql service name\":\"mysql200\",\"mysql address\":\"192.168.xx.xx\",\"database client connections\":\"190\",\"port\":\"3306\",\"no index of number\":\"80\"}</td></tr><tr><td>{\"mysql service name\":\"mysql210\",\"mysql address\":\"192.168.xx.xx\",\"database client connections\":\"90\",\"port\":\"3306\",\"no index of number\":\"10\"}</td></tr>" + Constants.TABLE_BODY_HTML_TAIL;
}
}

4
dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/utils/EnterpriseWeChatUtilsTest.java

@ -55,8 +55,8 @@ public class EnterpriseWeChatUtilsTest {
private static final String enterpriseWechatUsers="LiGang,journey";
private static final String msg = "hello world";
private static final String enterpriseWechatTeamSendMsg = "{\\\"toparty\\\":\\\"$toParty\\\",\\\"agentid\\\":\\\"$agentId\\\",\\\"msgtype\\\":\\\"text\\\",\\\"text\\\":{\\\"content\\\":\\\"$msg\\\"},\\\"safe\\\":\\\"0\\\"}";
private static final String enterpriseWechatUserSendMsg = "{\\\"touser\\\":\\\"$toUser\\\",\\\"agentid\\\":\\\"$agentId\\\",\\\"msgtype\\\":\\\"markdown\\\",\\\"markdown\\\":{\\\"content\\\":\\\"$msg\\\"}}";
private static final String enterpriseWechatTeamSendMsg = "{\\\"toparty\\\":\\\"{toParty}\\\",\\\"agentid\\\":\\\"{agentId}\\\",\\\"msgtype\\\":\\\"text\\\",\\\"text\\\":{\\\"content\\\":\\\"{msg}\\\"},\\\"safe\\\":\\\"0\\\"}";
private static final String enterpriseWechatUserSendMsg = "{\\\"touser\\\":\\\"{toUser}\\\",\\\"agentid\\\":\\\"{agentId}\\\",\\\"msgtype\\\":\\\"markdown\\\",\\\"markdown\\\":{\\\"content\\\":\\\"{msg}\\\"}}";
@Before
public void init(){

2
dolphinscheduler-api/pom.xml

@ -21,7 +21,7 @@
<parent>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler</artifactId>
<version>1.2.1-SNAPSHOT</version>
<version>1.3.2-SNAPSHOT</version>
</parent>
<artifactId>dolphinscheduler-api</artifactId>
<name>${project.artifactId}</name>

3
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/configuration/AppConfiguration.java

@ -35,6 +35,7 @@ public class AppConfiguration implements WebMvcConfigurer {
public static final String LOGIN_INTERCEPTOR_PATH_PATTERN = "/**/*";
public static final String LOGIN_PATH_PATTERN = "/login";
public static final String REGISTER_PATH_PATTERN = "/users/register";
public static final String PATH_PATTERN = "/**";
public static final String LOCALE_LANGUAGE_COOKIE = "language";
public static final int COOKIE_MAX_AGE = 3600;
@ -76,7 +77,7 @@ public class AppConfiguration implements WebMvcConfigurer {
//i18n
registry.addInterceptor(localeChangeInterceptor());
registry.addInterceptor(loginInterceptor()).addPathPatterns(LOGIN_INTERCEPTOR_PATH_PATTERN).excludePathPatterns(LOGIN_PATH_PATTERN,"/swagger-resources/**", "/webjars/**", "/v2/**", "/doc.html", "*.html", "/ui/**");
registry.addInterceptor(loginInterceptor()).addPathPatterns(LOGIN_INTERCEPTOR_PATH_PATTERN).excludePathPatterns(LOGIN_PATH_PATTERN, REGISTER_PATH_PATTERN, "/swagger-resources/**", "/webjars/**", "/v2/**", "/doc.html", "*.html", "/ui/**");
}

2
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/DataSourceController.java

@ -278,7 +278,7 @@ public class DataSourceController extends BaseController {
@RequestParam(value = "password") String password,
@RequestParam(value = "connectType") DbConnectType connectType,
@RequestParam(value = "other") String other) {
logger.info("login user {}, connect datasource: {} failure, note: {}, type: {}, connectType: {}, other: {}",
logger.info("login user {}, connect datasource: {}, note: {}, type: {}, connectType: {}, other: {}",
loginUser.getUserName(), name, note, type, connectType, other);
String parameter = dataSourceService.buildParameter(name, note, type, host, port, database, principal, userName, password, connectType, other);
Boolean isConnection = dataSourceService.checkConnection(type, parameter);

2
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java

@ -63,7 +63,7 @@ public class ResourcesController extends BaseController {
private UdfFuncService udfFuncService;
/**
* create resource
* create directory
*
* @param loginUser login user
* @param alias alias

31
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/UsersController.java

@ -410,5 +410,36 @@ public class UsersController extends BaseController {
}
}
/**
* user register
*
* @param userName user name
* @param userPassword user password
* @param repeatPassword repeat password
* @param email user email
*/
@ApiOperation(value="registerUser",notes = "REGISTER_USER_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "userName", value = "USER_NAME", type = "String"),
@ApiImplicitParam(name = "userPassword", value = "USER_PASSWORD", type = "String"),
@ApiImplicitParam(name = "repeatPassword", value = "REPEAT_PASSWORD", type = "String"),
@ApiImplicitParam(name = "email", value = "EMAIL", type = "String"),
})
@PostMapping("/register")
@ResponseStatus(HttpStatus.OK)
@ApiException(CREATE_USER_ERROR)
public Result<Object> registerUser(@RequestParam(value = "userName") String userName,
@RequestParam(value = "userPassword") String userPassword,
@RequestParam(value = "repeatPassword") String repeatPassword,
@RequestParam(value = "email") String email) throws Exception {
userName = userName.replaceAll("[\n|\r|\t]", "");
userPassword = userPassword.replaceAll("[\n|\r|\t]", "");
repeatPassword = repeatPassword.replaceAll("[\n|\r|\t]", "");
email = email.replaceAll("[\n|\r|\t]", "");
logger.info("user self-register, userName: {}, userPassword {}, repeatPassword {}, eamil {}",
userName, userPassword, repeatPassword, email);
Map<String, Object> result = usersService.registerUser(userName, userPassword, repeatPassword, email);
return returnDataList(result);
}
}
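A hedged sketch of calling the new self-registration endpoint with the JDK 11 HttpClient. The base URL (host and port) is an assumption for illustration; the path follows the /users/register mapping above and the /dolphinscheduler context path from application-api.properties, and the parameter values are borrowed from UsersControllerTest:

import java.net.URI;
import java.net.URLEncoder;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.nio.charset.StandardCharsets;

public class RegisterUserDemo {
    public static void main(String[] args) throws Exception {
        // Assumed base URL; adjust to your API server address and port.
        String baseUrl = "http://localhost:12345/dolphinscheduler";
        String form = "userName=" + URLEncoder.encode("user_test", StandardCharsets.UTF_8)
                + "&userPassword=" + URLEncoder.encode("123456qwe?", StandardCharsets.UTF_8)
                + "&repeatPassword=" + URLEncoder.encode("123456qwe?", StandardCharsets.UTF_8)
                + "&email=" + URLEncoder.encode("12343534@qq.com", StandardCharsets.UTF_8);
        HttpRequest request = HttpRequest.newBuilder(URI.create(baseUrl + "/users/register"))
                .header("Content-Type", "application/x-www-form-urlencoded")
                .POST(HttpRequest.BodyPublishers.ofString(form))
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body());
    }
}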

4
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/Directory.java

@ -1,5 +1,3 @@
package org.apache.dolphinscheduler.api.dto.resources;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
@ -16,6 +14,8 @@ package org.apache.dolphinscheduler.api.dto.resources;
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.dto.resources;
/**
* directory
*/

4
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/FileLeaf.java

@ -1,5 +1,3 @@
package org.apache.dolphinscheduler.api.dto.resources;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
@ -16,6 +14,8 @@ package org.apache.dolphinscheduler.api.dto.resources;
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.dto.resources;
/**
* file leaf
*/

16
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/ResourceComponent.java

@ -1,11 +1,3 @@
package org.apache.dolphinscheduler.api.dto.resources;
import com.fasterxml.jackson.annotation.JsonPropertyOrder;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import java.util.ArrayList;
import java.util.List;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
@ -22,6 +14,14 @@ import java.util.List;
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.dto.resources;
import com.fasterxml.jackson.annotation.JsonPropertyOrder;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import java.util.ArrayList;
import java.util.List;
/**
* resource component
*/

9
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/visitor/Visitor.java

@ -1,8 +1,3 @@
package org.apache.dolphinscheduler.api.dto.resources.visitor;
import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
@ -19,6 +14,10 @@ import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.dto.resources.visitor;
import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
/**
* Visitor
*/

4
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java

@ -35,7 +35,7 @@ public enum Status {
USER_NAME_NULL(10004,"user name is null", "用户名不能为空"),
HDFS_OPERATION_ERROR(10006, "hdfs operation error", "hdfs操作错误"),
TASK_INSTANCE_NOT_FOUND(10008, "task instance not found", "任务实例不存在"),
TENANT_NAME_EXIST(10009, "tenant code already exists", "租户编码不能为空"),
TENANT_NAME_EXIST(10009, "tenant code {0} already exists", "租户编码[{0}]已存在"),
USER_NOT_EXIST(10010, "user {0} not exists", "用户[{0}]不存在"),
ALERT_GROUP_NOT_EXIST(10011, "alarm group not found", "告警组不存在"),
ALERT_GROUP_EXIST(10012, "alarm group already exists", "告警组名称已存在"),
@ -192,7 +192,7 @@ public enum Status {
RESOURCE_IS_USED(20014, "resource file is used by process definition","资源文件被上线的流程定义使用了"),
PARENT_RESOURCE_NOT_EXIST(20015, "parent resource not exist","父资源文件不存在"),
RESOURCE_NOT_EXIST_OR_NO_PERMISSION(20016, "resource not exist or no permission,please view the task node and remove error resource","请检查任务节点并移除无权限或者已删除的资源"),
RESOURCE_IS_AUTHORIZED(20017, "resource is authorized to user {0},suffix not allowed to be modified", "资源文件已授权其他用户[{0}],后缀不允许修改"),
USER_NO_OPERATION_PERM(30001, "user has no operation privilege", "当前用户没有操作权限"),
USER_NO_OPERATION_PROJECT_PERM(30002, "user {0} is not has project {1} permission", "当前用户[{0}]没有[{1}]项目的操作权限"),
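The updated TENANT_NAME_EXIST message now carries a {0} placeholder in both the English and Chinese text. A small sketch of how such a placeholder is expanded, assuming MessageFormat-style formatting as the {0} syntax implies; the tenant code is a made-up example:

import java.text.MessageFormat;

public class StatusMessageDemo {
    public static void main(String[] args) {
        // {0} is replaced by the offending tenant code when the error is reported.
        String msg = MessageFormat.format("tenant code {0} already exists", "hadoop_tenant");
        System.out.println(msg);
    }
}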

18
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/interceptor/LoginHandlerInterceptor.java

@ -50,20 +50,10 @@ public class LoginHandlerInterceptor implements HandlerInterceptor {
/**
* Intercept the execution of a handler. Called after HandlerMapping determined
* an appropriate handler object, but before HandlerAdapter invokes the handler.
* <p>DispatcherServlet processes a handler in an execution chain, consisting
* of any number of interceptors, with the handler itself at the end.
* With this method, each interceptor can decide to abort the execution chain,
* typically sending a HTTP error or writing a custom response.
* <p><strong>Note:</strong> special considerations apply for asynchronous
* request processing. For more details see
* {@link org.springframework.web.servlet.AsyncHandlerInterceptor}.
* @param request current HTTP request
* @param response current HTTP response
* @param handler chosen handler to execute, for type and/or instance evaluation
* @return {@code true} if the execution chain should proceed with the
* next interceptor or the handler itself. Else, DispatcherServlet assumes
* that this interceptor has already dealt with the response itself.
* @param request current HTTP request
* @param response current HTTP response
* @param handler chosen handler to execute, for type and/or instance evaluation
* @return boolean true or false
*/
@Override
public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler) {

41
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/DataSourceService.java

@ -220,12 +220,20 @@ public class DataSourceService extends BaseService{
String parameter = dataSource.getConnectionParams();
BaseDataSource datasourceForm = DataSourceFactory.getDatasource(dataSource.getType(), parameter);
DbConnectType connectType = null;
String hostSeperator = Constants.DOUBLE_SLASH;
if(DbType.ORACLE.equals(dataSource.getType())){
connectType = ((OracleDataSource) datasourceForm).getConnectType();
if(DbConnectType.ORACLE_SID.equals(connectType)){
hostSeperator = Constants.AT_SIGN;
}
}
String database = datasourceForm.getDatabase();
// jdbc connection params
String other = datasourceForm.getOther();
String address = datasourceForm.getAddress();
String[] hostsPorts = getHostsAndPort(address);
String[] hostsPorts = getHostsAndPort(address,hostSeperator);
// ip host
String host = hostsPorts[0];
// port
@ -261,6 +269,10 @@ public class DataSourceService extends BaseService{
map.put(NAME, dataSourceName);
map.put(NOTE, desc);
map.put(TYPE, dataSourceType);
if (connectType != null) {
map.put(Constants.ORACLE_DB_CONNECT_TYPE, connectType);
}
map.put(HOST, host);
map.put(PORT, port);
map.put(PRINCIPAL, datasourceForm.getPrincipal());
@ -486,13 +498,10 @@ public class DataSourceService extends BaseService{
String password, DbConnectType connectType, String other) {
String address = buildAddress(type, host, port, connectType);
String jdbcUrl;
if (Constants.ORACLE.equals(type.name())
&& connectType == DbConnectType.ORACLE_SID) {
jdbcUrl = address + ":" + database;
} else {
jdbcUrl = address + "/" + database;
Map<String, Object> parameterMap = new LinkedHashMap<String, Object>(6);
String jdbcUrl = address + "/" + database;
if (Constants.ORACLE.equals(type.name())) {
parameterMap.put(Constants.ORACLE_DB_CONNECT_TYPE, connectType);
}
if (CommonUtils.getKerberosStartupState() &&
@ -513,7 +522,6 @@ public class DataSourceService extends BaseService{
separator = ";";
}
Map<String, Object> parameterMap = new LinkedHashMap<String, Object>(6);
parameterMap.put(TYPE, connectType);
parameterMap.put(Constants.ADDRESS, address);
parameterMap.put(Constants.DATABASE, database);
@ -683,12 +691,23 @@ public class DataSourceService extends BaseService{
/**
* get host and port by address
*
* @param address
* @param address address
* @return string array: [host, port]
*/
private String[] getHostsAndPort(String address) {
return getHostsAndPort(address,Constants.DOUBLE_SLASH);
}
/**
* get host and port by address
*
* @param address address
* @param separator separator
* @return string array: [host, port]
*/
private String[] getHostsAndPort(String address,String separator) {
String[] result = new String[2];
String[] tmpArray = address.split(Constants.DOUBLE_SLASH);
String[] tmpArray = address.split(separator);
String hostsAndPorts = tmpArray[tmpArray.length - 1];
StringBuilder hosts = new StringBuilder();
String[] hostPortArray = hostsAndPorts.split(Constants.COMMA);
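getHostsAndPort now takes a separator so that Oracle SID addresses, which place '@' before the host, are parsed the same way as '//' style JDBC addresses. A simplified, standalone sketch of the idea (not the project's exact implementation; the sample addresses are illustrative):

public class HostPortParseDemo {
    static String[] getHostsAndPort(String address, String separator) {
        // Everything after the separator is the host[:port][,host[:port]...] part.
        String[] tmp = address.split(separator);
        String hostsAndPorts = tmp[tmp.length - 1];
        StringBuilder hosts = new StringBuilder();
        StringBuilder ports = new StringBuilder();
        for (String hostPort : hostsAndPorts.split(",")) {
            String[] hp = hostPort.split(":");
            hosts.append(hp[0]).append(",");
            ports.append(hp[1]).append(",");
        }
        return new String[]{
                hosts.substring(0, hosts.length() - 1),
                ports.substring(0, ports.length() - 1)};
    }

    public static void main(String[] args) {
        // '//' separator for a MySQL-style address with multiple hosts
        System.out.println(String.join(" | ",
                getHostsAndPort("jdbc:mysql://192.168.1.1:3306,192.168.1.2:3306", "//")));
        // '@' separator for an Oracle SID address
        System.out.println(String.join(" | ",
                getHostsAndPort("jdbc:oracle:thin:@192.168.1.1:1521", "@")));
    }
}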

54
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/MonitorService.java

@ -16,29 +16,33 @@
*/
package org.apache.dolphinscheduler.api.service;
import static org.apache.dolphinscheduler.common.utils.Preconditions.checkNotNull;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.utils.ZookeeperMonitor;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ZKNodeType;
import org.apache.dolphinscheduler.dao.MonitorDBDao;
import org.apache.dolphinscheduler.common.model.Server;
import org.apache.dolphinscheduler.common.model.WorkerServerModel;
import org.apache.dolphinscheduler.dao.MonitorDBDao;
import org.apache.dolphinscheduler.dao.entity.MonitorRecord;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.entity.ZookeeperRecord;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.apache.dolphinscheduler.common.utils.Preconditions.*;
import com.google.common.collect.Sets;
/**
* monitor service
*/
@Service
public class MonitorService extends BaseService{
public class MonitorService extends BaseService {
@Autowired
private ZookeeperMonitor zookeeperMonitor;
@ -108,15 +112,41 @@ public class MonitorService extends BaseService{
public Map<String,Object> queryWorker(User loginUser) {
Map<String, Object> result = new HashMap<>(5);
List<Server> masterServers = getServerListFromZK(false);
result.put(Constants.DATA_LIST, masterServers);
List<WorkerServerModel> workerServers = getServerListFromZK(false)
.stream()
.map((Server server) -> {
WorkerServerModel model = new WorkerServerModel();
model.setId(server.getId());
model.setHost(server.getHost());
model.setPort(server.getPort());
model.setZkDirectories(Sets.newHashSet(server.getZkDirectory()));
model.setResInfo(server.getResInfo());
model.setCreateTime(server.getCreateTime());
model.setLastHeartbeatTime(server.getLastHeartbeatTime());
return model;
})
.collect(Collectors.toList());
Map<String, WorkerServerModel> workerHostPortServerMapping = workerServers
.stream()
.collect(Collectors.toMap(
(WorkerServerModel worker) -> {
String[] s = worker.getZkDirectories().iterator().next().split("/");
return s[s.length - 1];
}
, Function.identity()
, (WorkerServerModel oldOne, WorkerServerModel newOne) -> {
oldOne.getZkDirectories().addAll(newOne.getZkDirectories());
return oldOne;
}));
result.put(Constants.DATA_LIST, workerHostPortServerMapping.values());
putMsg(result,Status.SUCCESS);
return result;
}
public List<Server> getServerListFromZK(boolean isMaster){
public List<Server> getServerListFromZK(boolean isMaster) {
checkNotNull(zookeeperMonitor);
ZKNodeType zkNodeType = isMaster ? ZKNodeType.MASTER : ZKNodeType.WORKER;
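queryWorker now folds worker entries that share the same host:port into a single WorkerServerModel, accumulating their ZK directories through the toMap merge function. A simplified sketch of that grouping using plain strings (the ZooKeeper paths are illustrative, not taken from a live registry):

import java.util.*;
import java.util.stream.Collectors;

public class WorkerMergeDemo {
    public static void main(String[] args) {
        // Several ZK directories can describe the same worker host:port;
        // the merge function unions their directories into one entry.
        List<String> zkDirectories = Arrays.asList(
                "/dolphinscheduler/nodes/worker/default/192.168.1.1:1234",
                "/dolphinscheduler/nodes/worker/gpu/192.168.1.1:1234",
                "/dolphinscheduler/nodes/worker/default/192.168.1.2:1234");

        Map<String, Set<String>> byHostPort = zkDirectories.stream()
                .collect(Collectors.toMap(
                        dir -> dir.substring(dir.lastIndexOf('/') + 1),    // key: host:port
                        dir -> new HashSet<>(Collections.singleton(dir)),  // value: its directories
                        (oldOne, newOne) -> { oldOne.addAll(newOne); return oldOne; }));

        byHostPort.forEach((hostPort, dirs) -> System.out.println(hostPort + " -> " + dirs));
    }
}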

5
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessInstanceService.java

@ -478,8 +478,6 @@ public class ProcessInstanceService extends BaseDAGService {
return checkResult;
}
ProcessInstance processInstance = processService.findProcessInstanceDetailById(processInstanceId);
List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(processInstanceId);
if (null == processInstance) {
putMsg(result, Status.PROCESS_INSTANCE_NOT_EXIST, processInstanceId);
return result;
@ -487,8 +485,11 @@ public class ProcessInstanceService extends BaseDAGService {
processService.removeTaskLogFile(processInstanceId);
// delete database cascade
int delete = processService.deleteWorkProcessInstanceById(processInstanceId);
processService.deleteAllSubWorkProcessByParentId(processInstanceId);
processService.deleteWorkProcessMapByParentId(processInstanceId);

66
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java

@ -31,10 +31,7 @@ import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.UdfFunc;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.entity.*;
import org.apache.dolphinscheduler.dao.mapper.*;
import org.apache.dolphinscheduler.dao.utils.ResourceProcessDefinitionUtils;
import org.slf4j.Logger;
@ -351,24 +348,40 @@ public class ResourcesService extends BaseService {
throw new ServiceException(Status.HDFS_OPERATION_ERROR);
}
String nameWithSuffix = name;
if (!resource.isDirectory()) {
//get the file suffix
String suffix = originResourceName.substring(originResourceName.lastIndexOf("."));
//if the name without suffix then add it ,else use the origin name
if(!name.endsWith(suffix)){
nameWithSuffix = nameWithSuffix + suffix;
//get the origin file suffix
String originSuffix = FileUtils.suffix(originFullName);
String suffix = FileUtils.suffix(fullName);
boolean suffixIsChanged = false;
if (StringUtils.isBlank(suffix) && StringUtils.isNotBlank(originSuffix)) {
suffixIsChanged = true;
}
if (StringUtils.isNotBlank(suffix) && !suffix.equals(originSuffix)) {
suffixIsChanged = true;
}
//verify whether suffix is changed
if (suffixIsChanged) {
//need verify whether this resource is authorized to other users
Map<String, Object> columnMap = new HashMap<>();
columnMap.put("resources_id", resourceId);
List<ResourcesUser> resourcesUsers = resourceUserMapper.selectByMap(columnMap);
if (CollectionUtils.isNotEmpty(resourcesUsers)) {
List<Integer> userIds = resourcesUsers.stream().map(ResourcesUser::getUserId).collect(Collectors.toList());
List<User> users = userMapper.selectBatchIds(userIds);
String userNames = users.stream().map(User::getUserName).collect(Collectors.toList()).toString();
logger.error("resource is authorized to user {},suffix not allowed to be modified", userNames);
putMsg(result,Status.RESOURCE_IS_AUTHORIZED,userNames);
return result;
}
}
}
// updateResource data
List<Integer> childrenResource = listAllChildren(resource,false);
String oldFullName = resource.getFullName();
Date now = new Date();
resource.setAlias(nameWithSuffix);
resource.setAlias(name);
resource.setFullName(fullName);
resource.setDescription(desc);
resource.setUpdateTime(now);
@ -380,7 +393,7 @@ public class ResourcesService extends BaseService {
List<Resource> childResourceList = new ArrayList<>();
List<Resource> resourceList = resourcesMapper.listResourceByIds(childrenResource.toArray(new Integer[childrenResource.size()]));
childResourceList = resourceList.stream().map(t -> {
t.setFullName(t.getFullName().replaceFirst(oldFullName, matcherFullName));
t.setFullName(t.getFullName().replaceFirst(originFullName, matcherFullName));
t.setUpdateTime(now);
return t;
}).collect(Collectors.toList());
@ -990,10 +1003,23 @@ public class ResourcesService extends BaseService {
logger.error("resource id {} is directory,can't download it", resourceId);
throw new RuntimeException("cant't download directory");
}
User user = userMapper.queryDetailsById(resource.getUserId());
String tenantCode = tenantMapper.queryById(user.getTenantId()).getTenantCode();
String hdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getAlias());
int userId = resource.getUserId();
User user = userMapper.selectById(userId);
if(user == null){
logger.error("user id {} not exists", userId);
throw new RuntimeException(String.format("resource owner id %d not exist",userId));
}
Tenant tenant = tenantMapper.queryById(user.getTenantId());
if(tenant == null){
logger.error("tenant id {} not exists", user.getTenantId());
throw new RuntimeException(String.format("The tenant id %d of resource owner not exist",user.getTenantId()));
}
String tenantCode = tenant.getTenantCode();
String hdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getFullName());
String localFileName = FileUtils.getDownloadFilename(resource.getAlias());
logger.info("resource hdfs path is {} ", hdfsFileName);
@ -1158,8 +1184,8 @@ public class ResourcesService extends BaseService {
*/
private String getTenantCode(int userId,Result result){
User user = userMapper.queryDetailsById(userId);
if(user == null){
User user = userMapper.selectById(userId);
if (user == null) {
logger.error("user {} not exists", userId);
putMsg(result, Status.USER_NOT_EXIST,userId);
return null;
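The update path above only rejects a rename when the file suffix actually changes and the resource has been authorized to other users. A standalone sketch of the suffix-change check; the suffix helper below is a hypothetical stand-in for FileUtils.suffix, and the file names are examples:

public class SuffixChangeDemo {
    // Hypothetical helper mirroring the idea of FileUtils.suffix:
    // returns the extension after the last dot, or "" if there is none.
    static String suffix(String fileName) {
        int idx = fileName.lastIndexOf('.');
        return idx < 0 ? "" : fileName.substring(idx + 1);
    }

    public static void main(String[] args) {
        String originFullName = "/udf/funcs/my_udf.jar";
        String newFullName = "/udf/funcs/my_udf.zip";
        // A rename counts as a suffix change when the extensions differ,
        // including the case where the new name drops the extension entirely.
        boolean suffixIsChanged = !suffix(newFullName).equals(suffix(originFullName));
        System.out.println("suffix changed: " + suffixIsChanged);
    }
}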

23
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/SchedulerService.java

@ -19,6 +19,7 @@ package org.apache.dolphinscheduler.api.service;
import org.apache.dolphinscheduler.api.dto.ScheduleParam;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ServiceException;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.*;
@ -333,10 +334,9 @@ public class SchedulerService extends BaseService {
if(scheduleStatus == ReleaseState.ONLINE){
// check process definition release state
if(processDefinition.getReleaseState() != ReleaseState.ONLINE){
ProcessDefinition definition = processDefinitionMapper.selectById(scheduleObj.getProcessDefinitionId());
logger.info("not release process definition id: {} , name : {}",
processDefinition.getId(), processDefinition.getName());
putMsg(result, Status.PROCESS_DEFINE_NOT_RELEASE, definition.getName());
putMsg(result, Status.PROCESS_DEFINE_NOT_RELEASE, processDefinition.getName());
return result;
}
// check sub process definition release state
@ -380,7 +380,7 @@ public class SchedulerService extends BaseService {
switch (scheduleStatus) {
case ONLINE: {
logger.info("Call master client set schedule online, project id: {}, flow id: {},host: {}", project.getId(), processDefinition.getId(), masterServers);
setSchedule(project.getId(), id);
setSchedule(project.getId(), scheduleObj);
break;
}
case OFFLINE: {
@ -395,7 +395,7 @@ public class SchedulerService extends BaseService {
}
} catch (Exception e) {
result.put(Constants.MSG, scheduleStatus == ReleaseState.ONLINE ? "set online failure" : "set offline failure");
throw new RuntimeException(result.get(Constants.MSG).toString());
throw new ServiceException(result.get(Constants.MSG).toString());
}
putMsg(result, Status.SUCCESS);
@ -472,15 +472,10 @@ public class SchedulerService extends BaseService {
return result;
}
public void setSchedule(int projectId, int scheduleId) throws RuntimeException{
logger.info("set schedule, project id: {}, scheduleId: {}", projectId, scheduleId);
public void setSchedule(int projectId, Schedule schedule) {
Schedule schedule = processService.querySchedule(scheduleId);
if (schedule == null) {
logger.warn("process schedule info not exists");
return;
}
int scheduleId = schedule.getId();
logger.info("set schedule, project id: {}, scheduleId: {}", projectId, scheduleId);
Date startDate = schedule.getStartTime();
Date endDate = schedule.getEndTime();
@ -502,7 +497,7 @@ public class SchedulerService extends BaseService {
* @param scheduleId schedule id
* @throws RuntimeException runtime exception
*/
public static void deleteSchedule(int projectId, int scheduleId) throws RuntimeException{
public static void deleteSchedule(int projectId, int scheduleId) {
logger.info("delete schedules of project id:{}, schedule id:{}", projectId, scheduleId);
String jobName = QuartzExecutors.buildJobName(scheduleId);
@ -510,7 +505,7 @@ public class SchedulerService extends BaseService {
if(!QuartzExecutors.getInstance().deleteJob(jobName, jobGroupName)){
logger.warn("set offline failure:projectId:{},scheduleId:{}",projectId,scheduleId);
throw new RuntimeException(String.format("set offline failure"));
throw new ServiceException("set offline failure");
}
}

6
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/TenantService.java

@ -327,11 +327,11 @@ public class TenantService extends BaseService{
* @return true if tenant code can user, otherwise return false
*/
public Result verifyTenantCode(String tenantCode) {
Result result=new Result();
Result result = new Result();
if (checkTenantExists(tenantCode)) {
logger.error("tenant {} has exist, can't create again.", tenantCode);
putMsg(result, Status.TENANT_NAME_EXIST);
}else{
putMsg(result, Status.TENANT_NAME_EXIST, tenantCode);
} else {
putMsg(result, Status.SUCCESS);
}
return result;

98
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/UsersService.java

@ -18,7 +18,10 @@ package org.apache.dolphinscheduler.api.service;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
import org.apache.dolphinscheduler.api.dto.resources.visitor.ResourceTreeVisitor;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ServiceException;
import org.apache.dolphinscheduler.api.utils.CheckUtils;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
@ -35,6 +38,7 @@ import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import java.io.IOException;
import java.util.*;
import java.util.stream.Collectors;
@ -306,14 +310,11 @@ public class UsersService extends BaseService {
user.setEmail(email);
}
if (StringUtils.isNotEmpty(phone)) {
if (!CheckUtils.checkPhone(phone)){
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR,phone);
return result;
}
user.setPhone(phone);
if (StringUtils.isNotEmpty(phone) && !CheckUtils.checkPhone(phone)) {
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR,phone);
return result;
}
user.setPhone(phone);
user.setQueue(queue);
user.setState(state);
Date now = new Date();
@ -340,18 +341,18 @@ public class UsersService extends BaseService {
List<Resource> fileResourcesList = resourceMapper.queryResourceList(
null, userId, ResourceType.FILE.ordinal());
if (CollectionUtils.isNotEmpty(fileResourcesList)) {
for (Resource resource : fileResourcesList) {
HadoopUtils.getInstance().copy(oldResourcePath + "/" + resource.getAlias(), newResourcePath, false, true);
}
ResourceTreeVisitor resourceTreeVisitor = new ResourceTreeVisitor(fileResourcesList);
ResourceComponent resourceComponent = resourceTreeVisitor.visit();
copyResourceFiles(resourceComponent, oldResourcePath, newResourcePath);
}
//udf resources
List<Resource> udfResourceList = resourceMapper.queryResourceList(
null, userId, ResourceType.UDF.ordinal());
if (CollectionUtils.isNotEmpty(udfResourceList)) {
for (Resource resource : udfResourceList) {
HadoopUtils.getInstance().copy(oldUdfsPath + "/" + resource.getAlias(), newUdfsPath, false, true);
}
ResourceTreeVisitor resourceTreeVisitor = new ResourceTreeVisitor(udfResourceList);
ResourceComponent resourceComponent = resourceTreeVisitor.visit();
copyResourceFiles(resourceComponent, oldUdfsPath, newUdfsPath);
}
//Delete the user from the old tenant directory
@ -517,7 +518,7 @@ public class UsersService extends BaseService {
if (CollectionUtils.isNotEmpty(oldAuthorizedResIds)) {
// get all resource id of process definitions those is released
List<Map<String, Object>> list = processDefinitionMapper.listResources();
List<Map<String, Object>> list = processDefinitionMapper.listResourcesByUser(userId);
Map<Integer, Set<Integer>> resourceProcessMap = ResourceProcessDefinitionUtils.getResourceProcessDefinitionMap(list);
Set<Integer> resourceIdSet = resourceProcessMap.keySet();
@ -871,4 +872,73 @@ public class UsersService extends BaseService {
return msg;
}
/**
* copy resource files
* @param resourceComponent resource component
* @param srcBasePath src base path
* @param dstBasePath dst base path
* @throws IOException io exception
*/
private void copyResourceFiles(ResourceComponent resourceComponent, String srcBasePath, String dstBasePath) throws IOException {
List<ResourceComponent> components = resourceComponent.getChildren();
if (CollectionUtils.isNotEmpty(components)) {
for (ResourceComponent component:components) {
// verify whether exist
if (!HadoopUtils.getInstance().exists(String.format("%s/%s",srcBasePath,component.getFullName()))){
logger.error("resource file: {} not exist,copy error",component.getFullName());
throw new ServiceException(Status.RESOURCE_NOT_EXIST);
}
if (!component.isDirctory()) {
// copy it to dst
HadoopUtils.getInstance().copy(String.format("%s/%s",srcBasePath,component.getFullName()),String.format("%s/%s",dstBasePath,component.getFullName()),false,true);
continue;
}
if(CollectionUtils.isEmpty(component.getChildren())) {
// create the directory if it does not exist
if (!HadoopUtils.getInstance().exists(String.format("%s/%s",dstBasePath,component.getFullName()))) {
HadoopUtils.getInstance().mkdir(String.format("%s/%s",dstBasePath,component.getFullName()));
}
}else{
copyResourceFiles(component,srcBasePath,dstBasePath);
}
}
}
}
/**
* register user, default state is 0, default tenant_id is 1, no phone, no queue
*
* @param userName user name
* @param userPassword user password
* @param repeatPassword repeat password
* @param email email
* @return register result code
* @throws Exception exception
*/
@Transactional(rollbackFor = Exception.class)
public Map<String, Object> registerUser(String userName, String userPassword, String repeatPassword, String email) throws Exception {
Map<String, Object> result = new HashMap<>(5);
//check user params
String msg = this.checkUserParams(userName, userPassword, email, "");
if (!StringUtils.isEmpty(msg)) {
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR,msg);
return result;
}
if (!userPassword.equals(repeatPassword)) {
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, "two passwords are not same");
return result;
}
createUser(userName, userPassword, email, 1, "", "", 0);
putMsg(result, Status.SUCCESS);
return result;
}
}

2
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/utils/CheckUtils.java

@ -115,7 +115,7 @@ public class CheckUtils {
*
* @param parameter parameter
* @param taskType task type
* @return true if taks node parameters are valid, otherwise return false
* @return true if task node parameters are valid, otherwise return false
*/
public static boolean checkTaskNodeParameters(String parameter, String taskType) {
AbstractParameters abstractParameters = TaskParametersUtils.getParameters(taskType, parameter);

3
dolphinscheduler-api/src/main/resources/application-api.properties

@ -23,6 +23,9 @@ server.servlet.session.timeout=7200
server.servlet.context-path=/dolphinscheduler/
# Set time zone
spring.jackson.time-zone=GMT+8
# file size limit for upload
spring.servlet.multipart.max-file-size=1024MB
spring.servlet.multipart.max-request-size=1024MB
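For illustration, a minimal (hypothetical) Spring upload handler that these limits would govern; requests whose file or total size exceed 1024MB are rejected by Spring before the handler runs:
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
import org.springframework.web.multipart.MultipartFile;
@RestController
public class UploadExampleController {  // illustrative only, not part of this change
    @PostMapping("/example/upload")
    public String upload(@RequestParam("file") MultipartFile file) {
        // spring.servlet.multipart.max-file-size / max-request-size are enforced before this method runs
        return file.getOriginalFilename() + " (" + file.getSize() + " bytes)";
    }
}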

16
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/TenantControllerTest.java

@ -120,7 +120,23 @@ public class TenantControllerTest extends AbstractControllerTest{
}
@Test
public void testVerifyTenantCodeExists() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("tenantCode", "tenantCode");
MvcResult mvcResult = mockMvc.perform(get("/tenant/verify-tenant-code")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assert.assertEquals(Status.TENANT_NAME_EXIST.getCode(), result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testQueryTenantlist() throws Exception {

19
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/UsersControllerTest.java

@ -268,4 +268,23 @@ public class UsersControllerTest extends AbstractControllerTest{
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testRegisterUser() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("userName","user_test");
paramsMap.add("userPassword","123456qwe?");
paramsMap.add("repeatPassword", "123456qwe?");
paramsMap.add("email","12343534@qq.com");
MvcResult mvcResult = mockMvc.perform(post("/users/register")
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
}

65
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/DataSourceServiceTest.java

@ -16,42 +16,89 @@
*/
package org.apache.dolphinscheduler.api.service;
import org.apache.dolphinscheduler.api.ApiApplicationServer;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.DbConnectType;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.dao.entity.DataSource;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.DataSourceMapper;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PowerMockIgnore;
import org.powermock.modules.junit4.PowerMockRunner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
@RunWith(SpringRunner.class)
@SpringBootTest(classes = ApiApplicationServer.class)
@RunWith(PowerMockRunner.class)
@PowerMockIgnore({"sun.security.*", "javax.net.*"})
public class DataSourceServiceTest {
private static final Logger logger = LoggerFactory.getLogger(DataSourceServiceTest.class);
@Autowired
@InjectMocks
private DataSourceService dataSourceService;
@Mock
private DataSourceMapper dataSourceMapper;
@Test
public void queryDataSourceList(){
public void queryDataSourceListTest(){
User loginUser = new User();
loginUser.setId(27);
loginUser.setUserType(UserType.GENERAL_USER);
Map<String, Object> map = dataSourceService.queryDataSourceList(loginUser, DbType.MYSQL.ordinal());
Assert.assertEquals(Status.SUCCESS, map.get(Constants.STATUS));
}
@Test
public void verifyDataSourceNameTest(){
User loginUser = new User();
loginUser.setUserType(UserType.GENERAL_USER);
String dataSourceName = "dataSource1";
PowerMockito.when(dataSourceMapper.queryDataSourceByName(dataSourceName)).thenReturn(getDataSourceList());
Result result = dataSourceService.verifyDataSourceName(loginUser, dataSourceName);
Assert.assertEquals(Status.DATASOURCE_EXIST.getMsg(),result.getMsg());
}
@Test
public void queryDataSourceTest(){
PowerMockito.when(dataSourceMapper.selectById(Mockito.anyInt())).thenReturn(null);
Map<String, Object> result = dataSourceService.queryDataSource(Mockito.anyInt());
Assert.assertEquals(((Status)result.get(Constants.STATUS)).getCode(),Status.RESOURCE_NOT_EXIST.getCode());
PowerMockito.when(dataSourceMapper.selectById(Mockito.anyInt())).thenReturn(getOracleDataSource());
result = dataSourceService.queryDataSource(Mockito.anyInt());
Assert.assertEquals(((Status)result.get(Constants.STATUS)).getCode(),Status.SUCCESS.getCode());
}
private List<DataSource> getDataSourceList(){
List<DataSource> dataSources = new ArrayList<>();
dataSources.add(getOracleDataSource());
return dataSources;
}
private DataSource getOracleDataSource(){
DataSource dataSource = new DataSource();
dataSource.setName("test");
dataSource.setNote("Note");
dataSource.setType(DbType.ORACLE);
dataSource.setConnectionParams("{\"connectType\":\"ORACLE_SID\",\"address\":\"jdbc:oracle:thin:@192.168.xx.xx:49161\",\"database\":\"XE\",\"jdbcUrl\":\"jdbc:oracle:thin:@192.168.xx.xx:49161/XE\",\"user\":\"system\",\"password\":\"oracle\"}");
return dataSource;
}
@Test
public void buildParameter(){
String param = dataSourceService.buildParameter("","", DbType.ORACLE, "192.168.9.1","1521","im"

14
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ResourcesServiceTest.java

@ -177,7 +177,7 @@ public class ResourcesServiceTest {
//RESOURCE_NOT_EXIST
user.setId(1);
Mockito.when(userMapper.queryDetailsById(1)).thenReturn(getUser());
Mockito.when(userMapper.selectById(1)).thenReturn(getUser());
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
PowerMockito.when(HadoopUtils.getHdfsFileName(Mockito.any(), Mockito.any(),Mockito.anyString())).thenReturn("test1");
@ -209,13 +209,13 @@ public class ResourcesServiceTest {
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_EXIST.getMsg(),result.getMsg());
//USER_NOT_EXIST
Mockito.when(userMapper.queryDetailsById(Mockito.anyInt())).thenReturn(null);
Mockito.when(userMapper.selectById(Mockito.anyInt())).thenReturn(null);
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest",ResourceType.UDF);
logger.info(result.toString());
Assert.assertTrue(Status.USER_NOT_EXIST.getCode() == result.getCode());
//TENANT_NOT_EXIST
Mockito.when(userMapper.queryDetailsById(1)).thenReturn(getUser());
Mockito.when(userMapper.selectById(1)).thenReturn(getUser());
Mockito.when(tenantMapper.queryById(Mockito.anyInt())).thenReturn(null);
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest",ResourceType.UDF);
logger.info(result.toString());
@ -296,7 +296,7 @@ public class ResourcesServiceTest {
//TENANT_NOT_EXIST
loginUser.setUserType(UserType.ADMIN_USER);
loginUser.setTenantId(2);
Mockito.when(userMapper.queryDetailsById(Mockito.anyInt())).thenReturn(loginUser);
Mockito.when(userMapper.selectById(Mockito.anyInt())).thenReturn(loginUser);
result = resourcesService.delete(loginUser,1);
logger.info(result.toString());
Assert.assertEquals(Status.TENANT_NOT_EXIST.getMsg(), result.getMsg());
@ -390,7 +390,7 @@ public class ResourcesServiceTest {
//TENANT_NOT_EXIST
Mockito.when(userMapper.queryDetailsById(1)).thenReturn(getUser());
Mockito.when(userMapper.selectById(1)).thenReturn(getUser());
result = resourcesService.readResource(1,1,10);
logger.info(result.toString());
Assert.assertEquals(Status.TENANT_NOT_EXIST.getMsg(),result.getMsg());
@ -495,7 +495,7 @@ public class ResourcesServiceTest {
//TENANT_NOT_EXIST
Mockito.when(userMapper.queryDetailsById(1)).thenReturn(getUser());
Mockito.when(userMapper.selectById(1)).thenReturn(getUser());
result = resourcesService.updateResourceContent(1,"content");
logger.info(result.toString());
Assert.assertTrue(Status.TENANT_NOT_EXIST.getCode() == result.getCode());
@ -514,7 +514,7 @@ public class ResourcesServiceTest {
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
Mockito.when(userMapper.queryDetailsById(1)).thenReturn(getUser());
Mockito.when(userMapper.selectById(1)).thenReturn(getUser());
org.springframework.core.io.Resource resourceMock = Mockito.mock(org.springframework.core.io.Resource.class);
try {
//resource null

174
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/SchedulerServiceTest.java

@ -16,43 +16,183 @@
*/
package org.apache.dolphinscheduler.api.service;
import org.apache.dolphinscheduler.api.ApiApplicationServer;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ReleaseState;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.common.model.Server;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.Project;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectUserMapper;
import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.quartz.QuartzExecutors;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import org.quartz.Scheduler;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@RunWith(SpringRunner.class)
@SpringBootTest(classes = ApiApplicationServer.class)
@RunWith(PowerMockRunner.class)
@PrepareForTest(QuartzExecutors.class)
public class SchedulerServiceTest {
private static final Logger logger = LoggerFactory.getLogger(ExecutorServiceTest.class);
@Autowired
@InjectMocks
private SchedulerService schedulerService;
@Autowired
private ExecutorService executorService;
@Mock
private MonitorService monitorService;
@Mock
private ProcessService processService;
@Mock
private ScheduleMapper scheduleMapper;
@Mock
private ProjectMapper projectMapper;
@Mock
private ProjectUserMapper projectUserMapper;
@Mock
private ProjectService projectService;
@Mock
private ProcessDefinitionMapper processDefinitionMapper;
@Mock
private QuartzExecutors quartzExecutors;
@Mock
private Scheduler scheduler;
@Before
public void setUp() {
quartzExecutors = PowerMockito.mock(QuartzExecutors.class);
PowerMockito.mockStatic(QuartzExecutors.class);
try {
PowerMockito.doReturn(quartzExecutors).when(QuartzExecutors.class, "getInstance");
} catch (Exception e) {
e.printStackTrace();
}
}
@Test
public void testSetScheduleState(){
public void testSetScheduleState() {
String projectName = "test";
User loginUser = new User();
loginUser.setId(-1);
loginUser.setUserType(UserType.GENERAL_USER);
loginUser.setId(1);
Map<String, Object> result = new HashMap<String, Object>();
Project project = getProject(projectName);
ProcessDefinition processDefinition = new ProcessDefinition();
Schedule schedule = new Schedule();
schedule.setId(1);
schedule.setProcessDefinitionId(1);
schedule.setReleaseState(ReleaseState.OFFLINE);
List<Server> masterServers = new ArrayList<>();
masterServers.add(new Server());
Mockito.when(scheduleMapper.selectById(1)).thenReturn(schedule);
Mockito.when(projectMapper.queryByName(projectName)).thenReturn(project);
Mockito.when(processService.findProcessDefineById(1)).thenReturn(processDefinition);
//has no auth
result = schedulerService.setScheduleState(loginUser, projectName, 1, ReleaseState.ONLINE);
Mockito.when(projectService.hasProjectAndPerm(loginUser, project, result)).thenReturn(true);
//schedule not exists
result = schedulerService.setScheduleState(loginUser, projectName, 2, ReleaseState.ONLINE);
Assert.assertEquals(Status.SCHEDULE_CRON_NOT_EXISTS, result.get(Constants.STATUS));
//SCHEDULE_CRON_REALEASE_NEED_NOT_CHANGE
result = schedulerService.setScheduleState(loginUser, projectName, 1, ReleaseState.OFFLINE);
Assert.assertEquals(Status.SCHEDULE_CRON_REALEASE_NEED_NOT_CHANGE, result.get(Constants.STATUS));
//PROCESS_DEFINE_NOT_EXIST
schedule.setProcessDefinitionId(2);
result = schedulerService.setScheduleState(loginUser, projectName, 1, ReleaseState.ONLINE);
Assert.assertEquals(Status.PROCESS_DEFINE_NOT_EXIST, result.get(Constants.STATUS));
schedule.setProcessDefinitionId(1);
// PROCESS_DEFINE_NOT_RELEASE
result = schedulerService.setScheduleState(loginUser, projectName, 1, ReleaseState.ONLINE);
Assert.assertEquals(Status.PROCESS_DEFINE_NOT_RELEASE, result.get(Constants.STATUS));
processDefinition.setReleaseState(ReleaseState.ONLINE);
Mockito.when(processService.findProcessDefineById(1)).thenReturn(processDefinition);
//MASTER_NOT_EXISTS
result = schedulerService.setScheduleState(loginUser, projectName, 1, ReleaseState.ONLINE);
Assert.assertEquals(Status.MASTER_NOT_EXISTS, result.get(Constants.STATUS));
//set master
Mockito.when(monitorService.getServerListFromZK(true)).thenReturn(masterServers);
//SUCCESS
result = schedulerService.setScheduleState(loginUser, projectName, 1, ReleaseState.ONLINE);
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
//OFFLINE
Mockito.when(quartzExecutors.deleteJob(null, null)).thenReturn(true);
result = schedulerService.setScheduleState(loginUser, projectName, 1, ReleaseState.OFFLINE);
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
}
@Test
public void testDeleteSchedule() {
Mockito.when(quartzExecutors.deleteJob("1", "1")).thenReturn(true);
Mockito.when(quartzExecutors.buildJobGroupName(1)).thenReturn("1");
Mockito.when(quartzExecutors.buildJobName(1)).thenReturn("1");
boolean flag = true;
try {
schedulerService.deleteSchedule(1, 1);
}catch (Exception e){
flag = false;
}
Assert.assertTrue(flag);
}
private Project getProject(String name) {
Project project = new Project();
project.setName("project_test1");
project.setId(-1);
project.setName(name);
project.setUserId(1);
Map<String, Object> map = schedulerService.setScheduleState(loginUser, project.getName(), 44, ReleaseState.ONLINE);
Assert.assertEquals(Status.PROJECT_NOT_FOUNT, map.get(Constants.STATUS));
return project;
}
}

30
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/TenantServiceTest.java

@ -16,8 +16,11 @@
*/
package org.apache.dolphinscheduler.api.service;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
@ -41,10 +44,10 @@ import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.i18n.LocaleContextHolder;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
@RunWith(MockitoJUnitRunner.class)
public class TenantServiceTest {
@ -61,8 +64,8 @@ public class TenantServiceTest {
@Mock
private UserMapper userMapper;
private String tenantCode ="TenantServiceTest";
private String tenantName ="TenantServiceTest";
private String tenantCode = "TenantServiceTest";
private String tenantName = "TenantServiceTest";
@Test
@ -85,6 +88,7 @@ public class TenantServiceTest {
result = tenantService.createTenant(loginUser, "test", "test", 1, "TenantServiceTest");
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS,result.get(Constants.STATUS));
} catch (Exception e) {
logger.error("create tenant error",e);
Assert.assertTrue(false);
@ -192,11 +196,17 @@ public class TenantServiceTest {
// tenantCode not exist
Result result = tenantService.verifyTenantCode("s00000000000l887888885554444sfjdskfjslakslkdf");
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(),result.getMsg());
Assert.assertEquals(Status.SUCCESS.getMsg(), result.getMsg());
// tenantCode exist
result = tenantService.verifyTenantCode(getTenant().getTenantCode());
String resultString;
if (Locale.SIMPLIFIED_CHINESE.getLanguage().equals(LocaleContextHolder.getLocale().getLanguage())) {
resultString = "租户编码[TenantServiceTest]已存在";
} else {
resultString = "tenant code TenantServiceTest already exists";
}
logger.info(result.toString());
Assert.assertEquals(Status.TENANT_NAME_EXIST.getMsg(),result.getMsg());
Assert.assertEquals(resultString, result.getMsg());
}
@ -261,4 +271,4 @@ public class TenantServiceTest {
}
}
}

45
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/UsersServiceTest.java

@ -453,6 +453,51 @@ public class UsersServiceTest {
Assert.assertTrue(CollectionUtils.isNotEmpty(userList));
}
@Test
public void testRegisterUser() {
String userName = "userTest0002~";
String userPassword = "userTest";
String repeatPassword = "userTest";
String email = "123@qq.com";
try {
//userName error
Map<String, Object> result = usersService.registerUser(userName, userPassword, repeatPassword, email);
logger.info(result.toString());
Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS));
userName = "userTest0002";
userPassword = "userTest000111111111111111";
//password error
result = usersService.registerUser(userName, userPassword, repeatPassword, email);
logger.info(result.toString());
Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS));
userPassword = "userTest0002";
email = "1q.com";
//email error
result = usersService.registerUser(userName, userPassword, repeatPassword, email);
logger.info(result.toString());
Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS));
//repeatPassword error
email = "7400@qq.com";
repeatPassword = "userPassword";
result = usersService.registerUser(userName, userPassword, repeatPassword, email);
logger.info(result.toString());
Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS));
//success
repeatPassword = "userTest0002";
result = usersService.registerUser(userName, userPassword, repeatPassword, email);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
} catch (Exception e) {
logger.error(Status.CREATE_USER_ERROR.getMsg(),e);
Assert.assertTrue(false);
}
}
/**
* get user
* @return

8
dolphinscheduler-common/pom.xml

@ -21,7 +21,7 @@
<parent>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler</artifactId>
<version>1.2.1-SNAPSHOT</version>
<version>1.3.2-SNAPSHOT</version>
</parent>
<artifactId>dolphinscheduler-common</artifactId>
<name>dolphinscheduler-common</name>
@ -84,12 +84,14 @@
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
</dependency>
<dependency>
<groupId>commons-beanutils</groupId>
<artifactId>commons-beanutils</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>

22
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java

@ -239,6 +239,10 @@ public final class Constants {
* EQUAL SIGN
*/
public static final String EQUAL_SIGN = "=";
/**
* AT SIGN
*/
public static final String AT_SIGN = "@";
public static final String WORKER_MAX_CPULOAD_AVG = "worker.max.cpuload.avg";
@ -300,7 +304,7 @@ public final class Constants {
/**
* user name regex
*/
public static final Pattern REGEX_USER_NAME = Pattern.compile("^[a-zA-Z0-9._-]{3,20}$");
public static final Pattern REGEX_USER_NAME = Pattern.compile("^[a-zA-Z0-9._-]{3,39}$");
/**
* email regex
@ -512,7 +516,7 @@ public final class Constants {
/**
* heartbeat for zk info length
*/
public static final int HEARTBEAT_FOR_ZOOKEEPER_INFO_LENGTH = 5;
public static final int HEARTBEAT_FOR_ZOOKEEPER_INFO_LENGTH = 10;
/**
@ -782,6 +786,10 @@ public final class Constants {
*/
public static final String HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE = "hadoop.security.authentication.startup.state";
/**
* com.amazonaws.services.s3.enableV4
*/
public static final String AWS_S3_V4 = "com.amazonaws.services.s3.enableV4";
/**
* loginUserFromKeytab user
@ -921,6 +929,7 @@ public final class Constants {
public static final String JDBC_URL = "jdbcUrl";
public static final String PRINCIPAL = "principal";
public static final String OTHER = "other";
public static final String ORACLE_DB_CONNECT_TYPE = "connectType";
/**
@ -966,4 +975,13 @@ public final class Constants {
*/
public static final String PLUGIN_JAR_SUFFIX = ".jar";
public static final int NORAML_NODE_STATUS = 0;
public static final int ABNORMAL_NODE_STATUS = 1;
/**
* net system properties
*/
public static final String DOLPHIN_SCHEDULER_PREFERRED_NETWORK_INTERFACE = "dolphin.scheduler.network.interface.preferred";
}
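A quick illustration of the widened user name rule (3 to 39 characters drawn from letters, digits, '.', '_' and '-'):
// Illustrative checks against the updated pattern.
Constants.REGEX_USER_NAME.matcher("user_test0002").matches();   // true
Constants.REGEX_USER_NAME.matcher("ab").matches();              // false: fewer than 3 characters
Constants.REGEX_USER_NAME.matcher("userTest0002~").matches();   // false: '~' is not an allowed character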

11
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/ExecutionStatus.java

@ -95,14 +95,14 @@ public enum ExecutionStatus {
public boolean typeIsFinished(){
return typeIsSuccess() || typeIsFailure() || typeIsCancel() || typeIsPause()
|| typeIsWaittingThread();
|| typeIsStop();
}
/**
* status is waiting thread
* @return status
*/
public boolean typeIsWaittingThread(){
public boolean typeIsWaitingThread(){
return this == WAITTING_THREAD;
}
@ -113,6 +113,13 @@ public enum ExecutionStatus {
public boolean typeIsPause(){
return this == PAUSE;
}
/**
* status is stop
* @return status
*/
public boolean typeIsStop(){
return this == STOP;
}
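A small usage sketch of the broadened terminal-state check (illustrative only):
// With this change, STOP is included in typeIsFinished().
ExecutionStatus status = ExecutionStatus.STOP;
if (status.typeIsFinished()) {
    // safe to finalize the instance and release resources
}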
/**
* status is running

13
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/model/TaskNode.java

@ -123,6 +123,11 @@ public class TaskNode {
*/
private String workerGroup;
/**
* worker group id
*/
private Integer workerGroupId;
/**
* task time out
@ -340,4 +345,12 @@ public class TaskNode {
public void setConditionResult(String conditionResult) {
this.conditionResult = conditionResult;
}
public Integer getWorkerGroupId() {
return workerGroupId;
}
public void setWorkerGroupId(Integer workerGroupId) {
this.workerGroupId = workerGroupId;
}
}

122
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/model/WorkerServerModel.java

@ -0,0 +1,122 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.model;
import java.util.Date;
import java.util.Set;
import com.fasterxml.jackson.annotation.JsonFormat;
/**
* server
*/
public class WorkerServerModel {
/**
* id
*/
private int id;
/**
* host
*/
private String host;
/**
* port
*/
private int port;
/**
* worker directories in zookeeper
*/
private Set<String> zkDirectories;
/**
* resource info about CPU and memory
*/
private String resInfo;
/**
* create time
*/
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss", timezone = "GMT+8")
private Date createTime;
/**
* last heart beat time
*/
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss", timezone = "GMT+8")
private Date lastHeartbeatTime;
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
public Date getCreateTime() {
return createTime;
}
public void setCreateTime(Date createTime) {
this.createTime = createTime;
}
public Set<String> getZkDirectories() {
return zkDirectories;
}
public void setZkDirectories(Set<String> zkDirectories) {
this.zkDirectories = zkDirectories;
}
public Date getLastHeartbeatTime() {
return lastHeartbeatTime;
}
public void setLastHeartbeatTime(Date lastHeartbeatTime) {
this.lastHeartbeatTime = lastHeartbeatTime;
}
public String getResInfo() {
return resInfo;
}
public void setResInfo(String resInfo) {
this.resInfo = resInfo;
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public int getPort() {
return port;
}
public void setPort(int port) {
this.port = port;
}
}
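Because both date fields carry @JsonFormat, a (hypothetical) Jackson serialization of this model renders them as formatted GMT+8 strings; for example (checked exceptions omitted, host/port are placeholders):
import java.util.Date;
import com.fasterxml.jackson.databind.ObjectMapper;
WorkerServerModel worker = new WorkerServerModel();
worker.setHost("192.168.xx.xx");
worker.setPort(1234);
worker.setLastHeartbeatTime(new Date());
String json = new ObjectMapper().writeValueAsString(worker);
// lastHeartbeatTime is emitted as "yyyy-MM-dd HH:mm:ss" in GMT+8, e.g. "2020-07-01 12:00:00"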

40
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/shell/AbstractShell.java

@ -16,9 +16,6 @@
*/
package org.apache.dolphinscheduler.common.shell;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
@ -30,6 +27,9 @@ import java.util.TimerTask;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A base class for running a Unix command.
@ -128,7 +128,7 @@ public abstract class AbstractShell {
/**
* Run a command actual work
*/
private void runCommand() throws IOException {
private void runCommand() throws IOException {
ProcessBuilder builder = new ProcessBuilder(getExecString());
Timer timeOutTimer = null;
ShellTimeoutTimerTask timeoutTimerTask = null;
@ -153,11 +153,11 @@ public abstract class AbstractShell {
timeOutTimer.schedule(timeoutTimerTask, timeOutInterval);
}
final BufferedReader errReader =
new BufferedReader(new InputStreamReader(process
.getErrorStream()));
BufferedReader inReader =
new BufferedReader(new InputStreamReader(process
.getInputStream()));
new BufferedReader(
new InputStreamReader(process.getErrorStream()));
BufferedReader inReader =
new BufferedReader(
new InputStreamReader(process.getInputStream()));
final StringBuilder errMsg = new StringBuilder();
// read error and input streams as this would free up the buffers
@ -177,23 +177,35 @@ public abstract class AbstractShell {
}
}
};
Thread inThread = new Thread() {
@Override
public void run() {
try {
parseExecResult(inReader);
} catch (IOException ioe) {
logger.warn("Error reading the in stream", ioe);
}
super.run();
}
};
try {
errThread.start();
inThread.start();
} catch (IllegalStateException ise) { }
try {
// parse the output
parseExecResult(inReader);
exitCode = process.waitFor();
exitCode = process.waitFor();
try {
// make sure that the error thread exits
// make sure that the error and in threads exit
errThread.join();
inThread.join();
} catch (InterruptedException ie) {
logger.warn("Interrupted while reading the error stream", ie);
logger.warn("Interrupted while reading the error and in stream", ie);
}
completed.set(true);
//the timeout thread handling
//taken care in finally block
if (exitCode != 0) {
if (exitCode != 0 || errMsg.length() > 0) {
throw new ExitCodeException(exitCode, errMsg.toString());
}
} catch (InterruptedException ie) {
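The change above moves stdout parsing onto its own thread next to the stderr reader, so neither pipe buffer can fill up and block the child process. A stand-alone sketch of that pattern (illustrative only, not the class's API):
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
// Drain stdout and stderr concurrently, then wait for the process and both readers.
static int runAndDrain(ProcessBuilder builder, StringBuilder out, StringBuilder err)
        throws IOException, InterruptedException {
    Process process = builder.start();
    Thread outThread = new Thread(() -> drain(process.getInputStream(), out));
    Thread errThread = new Thread(() -> drain(process.getErrorStream(), err));
    outThread.start();
    errThread.start();
    int exitCode = process.waitFor();
    outThread.join();
    errThread.join();
    return exitCode;
}
static void drain(InputStream stream, StringBuilder sink) {
    try (BufferedReader reader = new BufferedReader(new InputStreamReader(stream))) {
        String line;
        while ((line = reader.readLine()) != null) {
            sink.append(line).append('\n');
        }
    } catch (IOException e) {
        // reading failed; the caller still sees the process exit code
    }
}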

13
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/datax/DataxParameters.java

@ -20,6 +20,7 @@ import java.util.ArrayList;
import java.util.List;
import org.apache.commons.lang.StringUtils;
import org.apache.dolphinscheduler.common.enums.Flag;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
@ -31,7 +32,7 @@ public class DataxParameters extends AbstractParameters {
/**
* whether a custom json config is used, e.g. 0 or 1
*/
private Integer customConfig;
private int customConfig;
/**
* if customConfig equals 1, then json is usable
@ -88,11 +89,11 @@ public class DataxParameters extends AbstractParameters {
*/
private int jobSpeedRecord;
public Integer getCustomConfig() {
public int getCustomConfig() {
return customConfig;
}
public void setCustomConfig(Integer customConfig) {
public void setCustomConfig(int customConfig) {
this.customConfig = customConfig;
}
@ -184,13 +185,9 @@ public class DataxParameters extends AbstractParameters {
this.jobSpeedRecord = jobSpeedRecord;
}
@Override
public boolean checkParameters() {
if (customConfig == null) {
return false;
}
if (customConfig == 0) {
if (customConfig == Flag.NO.ordinal()) {
return dataSource != 0
&& dataTarget != 0
&& StringUtils.isNotEmpty(sql)

5
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/flink/FlinkParameters.java

@ -19,13 +19,12 @@ package org.apache.dolphinscheduler.common.task.flink;
import org.apache.dolphinscheduler.common.enums.ProgramType;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import java.util.ArrayList;
import java.util.List;
/**
* spark parameters
* flink parameters
*/
public class FlinkParameters extends AbstractParameters {
@ -226,6 +225,4 @@ public class FlinkParameters extends AbstractParameters {
}
return resourceList;
}
}

3
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/CollectionUtils.java

@ -16,8 +16,7 @@
*/
package org.apache.dolphinscheduler.common.utils;
import org.apache.commons.collections.BeanMap;
import org.apache.commons.beanutils.BeanMap;
import org.apache.commons.lang.StringUtils;
import java.util.*;

4
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/CommonUtils.java

@ -48,7 +48,7 @@ public class CommonUtils {
envPath = envDefaultPath.getPath();
logger.debug("env path :{}", envPath);
}else{
envPath = System.getProperty("user.home") + File.separator + ".bash_profile";
envPath = "/etc/profile";
}
}
@ -70,7 +70,7 @@ public class CommonUtils {
* @return true if upload resource is HDFS and kerberos startup
*/
public static boolean getKerberosStartupState(){
String resUploadStartupType = PropertyUtils.getString(Constants.RESOURCE_STORAGE_TYPE);
String resUploadStartupType = PropertyUtils.getUpperCaseString(Constants.RESOURCE_STORAGE_TYPE);
ResUploadType resUploadType = ResUploadType.valueOf(resUploadStartupType);
Boolean kerberosStartupState = PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE,false);
return resUploadType == ResUploadType.HDFS && kerberosStartupState;
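By way of illustration, getKerberosStartupState() returns true only when the configured upload type is HDFS and kerberos startup is switched on (property keys assumed from the constants referenced above):
// Illustrative only, e.g. with common.properties containing:
//   resource.storage.type=HDFS
//   hadoop.security.authentication.startup.state=true
boolean kerberosEnabled = CommonUtils.getKerberosStartupState();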

2
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/DateUtils.java

File diff suppressed because one or more lines are too long

Some files were not shown because too many files have changed in this diff.
