Merge pull request #5 from apache/dev

Simon committed 4 years ago (via GitHub)
commit f632d18af5
14   .github/workflows/ci_e2e.yml
17   .github/workflows/ci_ut.yml
1    .gitignore
94   CONTRIBUTING.md
55   ReleaseNotes.md
4    ambari_plugin/common-services/DOLPHIN/1.2.1/package/scripts/params.py
226  charts/README.md
52   charts/dolphinscheduler/Chart.yaml
226  charts/dolphinscheduler/README.md
44   charts/dolphinscheduler/templates/NOTES.txt
149  charts/dolphinscheduler/templates/_helpers.tpl
41   charts/dolphinscheduler/templates/configmap-dolphinscheduler-alert.yaml
34   charts/dolphinscheduler/templates/configmap-dolphinscheduler-master.yaml
39   charts/dolphinscheduler/templates/configmap-dolphinscheduler-worker.yaml
228  charts/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml
161  charts/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml
102  charts/dolphinscheduler/templates/deployment-dolphinscheduler-frontend.yaml
43   charts/dolphinscheduler/templates/ingress.yaml
35   charts/dolphinscheduler/templates/pvc-dolphinscheduler-alert.yaml
35   charts/dolphinscheduler/templates/pvc-dolphinscheduler-api.yaml
35   charts/dolphinscheduler/templates/pvc-dolphinscheduler-frontend.yaml
29   charts/dolphinscheduler/templates/secret-external-postgresql.yaml
247  charts/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml
275  charts/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml
35   charts/dolphinscheduler/templates/svc-dolphinscheduler-api.yaml
35   charts/dolphinscheduler/templates/svc-dolphinscheduler-frontend.yaml
36   charts/dolphinscheduler/templates/svc-dolphinscheduler-master-headless.yaml
36   charts/dolphinscheduler/templates/svc-dolphinscheduler-worker-headless.yaml
355  charts/dolphinscheduler/values.yaml
15   docker/docker-compose.yml
10   dockerfile/hooks/check
5    dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/ExcelUtils.java
10   dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/utils/ExcelUtilsTest.java
24   dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/AlertGroupController.java
33   dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/DataSourceController.java
2    dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessInstanceController.java
163  dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java
3    dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/ProcessMeta.java
36   dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/TaskCountDto.java
29   dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/Directory.java
10   dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/FileLeaf.java
193  dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/ResourceComponent.java
28   dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/filter/IFilter.java
100  dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/filter/ResourceFilter.java
130  dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/visitor/ResourceTreeVisitor.java
31   dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/visitor/Visitor.java
12   dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java
43   dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/AlertGroupService.java
32   dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/DataSourceService.java
7    dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ExecutorService.java
56   dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java
23   dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessInstanceService.java
26   dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/QueueService.java
566  dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java
4    dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/TaskInstanceService.java
4    dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/TaskRecordService.java
4    dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/TenantService.java
4    dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/UdfFuncService.java
38   dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/UserAlertGroupService.java
2    dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/WorkerGroupService.java
4    dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/DataSourceControllerTest.java
2    dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/LoginControllerTest.java
2    dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/TaskRecordControllerTest.java
2    dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/UsersControllerTest.java
58   dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/dto/resources/filter/ResourceFilterTest.java
82   dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/dto/resources/visitor/ResourceTreeVisitorTest.java
2    dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/enums/StatusTest.java
47   dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/AlertGroupServiceTest.java
112  dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ResourcesServiceTest.java
53   dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/UserAlertGroupServiceTest.java
7    dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/utils/CheckUtilsTest.java
3    dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
12   dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/AuthorizationType.java
44   dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/DbConnectType.java
2    dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/TaskStateType.java
8    dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/ZKNodeType.java
6    dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/model/TaskNode.java
10   dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/process/ResourceInfo.java
3    dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/AbstractParameters.java
4    dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/IParameters.java
3    dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/conditions/ConditionsParameters.java
3    dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/datax/DataxParameters.java
3    dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/dependent/DependentParameters.java
29   dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/flink/FlinkParameters.java
3    dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/http/HttpParameters.java
20   dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/mr/MapreduceParameters.java
3    dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/procedure/ProcedureParameters.java
10   dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/python/PythonParameters.java
9    dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/shell/ShellParameters.java
16   dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/spark/SparkParameters.java
3    dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/sql/SqlParameters.java
3    dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/sqoop/SqoopParameters.java
3    dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/subprocess/SubProcessParameters.java
2    dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/thread/Stopper.java
36   dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/thread/ThreadPoolExecutors.java
103  dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/ConnectionUtils.java
49   dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
16   dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/OSUtils.java
2    dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/PropertyUtils.java
8    dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/placeholder/TimePlaceholderUtils.java
Some files were not shown because too many files have changed in this diff.

14  .github/workflows/ci_e2e.yml

@@ -49,7 +49,8 @@ jobs:
       - name: Docker Run
         run: |
           VERSION=`cat $(pwd)/pom.xml| grep "SNAPSHOT</version>" | awk -F "-SNAPSHOT" '{print $1}' | awk -F ">" '{print $2}'`
-          docker run -dit -e POSTGRESQL_USERNAME=test -e POSTGRESQL_PASSWORD=test -p 8888:8888 dolphinscheduler:$VERSION all
+          mkdir -p /tmp/logs
+          docker run -dit -e POSTGRESQL_USERNAME=test -e POSTGRESQL_PASSWORD=test -v /tmp/logs:/opt/dolphinscheduler/logs -p 8888:8888 dolphinscheduler:$VERSION all
       - name: Check Server Status
         run: sh ./dockerfile/hooks/check
       - name: Prepare e2e env
@@ -65,7 +66,10 @@ jobs:
       - name: Run e2e Test
         run: cd ./e2e && mvn -B clean test
       - name: Collect logs
-        run: |
-          mkdir -p ${LOG_DIR}
-          docker logs dolphinscheduler > ${LOG_DIR}/dolphinscheduler.txt
         continue-on-error: true
+        if: failure()
+        uses: actions/upload-artifact@v1
+        with:
+          name: dslogs
+          path: /tmp/logs
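Taken together, the two hunks bind-mount the container's log directory onto the host so the logs can be published as a build artifact even when the job fails. A rough local reproduction of the same flow, assuming a `dolphinscheduler:$VERSION` image has already been built:

```bash
# Extract the SNAPSHOT version from pom.xml, exactly as the workflow does.
VERSION=`cat $(pwd)/pom.xml | grep "SNAPSHOT</version>" | awk -F "-SNAPSHOT" '{print $1}' | awk -F ">" '{print $2}'`

# Bind-mount a host directory over the container's log directory.
mkdir -p /tmp/logs
docker run -dit -e POSTGRESQL_USERNAME=test -e POSTGRESQL_PASSWORD=test \
  -v /tmp/logs:/opt/dolphinscheduler/logs -p 8888:8888 \
  dolphinscheduler:$VERSION all

# Logs now persist on the host even if the container dies, which is what
# lets the upload-artifact step publish /tmp/logs on failure.
ls /tmp/logs
```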

17  .github/workflows/ci_ut.yml

@@ -15,7 +15,11 @@
 # limitations under the License.
 #
-on: ["pull_request", "push"]
+on:
+  pull_request:
+  push:
+    branches:
+      - dev
 env:
   DOCKER_DIR: ./docker
   LOG_DIR: /tmp/dolphinscheduler
@@ -48,19 +52,18 @@ jobs:
         uses: actions/setup-java@v1
         with:
           java-version: 1.8
-      - name: Git fetch unshallow
-        run: |
-          git fetch --unshallow
-          git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*"
-          git fetch origin
       - name: Compile
         run: |
           export MAVEN_OPTS='-Dmaven.repo.local=.m2/repository -XX:+TieredCompilation -XX:TieredStopAtLevel=1 -XX:+CMSClassUnloadingEnabled -XX:+UseConcMarkSweepGC -XX:-UseGCOverheadLimit -Xmx3g'
           mvn test -B -Dmaven.test.skip=false
       - name: Upload coverage report to codecov
         if: github.event_name == 'pull_request'
         run: |
           CODECOV_TOKEN="09c2663f-b091-4258-8a47-c981827eb29a" bash <(curl -s https://codecov.io/bash)
+      - name: Git fetch unshallow
+        run: |
+          git fetch --unshallow
+          git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*"
+          git fetch origin
       - name: Run SonarCloud Analysis
       run: >
           mvn verify --batch-mode
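The reordering moves the un-shallowing step from before the compile step to just before the SonarCloud analysis, presumably because only the analysis needs the full git history. A sketch of reproducing the test-and-fetch sequence locally, under the same settings as the workflow:

```bash
# Same Maven options and test invocation as the Compile step.
export MAVEN_OPTS='-Dmaven.repo.local=.m2/repository -XX:+TieredCompilation -XX:TieredStopAtLevel=1 -XX:+CMSClassUnloadingEnabled -XX:+UseConcMarkSweepGC -XX:-UseGCOverheadLimit -Xmx3g'
mvn test -B -Dmaven.test.skip=false

# Convert a shallow CI clone into a full clone with all branches,
# as the Git fetch unshallow step does before the SonarCloud analysis.
git fetch --unshallow
git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*"
git fetch origin
```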

1  .gitignore

@@ -145,6 +145,5 @@ dolphinscheduler-ui/dist/js/home/index.78a5d12.js.map
 dolphinscheduler-ui/dist/js/login/index.291b8e3.js
 dolphinscheduler-ui/dist/js/login/index.291b8e3.js.map
 dolphinscheduler-ui/dist/lib/external/
-dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/taskInstance/index.vue
 /dolphinscheduler-dao/src/main/resources/dao/data_source.properties

94  CONTRIBUTING.md

@@ -1,35 +1,53 @@
-* First from the remote repository *https://github.com/apache/incubator-dolphinscheduler.git* fork code to your own repository
-* there are three branches in the remote repository currently:
-  * master normal delivery branch
-    After the stable version is released, the code for the stable version branch is merged into the master branch.
+# Development
-  * dev daily development branch
-    The daily development branch, the newly submitted code can pull requests to this branch.
+Start by forking the dolphinscheduler GitHub repository, make changes in a branch and then send a pull request.
+## Set up your dolphinscheduler GitHub Repository
-* Clone your own repository to your local machine
+There are three branches in the remote repository currently:
+- `master` : normal delivery branch. After the stable version is released, the code for the stable version branch is merged into the master branch.
+- `dev` : daily development branch. The newly submitted code can be pull-requested to this branch.
+- `x.x.x-release` : the stable release version.
-  `git clone https://github.com/apache/incubator-dolphinscheduler.git`
+So, you should fork the `dev` branch.
-* Add remote repository address, named upstream
+After forking the [dolphinscheduler upstream source repository](https://github.com/apache/incubator-dolphinscheduler/fork) to your personal repository, you can set up your personal development environment.
-  `git remote add upstream https://github.com/apache/incubator-dolphinscheduler.git`
+```sh
+$ cd <your work directory>
+$ git clone <your personal forked dolphinscheduler repo>
+$ cd incubator-dolphinscheduler
+```
-* View repository:
+## Set git remote as ``upstream``
-  `git remote -v`
+Add the remote repository address, named upstream
-> There will be two repositories at this time: origin (your own repository) and upstream (remote repository)
+```sh
+git remote add upstream https://github.com/apache/incubator-dolphinscheduler.git
+```
-* Get/update remote repository code (already the latest code, skip it)
+View repository:
-  `git fetch upstream`
+```sh
+git remote -v
+```
+There will be two repositories at this time: origin (your own repository) and upstream (the remote repository)
-* Synchronize remote repository code to local repository
+Get/update remote repository code (if it is already the latest code, skip it).
+```sh
+git fetch upstream
+```
+Synchronize remote repository code to the local repository
 ```sh
 git checkout origin/dev
 git merge --no-ff upstream/dev
 ```
@@ -41,24 +59,46 @@ git checkout -b dev-1.0 upstream/dev-1.0
 git push --set-upstream origin dev1.0
 ```
-* After modifying the code locally, submit it to your own repository:
+## Create your feature branch
+Before making code changes, make sure you create a separate branch for them.
+```sh
+$ git checkout -b <your-feature>
+```
+## Commit changes
+After modifying the code locally, submit it to your own repository:
+```sh
+git commit -m 'information about your feature'
+```
+## Push to the branch
+Push your locally committed changes to the remote origin (your fork).
-`git commit -m 'test commit'`
-`git push`
+```
+$ git push origin <your-feature>
+```
+## Create a pull request
-* Submit changes to the remote repository
-  After submitting changes to your remote repository, you should click on the new pull request on the following github page.
+* On the github page, click on the new pull request.
 <p align = "center">
-<img src = "http://geek.analysys.cn/static/upload/221/2019-04-02/90f3abbf-70ef-4334-b8d6-9014c9cf4c7f.png"width ="60%"/>
-</ p>
+<img src = "http://geek.analysys.cn/static/upload/221/2019-04-02/90f3abbf-70ef-4334-b8d6-9014c9cf4c7f.png" width ="60%"/>
+</p>
-Select the modified local branch and the branch to merge past to create a pull request.
+* Select the modified local branch and the branch to merge into to create a pull request.
 <p align = "center">
-<img src = "http://geek.analysys.cn/static/upload/221/2019-04-02/fe7eecfe-2720-4736-951b-b3387cf1ae41.png"width ="60%"/>
-</ p>
+<img src = "http://geek.analysys.cn/static/upload/221/2019-04-02/fe7eecfe-2720-4736-951b-b3387cf1ae41.png" width ="60%"/>
+</p>
-* Next, the administrator is responsible for **merging** to complete the pull request
+Next, the administrator is responsible for **merging** to complete the pull request.
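For quick reference, the whole contribution flow described in the new text condenses to a few commands; `<your-github-user>` and `<your-feature>` are placeholders:

```bash
# Clone your fork and point an `upstream` remote at the Apache repository.
git clone https://github.com/<your-github-user>/incubator-dolphinscheduler.git
cd incubator-dolphinscheduler
git remote add upstream https://github.com/apache/incubator-dolphinscheduler.git

# Branch off the daily development branch, which is the pull-request target.
git fetch upstream
git checkout -b <your-feature> upstream/dev

# ...make your changes...
git commit -am 'information about your feature'
git push origin <your-feature>   # then open a pull request against apache:dev
```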

55  ReleaseNotes.md

@ -1,55 +0,0 @@
## 1.2.0
### New Feature
1. Support postgre sql
2. Change all Chinese names to English
3. Add flink and http task support
4. Cross project dependencies
5. Modify mybatis to mybatisplus, support multy databases.
6. Add export and import definition feaure
7. Github actions ci compile check
8. Add method and parameters comments
9. Add java doc for common module
### Enhancement
1. Add license and notice files
2. Move batchDelete Process Define/Instance Outside for transactional
3. Remove space before and after login user name
4. Dockerfile optimization
5. Change mysql-connector-java scope to test
6. Owners and administrators can delete schedule
7. DB page rename and background color modification 
8. Add postgre performance monitor
9. Resolve style conflict, recipient cannot tab and value verification
10. Checkbox change background color and env to Chinese
11. Change chinese sql to english
12. Change sqlSessionTemplate singleton and reformat code 
13. The value of loadaverage should be two decimal places
14. Delete alert group need delete the relation of user and alert group
15. Remove check resources when delete tenant
16. Check processInstance state before delete worker group 
17. Add check user and definitions function when delete tenant
18. Delete before check to avoid KeeperException$NoNodeException
### Bug Fixes
1. Fix #1245, make scanCommand transactional
2. Fix ZKWorkerClient not close PathChildrenCache
3. Data type convert error ,email send error bug fix
4. Catch exception transaction method does not take effect to modify
5. Fix the spring transaction not worker bug
6. Task log print worker log bug fix
7. Fix api server debug mode bug
8. The task is abnormal and task is running bug fix
9. Fix bug: tasks queue length error
10. Fix unsuitable error message
11. Fix bug: phone can be empty
12. Fix email error password
13. Fix CheckUtils.checkUserParams method
14. The process cannot be terminated while tasks in the status submit success
15. Fix too many connection in upgrade or create 
16. Fix the bug when worker execute task using queue. and remove checking
17. Resole verify udf name error and delete udf error 
18. Fix bug: task cannot submit when recovery failover
19. Fix bug: the administrator authorizes the project to ordinary users,but ordinary users cannot see the process definition created by the administrator
20. Fix bug: create dolphinscheduler sql failed

4  ambari_plugin/common-services/DOLPHIN/1.2.1/package/scripts/params.py

@@ -7,16 +7,16 @@ to you under the Apache License, Version 2.0 (the
 "License"); you may not use this file except in compliance
 with the License. You may obtain a copy of the License at
-    http://www.apache.org/licenses/LICENSE_2.0
+    http://www.apache.org/licenses/LICENSE-2.0
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 """
 import sys
 from resource_management import *
 from resource_management.core.logger import Logger

226  charts/README.md

@@ -0,0 +1,226 @@
# Dolphin Scheduler
[Dolphin Scheduler](https://dolphinscheduler.apache.org) is a distributed and easy-to-expand visual DAG workflow scheduling system, dedicated to solving the complex dependencies in data processing, making the scheduling system out of the box for data processing.
## Introduction
This chart bootstraps a [Dolphin Scheduler](https://dolphinscheduler.apache.org) distributed deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Kubernetes 1.10+
- PV provisioner support in the underlying infrastructure
## Installing the Chart
To install the chart with the release name `dolphinscheduler`:
```bash
$ git clone https://github.com/apache/incubator-dolphinscheduler.git
$ cd incubator-dolphinscheduler
$ helm install --name dolphinscheduler .
```
These commands deploy Dolphin Scheduler on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
> **Tip**: List all releases using `helm list`
## Uninstalling the Chart
To uninstall/delete the `dolphinscheduler` deployment:
```bash
$ helm delete --purge dolphinscheduler
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following table lists the configurable parameters of the Dolphin Scheduler chart and their default values.
| Parameter | Description | Default |
| --------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------- |
| `timezone` | World time and date for cities in all time zones | `Asia/Shanghai` |
| `image.registry` | Docker image registry for the Dolphin Scheduler | `docker.io` |
| `image.repository` | Docker image repository for the Dolphin Scheduler | `dolphinscheduler` |
| `image.tag` | Docker image version for the Dolphin Scheduler | `1.2.1` |
| `image.imagePullPolicy` | Image pull policy. One of Always, Never, IfNotPresent | `IfNotPresent` |
| `imagePullSecrets` | ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images | `[]` |
| | | |
| `postgresql.enabled` | If no external PostgreSQL is provided, Dolphin Scheduler will use an internal PostgreSQL by default | `true` |
| `postgresql.postgresqlUsername` | The username for internal PostgreSQL | `root` |
| `postgresql.postgresqlPassword` | The password for internal PostgreSQL | `root` |
| `postgresql.postgresqlDatabase` | The database for internal PostgreSQL | `dolphinscheduler` |
| `postgresql.persistence.enabled` | Set `postgresql.persistence.enabled` to `true` to mount a new volume for internal PostgreSQL | `false` |
| `postgresql.persistence.size` | `PersistentVolumeClaim` Size | `20Gi` |
| `postgresql.persistence.storageClass` | PostgreSQL data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `externalDatabase.host` | If an external PostgreSQL is used (set `postgresql.enabled` to `false`), Dolphin Scheduler will use this database host. | `localhost` |
| `externalDatabase.port` | If an external PostgreSQL is used (set `postgresql.enabled` to `false`), Dolphin Scheduler will use this database port. | `5432` |
| `externalDatabase.username` | If an external PostgreSQL is used (set `postgresql.enabled` to `false`), Dolphin Scheduler will use this database username. | `root` |
| `externalDatabase.password` | If an external PostgreSQL is used (set `postgresql.enabled` to `false`), Dolphin Scheduler will use this database password. | `root` |
| `externalDatabase.database` | If an external PostgreSQL is used (set `postgresql.enabled` to `false`), Dolphin Scheduler will use this database name. | `dolphinscheduler` |
| | | |
| `zookeeper.enabled` | If no external Zookeeper is provided, Dolphin Scheduler will use an internal Zookeeper by default | `true` |
| `zookeeper.taskQueue` | Specify task queue for `master` and `worker` | `zookeeper` |
| `zookeeper.persistence.enabled` | Set `zookeeper.persistence.enabled` to `true` to mount a new volume for internal Zookeeper | `false` |
| `zookeeper.persistence.size` | `PersistentVolumeClaim` Size | `20Gi` |
| `zookeeper.persistence.storageClass` | Zookeeper data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `externalZookeeper.taskQueue` | If an external Zookeeper is used (set `zookeeper.enabled` to `false`), specify the task queue for `master` and `worker` | `zookeeper` |
| `externalZookeeper.zookeeperQuorum` | If an external Zookeeper is used (set `zookeeper.enabled` to `false`), specify the Zookeeper quorum | `127.0.0.1:2181` |
| | | |
| `master.podManagementPolicy` | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down | `Parallel` |
| `master.replicas` | Replicas is the desired number of replicas of the given Template | `3` |
| `master.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `master.tolerations` | If specified, the pod's tolerations | `{}` |
| `master.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `master.configmap.MASTER_EXEC_THREADS` | Master execute thread num | `100` |
| `master.configmap.MASTER_EXEC_TASK_NUM` | Master execute task number in parallel | `20` |
| `master.configmap.MASTER_HEARTBEAT_INTERVAL` | Master heartbeat interval | `10` |
| `master.configmap.MASTER_TASK_COMMIT_RETRYTIMES` | Master commit task retry times | `5` |
| `master.configmap.MASTER_TASK_COMMIT_INTERVAL` | Master commit task interval | `1000` |
| `master.configmap.MASTER_MAX_CPULOAD_AVG` | The master server can only work when the CPU load average is lower than this value. Default value: the number of cpu cores * 2 | `100` |
| `master.configmap.MASTER_RESERVED_MEMORY` | The master server can only work when available memory is larger than this reserved value. Default value: physical memory * 1/10, unit is G | `0.1` |
| `master.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `master.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `master.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
| `master.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `master.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `master.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
| `master.persistentVolumeClaim.enabled` | Set `master.persistentVolumeClaim.enabled` to `true` to mount a new volume for `master` | `false` |
| `master.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `master.persistentVolumeClaim.storageClassName` | `Master` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `master.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `worker.podManagementPolicy` | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down | `Parallel` |
| `worker.replicas` | Replicas is the desired number of replicas of the given Template | `3` |
| `worker.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `worker.tolerations` | If specified, the pod's tolerations | `{}` |
| `worker.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `worker.configmap.WORKER_EXEC_THREADS` | Worker execute thread num | `100` |
| `worker.configmap.WORKER_HEARTBEAT_INTERVAL` | Worker heartbeat interval | `10` |
| `worker.configmap.WORKER_FETCH_TASK_NUM` | Submit the number of tasks at a time | `3` |
| `worker.configmap.WORKER_MAX_CPULOAD_AVG` | The worker server can only work when the CPU load average is lower than this value. Default value: the number of cpu cores * 2 | `100` |
| `worker.configmap.WORKER_RESERVED_MEMORY` | The worker server can only work when available memory is larger than this reserved value. Default value: physical memory * 1/10, unit is G | `0.1` |
| `worker.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH` | User data directory path, self configuration, please make sure the directory exists and has read/write permissions | `/tmp/dolphinscheduler` |
| `worker.configmap.DOLPHINSCHEDULER_ENV` | System env path, self configuration, please read `values.yaml` | `[]` |
| `worker.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `worker.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `worker.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `worker.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `worker.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
| `worker.livenessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
| `worker.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `worker.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `worker.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `worker.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `worker.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
| `worker.readinessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
| `worker.persistentVolumeClaim.enabled` | Set `worker.persistentVolumeClaim.enabled` to `true` to enable `persistentVolumeClaim` for `worker` | `false` |
| `worker.persistentVolumeClaim.dataPersistentVolume.enabled` | Set `worker.persistentVolumeClaim.dataPersistentVolume.enabled` to `true` to mount a data volume for `worker` | `false` |
| `worker.persistentVolumeClaim.dataPersistentVolume.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `worker.persistentVolumeClaim.dataPersistentVolume.storageClassName` | `Worker` data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `worker.persistentVolumeClaim.dataPersistentVolume.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| `worker.persistentVolumeClaim.logsPersistentVolume.enabled` | Set `worker.persistentVolumeClaim.logsPersistentVolume.enabled` to `true` to mount a logs volume for `worker` | `false` |
| `worker.persistentVolumeClaim.logsPersistentVolume.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `worker.persistentVolumeClaim.logsPersistentVolume.storageClassName` | `Worker` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `worker.persistentVolumeClaim.logsPersistentVolume.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `alert.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
| `alert.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
| `alert.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
| `alert.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
| `alert.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `alert.tolerations` | If specified, the pod's tolerations | `{}` |
| `alert.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `alert.configmap.XLS_FILE_PATH` | XLS file path | `/tmp/xls` |
| `alert.configmap.MAIL_SERVER_HOST` | Mail `SERVER HOST` | `nil` |
| `alert.configmap.MAIL_SERVER_PORT` | Mail `SERVER PORT` | `nil` |
| `alert.configmap.MAIL_SENDER` | Mail `SENDER` | `nil` |
| `alert.configmap.MAIL_USER` | Mail `USER` | `nil` |
| `alert.configmap.MAIL_PASSWD` | Mail `PASSWORD` | `nil` |
| `alert.configmap.MAIL_SMTP_STARTTLS_ENABLE` | Mail `SMTP STARTTLS` enable | `false` |
| `alert.configmap.MAIL_SMTP_SSL_ENABLE` | Mail `SMTP SSL` enable | `false` |
| `alert.configmap.MAIL_SMTP_SSL_TRUST` | Mail `SMTP SSL TRUST` | `nil` |
| `alert.configmap.ENTERPRISE_WECHAT_ENABLE` | `Enterprise Wechat` enable | `false` |
| `alert.configmap.ENTERPRISE_WECHAT_CORP_ID` | `Enterprise Wechat` corp id | `nil` |
| `alert.configmap.ENTERPRISE_WECHAT_SECRET` | `Enterprise Wechat` secret | `nil` |
| `alert.configmap.ENTERPRISE_WECHAT_AGENT_ID` | `Enterprise Wechat` agent id | `nil` |
| `alert.configmap.ENTERPRISE_WECHAT_USERS` | `Enterprise Wechat` users | `nil` |
| `alert.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `alert.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `alert.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `alert.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `alert.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
| `alert.livenessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
| `alert.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `alert.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `alert.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `alert.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `alert.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
| `alert.readinessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
| `alert.persistentVolumeClaim.enabled` | Set `alert.persistentVolumeClaim.enabled` to `true` to mount a new volume for `alert` | `false` |
| `alert.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `alert.persistentVolumeClaim.storageClassName` | `Alert` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `alert.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `api.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
| `api.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
| `api.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
| `api.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
| `api.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `api.tolerations` | If specified, the pod's tolerations | `{}` |
| `api.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `api.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `api.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `api.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `api.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `api.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
| `api.livenessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
| `api.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `api.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `api.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `api.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `api.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
| `api.readinessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
| `api.persistentVolumeClaim.enabled` | Set `api.persistentVolumeClaim.enabled` to `true` to mount a new volume for `api` | `false` |
| `api.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `api.persistentVolumeClaim.storageClassName` | `api` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `api.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `frontend.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
| `frontend.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
| `frontend.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
| `frontend.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
| `frontend.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `frontend.tolerations` | If specified, the pod's tolerations | `{}` |
| `frontend.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `frontend.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `frontend.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `frontend.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `frontend.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `frontend.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
| `frontend.livenessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
| `frontend.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `frontend.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `frontend.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `frontend.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `frontend.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
| `frontend.readinessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
| `frontend.persistentVolumeClaim.enabled` | Set `frontend.persistentVolumeClaim.enabled` to `true` to mount a new volume for `frontend` | `false` |
| `frontend.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `frontend.persistentVolumeClaim.storageClassName` | `frontend` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `frontend.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `ingress.enabled` | Enable ingress | `false` |
| `ingress.host` | Ingress host | `dolphinscheduler.org` |
| `ingress.path` | Ingress path | `/` |
| `ingress.tls.enabled` | Enable ingress tls | `false` |
| `ingress.tls.hosts` | Ingress tls hosts | `dolphinscheduler.org` |
| `ingress.tls.secretName` | Ingress tls secret name | `dolphinscheduler-tls` |
For more information please refer to the [chart](https://github.com/apache/incubator-dolphinscheduler.git) documentation.
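Any parameter in the table can be overridden at install time with `--set`, or collected in a custom values file. As a sketch (Helm 2 syntax, to match the commands above; the host value is only an example), pointing the chart at an existing PostgreSQL instead of the bundled one:

```bash
# Disable the bundled PostgreSQL and supply external connection details,
# using parameter names from the configuration table above.
helm install --name dolphinscheduler \
  --set postgresql.enabled=false \
  --set externalDatabase.host=192.168.1.100 \
  --set externalDatabase.port=5432 \
  --set externalDatabase.username=root \
  --set externalDatabase.password=root \
  --set externalDatabase.database=dolphinscheduler \
  .
```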

52  charts/dolphinscheduler/Chart.yaml

@@ -0,0 +1,52 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: v2
name: dolphinscheduler
description: Dolphin Scheduler is a distributed and easy-to-expand visual DAG workflow scheduling system, dedicated to solving the complex dependencies in data processing, making the scheduling system out of the box for data processing.
home: https://dolphinscheduler.apache.org
icon: https://dolphinscheduler.apache.org/img/hlogo_colorful.svg
keywords:
- dolphinscheduler
- Scheduler
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 1.2.1
dependencies:
- name: postgresql
version: 8.x.x
repository: https://charts.bitnami.com/bitnami
condition: postgresql.enabled
- name: zookeeper
version: 5.x.x
repository: https://charts.bitnami.com/bitnami
condition: zookeeper.enabled
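The `dependencies` block above pulls the `postgresql` and `zookeeper` charts from the Bitnami repository. A minimal sketch of fetching them before installing from a local checkout of the repository:

```bash
# Pull the dependency charts declared in Chart.yaml into the chart's charts/ directory.
helm repo add bitnami https://charts.bitnami.com/bitnami
helm dependency update charts/dolphinscheduler

# Verify: should list postgresql 8.x.x and zookeeper 5.x.x.
helm dependency list charts/dolphinscheduler
```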

226  charts/dolphinscheduler/README.md

@@ -0,0 +1,226 @@
# Dolphin Scheduler
[Dolphin Scheduler](https://dolphinscheduler.apache.org) is a distributed and easy-to-expand visual DAG workflow scheduling system, dedicated to solving the complex dependencies in data processing, making the scheduling system out of the box for data processing.
## Introduction
This chart bootstraps a [Dolphin Scheduler](https://dolphinscheduler.apache.org) distributed deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Kubernetes 1.10+
- PV provisioner support in the underlying infrastructure
## Installing the Chart
To install the chart with the release name `dolphinscheduler`:
```bash
$ git clone https://github.com/apache/incubator-dolphinscheduler.git
$ cd incubator-dolphinscheduler
$ helm install --name dolphinscheduler .
```
These commands deploy Dolphin Scheduler on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
> **Tip**: List all releases using `helm list`
## Uninstalling the Chart
To uninstall/delete the `dolphinscheduler` deployment:
```bash
$ helm delete --purge dolphinscheduler
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following table lists the configurable parameters of the Dolphin Scheduler chart and their default values.
| Parameter | Description | Default |
| --------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------- |
| `timezone` | World time and date for cities in all time zones | `Asia/Shanghai` |
| `image.registry` | Docker image registry for the Dolphin Scheduler | `docker.io` |
| `image.repository` | Docker image repository for the Dolphin Scheduler | `dolphinscheduler` |
| `image.tag` | Docker image version for the Dolphin Scheduler | `1.2.1` |
| `image.imagePullPolicy` | Image pull policy. One of Always, Never, IfNotPresent | `IfNotPresent` |
| `imagePullSecrets` | ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images | `[]` |
| | | |
| `postgresql.enabled` | If no external PostgreSQL is provided, Dolphin Scheduler will use an internal PostgreSQL by default | `true` |
| `postgresql.postgresqlUsername` | The username for internal PostgreSQL | `root` |
| `postgresql.postgresqlPassword` | The password for internal PostgreSQL | `root` |
| `postgresql.postgresqlDatabase` | The database for internal PostgreSQL | `dolphinscheduler` |
| `postgresql.persistence.enabled` | Set `postgresql.persistence.enabled` to `true` to mount a new volume for internal PostgreSQL | `false` |
| `postgresql.persistence.size` | `PersistentVolumeClaim` Size | `20Gi` |
| `postgresql.persistence.storageClass` | PostgreSQL data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `externalDatabase.host` | If an external PostgreSQL is used (set `postgresql.enabled` to `false`), Dolphin Scheduler will use this database host. | `localhost` |
| `externalDatabase.port` | If an external PostgreSQL is used (set `postgresql.enabled` to `false`), Dolphin Scheduler will use this database port. | `5432` |
| `externalDatabase.username` | If an external PostgreSQL is used (set `postgresql.enabled` to `false`), Dolphin Scheduler will use this database username. | `root` |
| `externalDatabase.password` | If an external PostgreSQL is used (set `postgresql.enabled` to `false`), Dolphin Scheduler will use this database password. | `root` |
| `externalDatabase.database` | If an external PostgreSQL is used (set `postgresql.enabled` to `false`), Dolphin Scheduler will use this database name. | `dolphinscheduler` |
| | | |
| `zookeeper.enabled` | If no external Zookeeper is provided, Dolphin Scheduler will use an internal Zookeeper by default | `true` |
| `zookeeper.taskQueue` | Specify task queue for `master` and `worker` | `zookeeper` |
| `zookeeper.persistence.enabled` | Set `zookeeper.persistence.enabled` to `true` to mount a new volume for internal Zookeeper | `false` |
| `zookeeper.persistence.size` | `PersistentVolumeClaim` Size | `20Gi` |
| `zookeeper.persistence.storageClass` | Zookeeper data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `externalZookeeper.taskQueue` | If an external Zookeeper is used (set `zookeeper.enabled` to `false`), specify the task queue for `master` and `worker` | `zookeeper` |
| `externalZookeeper.zookeeperQuorum` | If an external Zookeeper is used (set `zookeeper.enabled` to `false`), specify the Zookeeper quorum | `127.0.0.1:2181` |
| | | |
| `master.podManagementPolicy` | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down | `Parallel` |
| `master.replicas` | Replicas is the desired number of replicas of the given Template | `3` |
| `master.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `master.tolerations` | If specified, the pod's tolerations | `{}` |
| `master.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `master.configmap.MASTER_EXEC_THREADS` | Master execute thread num | `100` |
| `master.configmap.MASTER_EXEC_TASK_NUM` | Master execute task number in parallel | `20` |
| `master.configmap.MASTER_HEARTBEAT_INTERVAL` | Master heartbeat interval | `10` |
| `master.configmap.MASTER_TASK_COMMIT_RETRYTIMES` | Master commit task retry times | `5` |
| `master.configmap.MASTER_TASK_COMMIT_INTERVAL` | Master commit task interval | `1000` |
| `master.configmap.MASTER_MAX_CPULOAD_AVG` | The master server can only work when the CPU load average is lower than this value. Default value: the number of cpu cores * 2 | `100` |
| `master.configmap.MASTER_RESERVED_MEMORY` | The master server can only work when available memory is larger than this reserved value. Default value: physical memory * 1/10, unit is G | `0.1` |
| `master.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `master.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `master.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
| `master.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `master.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `master.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
| `master.persistentVolumeClaim.enabled` | Set `master.persistentVolumeClaim.enabled` to `true` to mount a new volume for `master` | `false` |
| `master.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `master.persistentVolumeClaim.storageClassName` | `Master` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `master.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `worker.podManagementPolicy` | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down | `Parallel` |
| `worker.replicas` | Replicas is the desired number of replicas of the given Template | `3` |
| `worker.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `worker.tolerations` | If specified, the pod's tolerations | `{}` |
| `worker.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `worker.configmap.WORKER_EXEC_THREADS` | Worker execute thread num | `100` |
| `worker.configmap.WORKER_HEARTBEAT_INTERVAL` | Worker heartbeat interval | `10` |
| `worker.configmap.WORKER_FETCH_TASK_NUM` | Submit the number of tasks at a time | `3` |
| `worker.configmap.WORKER_MAX_CPULOAD_AVG` | The worker server can only work when the CPU load average is lower than this value. Default value: the number of cpu cores * 2 | `100` |
| `worker.configmap.WORKER_RESERVED_MEMORY` | The worker server can only work when available memory is larger than this reserved value. Default value: physical memory * 1/10, unit is G | `0.1` |
| `worker.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH` | User data directory path, self configuration, please make sure the directory exists and has read/write permissions | `/tmp/dolphinscheduler` |
| `worker.configmap.DOLPHINSCHEDULER_ENV` | System env path, self configuration, please read `values.yaml` | `[]` |
| `worker.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `worker.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `worker.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `worker.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `worker.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
| `worker.livenessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
| `worker.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `worker.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `worker.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `worker.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `worker.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
| `worker.readinessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
| `worker.persistentVolumeClaim.enabled` | Set `worker.persistentVolumeClaim.enabled` to `true` to enable `persistentVolumeClaim` for `worker` | `false` |
| `worker.persistentVolumeClaim.dataPersistentVolume.enabled` | Set `worker.persistentVolumeClaim.dataPersistentVolume.enabled` to `true` to mount a data volume for `worker` | `false` |
| `worker.persistentVolumeClaim.dataPersistentVolume.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `worker.persistentVolumeClaim.dataPersistentVolume.storageClassName` | `Worker` data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `worker.persistentVolumeClaim.dataPersistentVolume.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| `worker.persistentVolumeClaim.logsPersistentVolume.enabled` | Set `worker.persistentVolumeClaim.logsPersistentVolume.enabled` to `true` to mount a logs volume for `worker` | `false` |
| `worker.persistentVolumeClaim.logsPersistentVolume.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `worker.persistentVolumeClaim.logsPersistentVolume.storageClassName` | `Worker` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `worker.persistentVolumeClaim.logsPersistentVolume.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `alert.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
| `alert.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
| `alert.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
| `alert.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
| `alert.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `alert.tolerations` | If specified, the pod's tolerations | `{}` |
| `alert.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `alert.configmap.XLS_FILE_PATH` | XLS file path | `/tmp/xls` |
| `alert.configmap.MAIL_SERVER_HOST` | Mail `SERVER HOST` | `nil` |
| `alert.configmap.MAIL_SERVER_PORT` | Mail `SERVER PORT` | `nil` |
| `alert.configmap.MAIL_SENDER` | Mail `SENDER` | `nil` |
| `alert.configmap.MAIL_USER` | Mail `USER` | `nil` |
| `alert.configmap.MAIL_PASSWD` | Mail `PASSWORD` | `nil` |
| `alert.configmap.MAIL_SMTP_STARTTLS_ENABLE` | Mail `SMTP STARTTLS` enable | `false` |
| `alert.configmap.MAIL_SMTP_SSL_ENABLE` | Mail `SMTP SSL` enable | `false` |
| `alert.configmap.MAIL_SMTP_SSL_TRUST` | Mail `SMTP SSL TRUST` | `nil` |
| `alert.configmap.ENTERPRISE_WECHAT_ENABLE` | `Enterprise Wechat` enable | `false` |
| `alert.configmap.ENTERPRISE_WECHAT_CORP_ID` | `Enterprise Wechat` corp id | `nil` |
| `alert.configmap.ENTERPRISE_WECHAT_SECRET` | `Enterprise Wechat` secret | `nil` |
| `alert.configmap.ENTERPRISE_WECHAT_AGENT_ID` | `Enterprise Wechat` agent id | `nil` |
| `alert.configmap.ENTERPRISE_WECHAT_USERS` | `Enterprise Wechat` users | `nil` |
| `alert.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `alert.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `alert.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `alert.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `alert.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
| `alert.livenessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
| `alert.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `alert.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `alert.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `alert.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `alert.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
| `alert.readinessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
| `alert.persistentVolumeClaim.enabled` | Set `alert.persistentVolumeClaim.enabled` to `true` to mount a new volume for `alert` | `false` |
| `alert.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `alert.persistentVolumeClaim.storageClassName` | `Alert` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `alert.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `api.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
| `api.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
| `api.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
| `api.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
| `api.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `api.tolerations` | If specified, the pod's tolerations | `{}` |
| `api.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `api.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `api.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `api.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `api.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `api.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `api.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `api.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `api.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `api.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `api.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `api.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `api.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `api.persistentVolumeClaim.enabled` | Set `api.persistentVolumeClaim.enabled` to `true` to mount a new volume for `api` | `false` |
| `api.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `api.persistentVolumeClaim.storageClassName` | `api` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `api.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `frontend.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
| `frontend.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
| `frontend.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
| `frontend.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
| `frontend.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `frontend.tolerations` | If specified, the pod's tolerations | `{}` |
| `frontend.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `frontend.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `frontend.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `frontend.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `frontend.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `frontend.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `frontend.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `frontend.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `frontend.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `frontend.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `frontend.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `frontend.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `frontend.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `frontend.persistentVolumeClaim.enabled` | Set `frontend.persistentVolumeClaim.enabled` to `true` to mount a new volume for `frontend` | `false` |
| `frontend.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `frontend.persistentVolumeClaim.storageClassName` | `frontend` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `frontend.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `ingress.enabled` | Enable ingress | `false` |
| `ingress.host` | Ingress host | `dolphinscheduler.org` |
| `ingress.path` | Ingress path | `/` |
| `ingress.tls.enabled` | Enable ingress TLS | `false` |
| `ingress.tls.hosts` | Ingress TLS hosts | `dolphinscheduler.org` |
| `ingress.tls.secretName` | Ingress TLS secret name | `dolphinscheduler-tls` |
For more information, please refer to the [chart](https://github.com/apache/incubator-dolphinscheduler.git) documentation.
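As a quick sketch of how these values compose, the overrides below enable worker data persistence, point the alerter at an SMTP relay, and expose the UI through ingress. Every hostname, storage class, and address here is a placeholder for illustration, not a chart default:

```yaml
# values-custom.yaml -- illustrative overrides only; all hosts/classes are placeholders
alert:
  configmap:
    MAIL_SERVER_HOST: "smtp.example.com"
    MAIL_SERVER_PORT: "25"
    MAIL_SENDER: "dolphinscheduler@example.com"
worker:
  persistentVolumeClaim:
    dataPersistentVolume:
      enabled: true
      storageClassName: "standard"   # assumes this StorageClass exists in the cluster
      storage: "50Gi"
ingress:
  enabled: true
  host: "dolphinscheduler.example.com"
```

With Helm 3 syntax this would be installed from the chart directory with something like `helm install dolphinscheduler . -f values-custom.yaml`.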

44
charts/dolphinscheduler/templates/NOTES.txt

@@ -0,0 +1,44 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
** Please be patient while the chart is being deployed **
1. Get the Dolphinscheduler URL by running:
{{- if .Values.ingress.enabled }}
export HOSTNAME=$(kubectl get ingress --namespace {{ .Release.Namespace }} {{ template "dolphinscheduler.fullname" . }} -o jsonpath='{.spec.rules[0].host}')
echo "Dolphinscheduler URL: http://$HOSTNAME/"
{{- else }}
kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "dolphinscheduler.fullname" . }}-frontend 8888:8888
{{- end }}
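Without ingress, the rendered notes reduce to a port-forward. For a release actually named `dolphinscheduler` (so that `dolphinscheduler.fullname` resolves to the release name itself), a session might look like:

```bash
# Release name "dolphinscheduler" assumed; fullname then equals the release name
kubectl port-forward svc/dolphinscheduler-frontend 8888:8888
# the UI is now reachable at http://localhost:8888
```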

149
charts/dolphinscheduler/templates/_helpers.tpl

@@ -0,0 +1,149 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "dolphinscheduler.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "dolphinscheduler.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "dolphinscheduler.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "dolphinscheduler.labels" -}}
helm.sh/chart: {{ include "dolphinscheduler.chart" . }}
{{ include "dolphinscheduler.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Selector labels
*/}}
{{- define "dolphinscheduler.selectorLabels" -}}
app.kubernetes.io/name: {{ include "dolphinscheduler.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "dolphinscheduler.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "dolphinscheduler.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Create a default docker image registry.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dolphinscheduler.image.registry" -}}
{{- $registry := default "docker.io" .Values.image.registry -}}
{{- printf "%s" $registry | trunc 63 | trimSuffix "/" -}}
{{- end -}}
{{/*
Create a default docker image repository.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dolphinscheduler.image.repository" -}}
{{- printf "%s/%s:%s" (include "dolphinscheduler.image.registry" .) .Values.image.repository .Values.image.tag -}}
{{- end -}}
{{/*
Create a default fully qualified postgresql name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dolphinscheduler.postgresql.fullname" -}}
{{- $name := default "postgresql" .Values.postgresql.nameOverride -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified zookeeper name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dolphinscheduler.zookeeper.fullname" -}}
{{- $name := default "zookeeper" .Values.zookeeper.nameOverride -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified zookeeper quorum.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dolphinscheduler.zookeeper.quorum" -}}
{{- $port := default "2181" (.Values.zookeeper.service.port | toString) -}}
{{- printf "%s:%s" (include "dolphinscheduler.zookeeper.fullname" .) $port | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default dolphinscheduler worker base dir.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dolphinscheduler.worker.base.dir" -}}
{{- $name := default "/tmp/dolphinscheduler" .Values.worker.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH -}}
{{- printf "%s" $name | trunc 63 | trimSuffix "/" -}}
{{- end -}}
{{/*
Create a default dolphinscheduler worker data download dir.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dolphinscheduler.worker.data.download.dir" -}}
{{- printf "%s%s" (include "dolphinscheduler.worker.base.dir" .) "/download" -}}
{{- end -}}
{{/*
Create a default dolphinscheduler worker process exec dir.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dolphinscheduler.worker.process.exec.dir" -}}
{{- printf "%s%s" (include "dolphinscheduler.worker.base.dir" .) "/exec" -}}
{{- end -}}
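To make the helpers above concrete, here is roughly how they would resolve for a hypothetical release named `ds` with all overrides left at their defaults:

```yaml
# Illustrative helper resolutions for release "ds" (not literal chart output)
dolphinscheduler.name:                     dolphinscheduler
dolphinscheduler.fullname:                 ds-dolphinscheduler    # release name lacks the chart name
dolphinscheduler.postgresql.fullname:      ds-postgresql
dolphinscheduler.zookeeper.fullname:       ds-zookeeper
dolphinscheduler.zookeeper.quorum:         ds-zookeeper:2181      # default port 2181
dolphinscheduler.worker.base.dir:          /tmp/dolphinscheduler  # default base dir
dolphinscheduler.worker.data.download.dir: /tmp/dolphinscheduler/download
```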

41
charts/dolphinscheduler/templates/configmap-dolphinscheduler-alert.yaml

@@ -0,0 +1,41 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{{- if .Values.alert.configmap }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-alert
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-alert
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
data:
XLS_FILE_PATH: {{ .Values.alert.configmap.XLS_FILE_PATH | quote }}
MAIL_SERVER_HOST: {{ .Values.alert.configmap.MAIL_SERVER_HOST | quote }}
MAIL_SERVER_PORT: {{ .Values.alert.configmap.MAIL_SERVER_PORT | quote }}
MAIL_SENDER: {{ .Values.alert.configmap.MAIL_SENDER | quote }}
MAIL_USER: {{ .Values.alert.configmap.MAIL_USER | quote }}
MAIL_PASSWD: {{ .Values.alert.configmap.MAIL_PASSWD | quote }}
MAIL_SMTP_STARTTLS_ENABLE: {{ .Values.alert.configmap.MAIL_SMTP_STARTTLS_ENABLE | quote }}
MAIL_SMTP_SSL_ENABLE: {{ .Values.alert.configmap.MAIL_SMTP_SSL_ENABLE | quote }}
MAIL_SMTP_SSL_TRUST: {{ .Values.alert.configmap.MAIL_SMTP_SSL_TRUST | quote }}
ENTERPRISE_WECHAT_ENABLE: {{ .Values.alert.configmap.ENTERPRISE_WECHAT_ENABLE | quote }}
ENTERPRISE_WECHAT_CORP_ID: {{ .Values.alert.configmap.ENTERPRISE_WECHAT_CORP_ID | quote }}
ENTERPRISE_WECHAT_SECRET: {{ .Values.alert.configmap.ENTERPRISE_WECHAT_SECRET | quote }}
ENTERPRISE_WECHAT_AGENT_ID: {{ .Values.alert.configmap.ENTERPRISE_WECHAT_AGENT_ID | quote }}
ENTERPRISE_WECHAT_USERS: {{ .Values.alert.configmap.ENTERPRISE_WECHAT_USERS | quote }}
{{- end }}

34
charts/dolphinscheduler/templates/configmap-dolphinscheduler-master.yaml

@@ -0,0 +1,34 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{{- if .Values.master.configmap }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-master
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
data:
MASTER_EXEC_THREADS: {{ .Values.master.configmap.MASTER_EXEC_THREADS | quote }}
MASTER_EXEC_TASK_NUM: {{ .Values.master.configmap.MASTER_EXEC_TASK_NUM | quote }}
MASTER_HEARTBEAT_INTERVAL: {{ .Values.master.configmap.MASTER_HEARTBEAT_INTERVAL | quote }}
MASTER_TASK_COMMIT_RETRYTIMES: {{ .Values.master.configmap.MASTER_TASK_COMMIT_RETRYTIMES | quote }}
MASTER_TASK_COMMIT_INTERVAL: {{ .Values.master.configmap.MASTER_TASK_COMMIT_INTERVAL | quote }}
MASTER_MAX_CPULOAD_AVG: {{ .Values.master.configmap.MASTER_MAX_CPULOAD_AVG | quote }}
MASTER_RESERVED_MEMORY: {{ .Values.master.configmap.MASTER_RESERVED_MEMORY | quote }}
{{- end }}

39
charts/dolphinscheduler/templates/configmap-dolphinscheduler-worker.yaml

@@ -0,0 +1,39 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{{- if .Values.worker.configmap }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-worker
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
data:
WORKER_EXEC_THREADS: {{ .Values.worker.configmap.WORKER_EXEC_THREADS | quote }}
WORKER_HEARTBEAT_INTERVAL: {{ .Values.worker.configmap.WORKER_HEARTBEAT_INTERVAL | quote }}
WORKER_FETCH_TASK_NUM: {{ .Values.worker.configmap.WORKER_FETCH_TASK_NUM | quote }}
WORKER_MAX_CPULOAD_AVG: {{ .Values.worker.configmap.WORKER_MAX_CPULOAD_AVG | quote }}
WORKER_RESERVED_MEMORY: {{ .Values.worker.configmap.WORKER_RESERVED_MEMORY | quote }}
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: {{ include "dolphinscheduler.worker.base.dir" . | quote }}
DOLPHINSCHEDULER_DATA_DOWNLOAD_BASEDIR_PATH: {{ include "dolphinscheduler.worker.data.download.dir" . | quote }}
DOLPHINSCHEDULER_PROCESS_EXEC_BASEPATH: {{ include "dolphinscheduler.worker.process.exec.dir" . | quote }}
dolphinscheduler_env.sh: |-
{{- range .Values.worker.configmap.DOLPHINSCHEDULER_ENV }}
{{ . }}
{{- end }}
{{- end }}
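The `DOLPHINSCHEDULER_ENV` range writes each list entry verbatim as one line of `dolphinscheduler_env.sh`. A hypothetical values fragment such as:

```yaml
worker:
  configmap:
    DOLPHINSCHEDULER_ENV:   # illustrative entries, not chart defaults
      - "export JAVA_HOME=/usr/lib/jvm/java-8-openjdk"
      - "export PYTHON_HOME=/usr/bin/python"
```

would yield a two-line `dolphinscheduler_env.sh` key, which the worker StatefulSet later mounts as a single file via `subPath`.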

228
charts/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml

@@ -0,0 +1,228 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-alert
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-alert
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: alert
spec:
replicas: {{ .Values.alert.replicas }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-alert
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: alert
strategy:
type: {{ .Values.alert.strategy.type | quote }}
rollingUpdate:
maxSurge: {{ .Values.alert.strategy.rollingUpdate.maxSurge | quote }}
maxUnavailable: {{ .Values.alert.strategy.rollingUpdate.maxUnavailable | quote }}
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-alert
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: alert
spec:
{{- if .Values.alert.affinity }}
affinity: {{- toYaml .Values.alert.affinity | nindent 8 }}
{{- end }}
{{- if .Values.alert.nodeSelector }}
nodeSelector: {{- toYaml .Values.alert.nodeSelector | nindent 8 }}
{{- end }}
{{- if .Values.alert.tolerations }}
tolerations: {{- toYaml .Values.alert.tolerations | nindent 8 }}
{{- end }}
initContainers:
- name: init-postgresql
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
counter=$((counter+1))
if [ $counter == 5 ]; then
echo "Error: Couldn't connect to postgresql."
exit 1
fi
echo "Trying to connect to postgresql at ${POSTGRESQL_HOST}:${POSTGRESQL_PORT}. Attempt $counter."
sleep 60
done
env:
- name: POSTGRESQL_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
containers:
- name: {{ include "dolphinscheduler.fullname" . }}-alert
image: {{ include "dolphinscheduler.image.repository" . | quote }}
args:
- "alert-server"
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: TZ
value: {{ .Values.timezone }}
- name: XLS_FILE_PATH
valueFrom:
configMapKeyRef:
key: XLS_FILE_PATH
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SERVER_HOST
valueFrom:
configMapKeyRef:
key: MAIL_SERVER_HOST
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SERVER_PORT
valueFrom:
configMapKeyRef:
key: MAIL_SERVER_PORT
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SENDER
valueFrom:
configMapKeyRef:
key: MAIL_SENDER
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_USER
valueFrom:
configMapKeyRef:
key: MAIL_USER
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_PASSWD
valueFrom:
configMapKeyRef:
key: MAIL_PASSWD
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SMTP_STARTTLS_ENABLE
valueFrom:
configMapKeyRef:
key: MAIL_SMTP_STARTTLS_ENABLE
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SMTP_SSL_ENABLE
valueFrom:
configMapKeyRef:
key: MAIL_SMTP_SSL_ENABLE
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SMTP_SSL_TRUST
valueFrom:
configMapKeyRef:
key: MAIL_SMTP_SSL_TRUST
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: ENTERPRISE_WECHAT_ENABLE
valueFrom:
configMapKeyRef:
key: ENTERPRISE_WECHAT_ENABLE
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: ENTERPRISE_WECHAT_CORP_ID
valueFrom:
configMapKeyRef:
key: ENTERPRISE_WECHAT_CORP_ID
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: ENTERPRISE_WECHAT_SECRET
valueFrom:
configMapKeyRef:
key: ENTERPRISE_WECHAT_SECRET
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: ENTERPRISE_WECHAT_AGENT_ID
valueFrom:
configMapKeyRef:
key: ENTERPRISE_WECHAT_AGENT_ID
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: ENTERPRISE_WECHAT_USERS
valueFrom:
configMapKeyRef:
key: ENTERPRISE_WECHAT_USERS
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: POSTGRESQL_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
- name: POSTGRESQL_USERNAME
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlUsername }}
{{- else }}
value: {{ .Values.externalDatabase.username | quote }}
{{- end }}
- name: POSTGRESQL_PASSWORD
valueFrom:
secretKeyRef:
{{- if .Values.postgresql.enabled }}
name: {{ template "dolphinscheduler.postgresql.fullname" . }}
key: postgresql-password
{{- else }}
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
key: db-password
{{- end }}
{{- if .Values.alert.livenessProbe.enabled }}
livenessProbe:
exec:
command:
- sh
- /root/checkpoint.sh
- alert-server
initialDelaySeconds: {{ .Values.alert.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.alert.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.alert.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.alert.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.alert.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.alert.readinessProbe.enabled }}
readinessProbe:
exec:
command:
- sh
- /root/checkpoint.sh
- alert-server
initialDelaySeconds: {{ .Values.alert.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.alert.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.alert.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.alert.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.alert.readinessProbe.failureThreshold }}
{{- end }}
volumeMounts:
- mountPath: "/opt/dolphinscheduler/logs"
name: {{ include "dolphinscheduler.fullname" . }}-alert
volumes:
- name: {{ include "dolphinscheduler.fullname" . }}-alert
{{- if .Values.alert.persistentVolumeClaim.enabled }}
persistentVolumeClaim:
claimName: {{ include "dolphinscheduler.fullname" . }}-alert
{{- else }}
emptyDir: {}
{{- end }}
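Since the database env vars flip between the bundled PostgreSQL and `externalDatabase.*`, it can help to render this deployment locally and check which branch was taken before installing. With Helm 3 syntax (assumed here) that is roughly:

```bash
# Render only the alert deployment against an external database (Helm 3 assumed)
helm template ds . \
  --set postgresql.enabled=false \
  --set externalDatabase.host=pg.example.com \
  --set externalDatabase.port=5432 \
  --show-only templates/deployment-dolphinscheduler-alert.yaml
```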

161
charts/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml

@@ -0,0 +1,161 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-api
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: api
spec:
replicas: {{ .Values.api.replicas }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: api
strategy:
type: {{ .Values.api.strategy.type | quote }}
rollingUpdate:
maxSurge: {{ .Values.api.strategy.rollingUpdate.maxSurge | quote }}
maxUnavailable: {{ .Values.api.strategy.rollingUpdate.maxUnavailable | quote }}
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: api
spec:
{{- if .Values.api.affinity }}
affinity: {{- toYaml .Values.api.affinity | nindent 8 }}
{{- end }}
{{- if .Values.api.nodeSelector }}
nodeSelector: {{- toYaml .Values.api.nodeSelector | nindent 8 }}
{{- end }}
{{- if .Values.api.tolerations }}
tolerations: {{- toYaml .Values.api.tolerations | nindent 8 }}
{{- end }}
initContainers:
- name: init-postgresql
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
counter=$((counter+1))
if [ $counter == 5 ]; then
echo "Error: Couldn't connect to postgresql."
exit 1
fi
echo "Trying to connect to postgresql at ${POSTGRESQL_HOST}:${POSTGRESQL_PORT}. Attempt $counter."
sleep 60
done
env:
- name: POSTGRESQL_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
containers:
- name: {{ include "dolphinscheduler.fullname" . }}-api
image: {{ include "dolphinscheduler.image.repository" . | quote }}
args:
- "api-server"
ports:
- containerPort: 12345
name: tcp-port
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: TZ
value: {{ .Values.timezone }}
- name: POSTGRESQL_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
- name: POSTGRESQL_USERNAME
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlUsername }}
{{- else }}
value: {{ .Values.externalDatabase.username | quote }}
{{- end }}
- name: POSTGRESQL_PASSWORD
valueFrom:
secretKeyRef:
{{- if .Values.postgresql.enabled }}
name: {{ template "dolphinscheduler.postgresql.fullname" . }}
key: postgresql-password
{{- else }}
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
key: db-password
{{- end }}
- name: ZOOKEEPER_QUORUM
{{- if .Values.zookeeper.enabled }}
value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"
{{- else }}
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
{{- end }}
{{- if .Values.api.livenessProbe.enabled }}
livenessProbe:
tcpSocket:
port: 12345
initialDelaySeconds: {{ .Values.api.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.api.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.api.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.api.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.api.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.api.readinessProbe.enabled }}
readinessProbe:
tcpSocket:
port: 12345
initialDelaySeconds: {{ .Values.api.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.api.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.api.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.api.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.api.readinessProbe.failureThreshold }}
{{- end }}
volumeMounts:
- mountPath: "/opt/dolphinscheduler/logs"
name: {{ include "dolphinscheduler.fullname" . }}-api
volumes:
- name: {{ include "dolphinscheduler.fullname" . }}-api
{{- if .Values.api.persistentVolumeClaim.enabled }}
persistentVolumeClaim:
claimName: {{ include "dolphinscheduler.fullname" . }}-api
{{- else }}
emptyDir: {}
{{- end }}

102
charts/dolphinscheduler/templates/deployment-dolphinscheduler-frontend.yaml

@@ -0,0 +1,102 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-frontend
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: frontend
spec:
replicas: {{ .Values.frontend.replicas }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: frontend
strategy:
type: {{ .Values.frontend.strategy.type | quote }}
rollingUpdate:
maxSurge: {{ .Values.frontend.strategy.rollingUpdate.maxSurge | quote }}
maxUnavailable: {{ .Values.frontend.strategy.rollingUpdate.maxUnavailable | quote }}
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: frontend
spec:
{{- if .Values.frontend.affinity }}
affinity: {{- toYaml .Values.frontend.affinity | nindent 8 }}
{{- end }}
{{- if .Values.frontend.nodeSelector }}
nodeSelector: {{- toYaml .Values.frontend.nodeSelector | nindent 8 }}
{{- end }}
{{- if .Values.frontend.tolerations }}
tolerations: {{- toYaml .Values.frontend.tolerations | nindent 8 }}
{{- end }}
containers:
- name: {{ include "dolphinscheduler.fullname" . }}-frontend
image: {{ include "dolphinscheduler.image.repository" . | quote }}
args:
- "frontend"
ports:
- containerPort: 8888
name: tcp-port
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: TZ
value: {{ .Values.timezone }}
- name: FRONTEND_API_SERVER_HOST
value: '{{ include "dolphinscheduler.fullname" . }}-api'
- name: FRONTEND_API_SERVER_PORT
value: "12345"
{{- if .Values.frontend.livenessProbe.enabled }}
livenessProbe:
tcpSocket:
port: 8888
initialDelaySeconds: {{ .Values.frontend.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.frontend.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.frontend.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.frontend.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.frontend.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.frontend.readinessProbe.enabled }}
readinessProbe:
tcpSocket:
port: 8888
initialDelaySeconds: {{ .Values.frontend.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.frontend.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.frontend.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.frontend.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.frontend.readinessProbe.failureThreshold }}
{{- end }}
volumeMounts:
- mountPath: "/var/log/nginx"
name: {{ include "dolphinscheduler.fullname" . }}-frontend
volumes:
- name: {{ include "dolphinscheduler.fullname" . }}-frontend
{{- if .Values.frontend.persistentVolumeClaim.enabled }}
persistentVolumeClaim:
claimName: {{ include "dolphinscheduler.fullname" . }}-frontend
{{- else }}
emptyDir: {}
{{- end }}

43
charts/dolphinscheduler/templates/ingress.yaml

@@ -0,0 +1,43 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{{- if .Values.ingress.enabled }}
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: {{ include "dolphinscheduler.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
rules:
- host: {{ .Values.ingress.host }}
http:
paths:
- path: {{ .Values.ingress.path }}
backend:
serviceName: {{ include "dolphinscheduler.fullname" . }}-frontend
servicePort: tcp-port
{{- if .Values.ingress.tls.enabled }}
tls:
hosts:
{{- range .Values.ingress.tls.hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .Values.ingress.tls.secretName }}
{{- end }}
{{- end }}
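Enabling this template only needs the `ingress.*` values from the table above; a sketch with TLS, where the hostname is a placeholder:

```yaml
ingress:
  enabled: true
  host: "ds.example.com"                # placeholder hostname
  path: "/"
  tls:
    enabled: true
    hosts:
      - "ds.example.com"
    secretName: "dolphinscheduler-tls"  # must already contain tls.crt/tls.key
```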

35
charts/dolphinscheduler/templates/pvc-dolphinscheduler-alert.yaml

@@ -0,0 +1,35 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{{- if .Values.alert.persistentVolumeClaim.enabled }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-alert
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-alert
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
accessModes:
{{- range .Values.alert.persistentVolumeClaim.accessModes }}
- {{ . | quote }}
{{- end }}
storageClassName: {{ .Values.alert.persistentVolumeClaim.storageClassName | quote }}
resources:
requests:
storage: {{ .Values.alert.persistentVolumeClaim.storage | quote }}
{{- end }}

35
charts/dolphinscheduler/templates/pvc-dolphinscheduler-api.yaml

@@ -0,0 +1,35 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{{- if .Values.api.persistentVolumeClaim.enabled }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-api
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
accessModes:
{{- range .Values.api.persistentVolumeClaim.accessModes }}
- {{ . | quote }}
{{- end }}
storageClassName: {{ .Values.api.persistentVolumeClaim.storageClassName | quote }}
resources:
requests:
storage: {{ .Values.api.persistentVolumeClaim.storage | quote }}
{{- end }}

35
charts/dolphinscheduler/templates/pvc-dolphinscheduler-frontend.yaml

@@ -0,0 +1,35 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{{- if .Values.frontend.persistentVolumeClaim.enabled }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-frontend
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
accessModes:
{{- range .Values.frontend.persistentVolumeClaim.accessModes }}
- {{ . | quote }}
{{- end }}
storageClassName: {{ .Values.frontend.persistentVolumeClaim.storageClassName | quote }}
resources:
requests:
storage: {{ .Values.frontend.persistentVolumeClaim.storage | quote }}
{{- end }}

29
charts/dolphinscheduler/templates/secret-external-postgresql.yaml

@@ -0,0 +1,29 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{{- if not .Values.postgresql.enabled }}
apiVersion: v1
kind: Secret
metadata:
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-postgresql
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
type: Opaque
data:
db-password: {{ .Values.externalDatabase.password | b64enc | quote }}
{{- end }}
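The password is only base64-encoded, so a quick way to confirm what landed in the Secret (release name `ds` assumed) is:

```bash
# Decode the external database password for a release named "ds" (assumed)
kubectl get secret ds-externaldb -o jsonpath='{.data.db-password}' | base64 -d
```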

247
charts/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml

@@ -0,0 +1,247 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-master
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: master
spec:
podManagementPolicy: {{ .Values.master.podManagementPolicy }}
replicas: {{ .Values.master.replicas }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: master
serviceName: {{ template "dolphinscheduler.fullname" . }}-master-headless
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: master
spec:
{{- if .Values.master.affinity }}
affinity: {{- toYaml .Values.master.affinity | nindent 8 }}
{{- end }}
{{- if .Values.master.nodeSelector }}
nodeSelector: {{- toYaml .Values.master.nodeSelector | nindent 8 }}
{{- end }}
{{- if .Values.master.tolerations }}
tolerations: {{- toYaml .Values.master.tolerations | nindent 8 }}
{{- end }}
initContainers:
- name: init-zookeeper
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
echo "${ZOOKEEPER_QUORUM}" | awk -F ',' 'BEGIN{ i=1 }{ while( i <= NF ){ print $i; i++ } }' | while read line; do
while ! nc -z ${line%:*} ${line#*:}; do
counter=$((counter+1))
if [ $counter == 5 ]; then
echo "Error: Couldn't connect to zookeeper."
exit 1
fi
echo "Trying to connect to zookeeper at ${line}. Attempt $counter."
sleep 60
done
done
env:
- name: ZOOKEEPER_QUORUM
{{- if .Values.zookeeper.enabled }}
value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"
{{- else }}
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
{{- end }}
- name: init-postgresql
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
counter=$((counter+1))
if [ $counter == 5 ]; then
echo "Error: Couldn't connect to postgresql."
exit 1
fi
echo "Trying to connect to postgresql at ${POSTGRESQL_HOST}:${POSTGRESQL_PORT}. Attempt $counter."
sleep 60
done
env:
- name: POSTGRESQL_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
containers:
- name: {{ include "dolphinscheduler.fullname" . }}-master
image: {{ include "dolphinscheduler.image.repository" . | quote }}
args:
- "master-server"
ports:
- containerPort: 8888
name: unused-tcp-port
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: TZ
value: {{ .Values.timezone }}
- name: MASTER_EXEC_THREADS
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-master
key: MASTER_EXEC_THREADS
- name: MASTER_EXEC_TASK_NUM
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-master
key: MASTER_EXEC_TASK_NUM
- name: MASTER_HEARTBEAT_INTERVAL
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-master
key: MASTER_HEARTBEAT_INTERVAL
- name: MASTER_TASK_COMMIT_RETRYTIMES
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-master
key: MASTER_TASK_COMMIT_RETRYTIMES
- name: MASTER_TASK_COMMIT_INTERVAL
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-master
key: MASTER_TASK_COMMIT_INTERVAL
- name: MASTER_MAX_CPULOAD_AVG
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-master
key: MASTER_MAX_CPULOAD_AVG
- name: MASTER_RESERVED_MEMORY
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-master
key: MASTER_RESERVED_MEMORY
- name: POSTGRESQL_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
- name: POSTGRESQL_USERNAME
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlUsername }}
{{- else }}
value: {{ .Values.externalDatabase.username | quote }}
{{- end }}
- name: POSTGRESQL_PASSWORD
valueFrom:
secretKeyRef:
{{- if .Values.postgresql.enabled }}
name: {{ template "dolphinscheduler.postgresql.fullname" . }}
key: postgresql-password
{{- else }}
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
key: db-password
{{- end }}
- name: TASK_QUEUE
{{- if .Values.zookeeper.enabled }}
value: {{ .Values.zookeeper.taskQueue }}
{{- else }}
value: {{ .Values.externalZookeeper.taskQueue }}
{{- end }}
- name: ZOOKEEPER_QUORUM
{{- if .Values.zookeeper.enabled }}
value: {{ template "dolphinscheduler.zookeeper.quorum" . }}
{{- else }}
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
{{- end }}
{{- if .Values.master.livenessProbe.enabled }}
livenessProbe:
exec:
command:
- sh
- /root/checkpoint.sh
- master-server
initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.master.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.master.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.master.readinessProbe.enabled }}
readinessProbe:
exec:
command:
- sh
- /root/checkpoint.sh
- master-server
initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.master.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.master.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }}
{{- end }}
volumeMounts:
- mountPath: "/opt/dolphinscheduler/logs"
name: {{ include "dolphinscheduler.fullname" . }}-master
volumes:
- name: {{ include "dolphinscheduler.fullname" . }}-master
{{- if .Values.master.persistentVolumeClaim.enabled }}
persistentVolumeClaim:
claimName: {{ include "dolphinscheduler.fullname" . }}-master
{{- else }}
emptyDir: {}
{{- end }}
{{- if .Values.master.persistentVolumeClaim.enabled }}
volumeClaimTemplates:
- metadata:
name: {{ include "dolphinscheduler.fullname" . }}-master
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
accessModes:
{{- range .Values.master.persistentVolumeClaim.accessModes }}
- {{ . | quote }}
{{- end }}
storageClassName: {{ .Values.master.persistentVolumeClaim.storageClassName | quote }}
resources:
requests:
storage: {{ .Values.master.persistentVolumeClaim.storage | quote }}
{{- end }}
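Because the master runs as a StatefulSet behind a headless service, scaling is just a matter of `master.replicas`, and each replica keeps a stable network identity:

```bash
# Scale masters to 3 for a release named "ds" (Helm 3 syntax assumed)
helm upgrade ds . --set master.replicas=3
# pods appear as ds-dolphinscheduler-master-{0,1,2} behind the headless service
```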

275
charts/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml

@@ -0,0 +1,275 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-worker
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: worker
spec:
podManagementPolicy: {{ .Values.worker.podManagementPolicy }}
replicas: {{ .Values.worker.replicas }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: worker
serviceName: {{ template "dolphinscheduler.fullname" . }}-worker-headless
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: worker
spec:
{{- if .Values.worker.affinity }}
affinity: {{- toYaml .Values.worker.affinity | nindent 8 }}
{{- end }}
{{- if .Values.worker.nodeSelector }}
nodeSelector: {{- toYaml .Values.worker.nodeSelector | nindent 8 }}
{{- end }}
{{- if .Values.worker.tolerations }}
tolerations: {{- toYaml .Values.worker.tolerations | nindent 8 }}
{{- end }}
initContainers:
- name: init-zookeeper
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
echo "${ZOOKEEPER_QUORUM}" | awk -F ',' 'BEGIN{ i=1 }{ while( i <= NF ){ print $i; i++ } }' | while read line; do
while ! nc -z ${line%:*} ${line#*:}; do
counter=$((counter+1))
if [ $counter == 5 ]; then
echo "Error: Couldn't connect to zookeeper."
exit 1
fi
echo "Trying to connect to zookeeper at ${line}. Attempt $counter."
sleep 60
done
done
env:
- name: ZOOKEEPER_QUORUM
{{- if .Values.zookeeper.enabled }}
value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"
{{- else }}
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
{{- end }}
- name: init-postgresql
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
counter=$((counter+1))
if [ $counter == 5 ]; then
echo "Error: Couldn't connect to postgresql."
exit 1
fi
echo "Trying to connect to postgresql at ${POSTGRESQL_HOST}:${POSTGRESQL_PORT}. Attempt $counter."
sleep 60
done
env:
- name: POSTGRESQL_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
containers:
- name: {{ include "dolphinscheduler.fullname" . }}-worker
image: {{ include "dolphinscheduler.image.repository" . | quote }}
args:
- "worker-server"
ports:
- containerPort: 50051
name: "logs-port"
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: TZ
value: {{ .Values.timezone }}
- name: WORKER_EXEC_THREADS
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-worker
key: WORKER_EXEC_THREADS
- name: WORKER_FETCH_TASK_NUM
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-worker
key: WORKER_FETCH_TASK_NUM
- name: WORKER_HEARTBEAT_INTERVAL
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-worker
key: WORKER_HEARTBEAT_INTERVAL
- name: WORKER_MAX_CPULOAD_AVG
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-worker
key: WORKER_MAX_CPULOAD_AVG
- name: WORKER_RESERVED_MEMORY
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-worker
key: WORKER_RESERVED_MEMORY
- name: POSTGRESQL_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
- name: POSTGRESQL_USERNAME
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlUsername }}
{{- else }}
value: {{ .Values.externalDatabase.username | quote }}
{{- end }}
- name: POSTGRESQL_PASSWORD
valueFrom:
secretKeyRef:
{{- if .Values.postgresql.enabled }}
name: {{ template "dolphinscheduler.postgresql.fullname" . }}
key: postgresql-password
{{- else }}
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
key: db-password
{{- end }}
- name: TASK_QUEUE
{{- if .Values.zookeeper.enabled }}
value: {{ .Values.zookeeper.taskQueue }}
{{- else }}
value: {{ .Values.externalZookeeper.taskQueue }}
{{- end }}
- name: ZOOKEEPER_QUORUM
{{- if .Values.zookeeper.enabled }}
value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"
{{- else }}
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
{{- end }}
{{- if .Values.worker.livenessProbe.enabled }}
livenessProbe:
exec:
command:
- sh
- /root/checkpoint.sh
- worker-server
initialDelaySeconds: {{ .Values.worker.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.worker.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.worker.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.worker.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.worker.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.worker.readinessProbe.enabled }}
readinessProbe:
exec:
command:
- sh
- /root/checkpoint.sh
- worker-server
initialDelaySeconds: {{ .Values.worker.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.worker.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.worker.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.worker.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.worker.readinessProbe.failureThreshold }}
{{- end }}
volumeMounts:
- mountPath: {{ include "dolphinscheduler.worker.base.dir" . | quote }}
name: {{ include "dolphinscheduler.fullname" . }}-worker-data
- mountPath: "/opt/dolphinscheduler/logs"
name: {{ include "dolphinscheduler.fullname" . }}-worker-logs
- mountPath: "/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh"
subPath: "dolphinscheduler_env.sh"
name: {{ include "dolphinscheduler.fullname" . }}-worker-configmap
volumes:
- name: {{ include "dolphinscheduler.fullname" . }}-worker-data
{{- if .Values.worker.persistentVolumeClaim.dataPersistentVolume.enabled }}
persistentVolumeClaim:
claimName: {{ include "dolphinscheduler.fullname" . }}-worker-data
{{- else }}
emptyDir: {}
{{- end }}
- name: {{ include "dolphinscheduler.fullname" . }}-worker-logs
{{- if .Values.worker.persistentVolumeClaim.logsPersistentVolume.enabled }}
persistentVolumeClaim:
claimName: {{ include "dolphinscheduler.fullname" . }}-worker-logs
{{- else }}
emptyDir: {}
{{- end }}
- name: {{ include "dolphinscheduler.fullname" . }}-worker-configmap
configMap:
defaultMode: 0777
name: {{ include "dolphinscheduler.fullname" . }}-worker
items:
- key: dolphinscheduler_env.sh
path: dolphinscheduler_env.sh
{{- if .Values.worker.persistentVolumeClaim.enabled }}
volumeClaimTemplates:
{{- if .Values.worker.persistentVolumeClaim.dataPersistentVolume.enabled }}
- metadata:
name: {{ include "dolphinscheduler.fullname" . }}-worker-data
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker-data
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
accessModes:
{{- range .Values.worker.persistentVolumeClaim.dataPersistentVolume.accessModes }}
- {{ . | quote }}
{{- end }}
storageClassName: {{ .Values.worker.persistentVolumeClaim.dataPersistentVolume.storageClassName | quote }}
resources:
requests:
storage: {{ .Values.worker.persistentVolumeClaim.dataPersistentVolume.storage | quote }}
{{- end }}
{{- if .Values.worker.persistentVolumeClaim.logsPersistentVolume.enabled }}
- metadata:
name: {{ include "dolphinscheduler.fullname" . }}-worker-logs
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker-logs
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
accessModes:
{{- range .Values.worker.persistentVolumeClaim.logsPersistentVolume.accessModes }}
- {{ . | quote }}
{{- end }}
storageClassName: {{ .Values.worker.persistentVolumeClaim.logsPersistentVolume.storageClassName | quote }}
resources:
requests:
storage: {{ .Values.worker.persistentVolumeClaim.logsPersistentVolume.storage | quote }}
{{- end }}
{{- end }}
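
Worth noting how the persistence toggles in this template interact: each worker volume falls back to an emptyDir unless its PVC flag is enabled, and the volumeClaimTemplates at the bottom render only when worker.persistentVolumeClaim.enabled is also true. A minimal sketch for checking the result on a live cluster, assuming the release fullname resolves to "dolphinscheduler":

```sh
# Minimal sketch, assuming fullname "dolphinscheduler" in the current namespace.
# The pod label matches the selector this chart uses for the worker component.
kubectl get statefulset dolphinscheduler-worker
kubectl get pods -l app.kubernetes.io/component=worker

# Per-replica data/logs claims appear only when the corresponding
# worker.persistentVolumeClaim.*.enabled values are true; otherwise emptyDir is used.
kubectl get pvc | grep dolphinscheduler-worker
```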

35
charts/dolphinscheduler/templates/svc-dolphinscheduler-api.yaml

@ -0,0 +1,35 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: v1
kind: Service
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-api
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
ports:
- port: 12345
targetPort: tcp-port
protocol: TCP
name: tcp-port
selector:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: api
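
The API service stays cluster-internal (no NodePort or LoadBalancer), so a quick smoke test goes through a port-forward. A hedged sketch, assuming the fullname resolves to "dolphinscheduler"; the login credentials match the defaults used by dockerfile/hooks/check below:

```sh
# Hedged sketch: forward the ClusterIP API service and hit the login endpoint.
kubectl port-forward svc/dolphinscheduler-api 12345:12345 &
curl -s http://127.0.0.1:12345/dolphinscheduler/login \
  -d 'userName=admin&userPassword=dolphinscheduler123'
```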

35
charts/dolphinscheduler/templates/svc-dolphinscheduler-frontend.yaml

@ -0,0 +1,35 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: v1
kind: Service
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-frontend
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
ports:
- port: 8888
targetPort: tcp-port
protocol: TCP
name: tcp-port
selector:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: frontend
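
The same pattern reaches the web UI; the frontend service listens on 8888 and is likewise ClusterIP-only unless the ingress in values.yaml is enabled:

```sh
# Hedged sketch, assuming fullname "dolphinscheduler"; the exact UI path may vary by image tag.
kubectl port-forward svc/dolphinscheduler-frontend 8888:8888
# then browse http://127.0.0.1:8888
```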

36
charts/dolphinscheduler/templates/svc-dolphinscheduler-master-headless.yaml

@ -0,0 +1,36 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: v1
kind: Service
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-master-headless
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master-headless
app.kubernetes.io/instance: {{ .Release.Name }}-master-headless
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
clusterIP: "None"
ports:
- port: 8888
targetPort: tcp-port
protocol: TCP
name: unused-tcp-port
selector:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: master

36
charts/dolphinscheduler/templates/svc-dolphinscheduler-worker-headless.yaml

@ -0,0 +1,36 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: v1
kind: Service
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-worker-headless
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker-headless
app.kubernetes.io/instance: {{ .Release.Name }}-worker-headless
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
clusterIP: "None"
ports:
- port: 50051
targetPort: logs-port
protocol: TCP
name: logs-port
selector:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: worker
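
Both headless services (clusterIP: None) exist to give each StatefulSet pod a stable per-pod DNS name; for workers this is how the logs port 50051 of one specific replica is addressed. A sketch using the same busybox image the chart's init containers rely on; the names assume fullname "dolphinscheduler" and the default namespace:

```sh
# Hedged sketch: resolve worker replica 0 through the headless service.
kubectl run -it --rm dns-test --image=busybox:1.31.0 --restart=Never -- \
  nslookup dolphinscheduler-worker-0.dolphinscheduler-worker-headless.default.svc.cluster.local
```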

355
charts/dolphinscheduler/values.yaml

@ -0,0 +1,355 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Default values for dolphinscheduler-chart.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
nameOverride: ""
fullnameOverride: ""
timezone: "Asia/Shanghai"
image:
registry: "docker.io"
repository: "dolphinscheduler"
tag: "1.2.1"
pullPolicy: "IfNotPresent"
imagePullSecrets: []
# If no external PostgreSQL is available, DolphinScheduler uses this built-in PostgreSQL by default.
postgresql:
enabled: true
postgresqlUsername: "root"
postgresqlPassword: "root"
postgresqlDatabase: "dolphinscheduler"
persistence:
enabled: false
size: "20Gi"
storageClass: "-"
# If an external PostgreSQL is available, set postgresql.enabled to false.
# When postgresql.enabled is false, DolphinScheduler uses this external database.
externalDatabase:
host: "localhost"
port: "5432"
username: "root"
password: "root"
database: "dolphinscheduler"
# If no external ZooKeeper is available, DolphinScheduler uses this built-in ZooKeeper by default.
zookeeper:
enabled: true
taskQueue: "zookeeper"
persistence:
enabled: false
size: "20Gi"
storageClass: "-"
# If an external ZooKeeper is available, set zookeeper.enabled to false.
# When zookeeper.enabled is false, DolphinScheduler uses this external ZooKeeper.
externalZookeeper:
taskQueue: "zookeeper"
zookeeperQuorum: "127.0.0.1:2181"
master:
podManagementPolicy: "Parallel"
replicas: "3"
# nodeSelector must match a node's labels for the pod to be scheduled on that node.
# More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
nodeSelector: {}
# Tolerations allow the pod to be scheduled onto nodes with matching taints.
tolerations: []
# Affinity is a group of affinity scheduling rules.
# If specified, the pod's scheduling constraints.
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
configmap:
MASTER_EXEC_THREADS: "100"
MASTER_EXEC_TASK_NUM: "20"
MASTER_HEARTBEAT_INTERVAL: "10"
MASTER_TASK_COMMIT_RETRYTIMES: "5"
MASTER_TASK_COMMIT_INTERVAL: "1000"
MASTER_MAX_CPULOAD_AVG: "100"
MASTER_RESERVED_MEMORY: "0.1"
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
livenessProbe:
enabled: true
initialDelaySeconds: "30"
periodSeconds: "30"
timeoutSeconds: "5"
failureThreshold: "3"
successThreshold: "1"
## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
readinessProbe:
enabled: true
initialDelaySeconds: "30"
periodSeconds: "30"
timeoutSeconds: "5"
failureThreshold: "3"
successThreshold: "1"
## volumeClaimTemplates is a list of claims that pods are allowed to reference.
## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
## A claim in this list takes precedence over any volumes in the template, with the same name.
persistentVolumeClaim:
enabled: false
accessModes:
- "ReadWriteOnce"
storageClassName: "-"
storage: "20Gi"
worker:
podManagementPolicy: "Parallel"
replicas: "3"
# nodeSelector must match a node's labels for the pod to be scheduled on that node.
# More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
nodeSelector: {}
# Tolerations allow the pod to be scheduled onto nodes with matching taints.
tolerations: []
# Affinity is a group of affinity scheduling rules.
# If specified, the pod's scheduling constraints.
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
livenessProbe:
enabled: true
initialDelaySeconds: "30"
periodSeconds: "30"
timeoutSeconds: "5"
failureThreshold: "3"
successThreshold: "1"
## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
readinessProbe:
enabled: true
initialDelaySeconds: "30"
periodSeconds: "30"
timeoutSeconds: "5"
failureThreshold: "3"
successThreshold: "1"
configmap:
WORKER_EXEC_THREADS: "100"
WORKER_HEARTBEAT_INTERVAL: "10"
WORKER_FETCH_TASK_NUM: "3"
WORKER_MAX_CPULOAD_AVG: "100"
WORKER_RESERVED_MEMORY: "0.1"
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
DOLPHINSCHEDULER_ENV:
- "export HADOOP_HOME=/opt/soft/hadoop"
- "export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop"
- "export SPARK_HOME1=/opt/soft/spark1"
- "export SPARK_HOME2=/opt/soft/spark2"
- "export PYTHON_HOME=/opt/soft/python"
- "export JAVA_HOME=/opt/soft/java"
- "export HIVE_HOME=/opt/soft/hive"
- "export FLINK_HOME=/opt/soft/flink"
- "export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$FLINK_HOME/bin:$PATH"
## volumeClaimTemplates is a list of claims that pods are allowed to reference.
## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
## A claim in this list takes precedence over any volumes in the template, with the same name.
persistentVolumeClaim:
enabled: false
## dolphinscheduler data volume
dataPersistentVolume:
enabled: false
accessModes:
- "ReadWriteOnce"
storageClassName: "-"
storage: "20Gi"
## dolphinscheduler logs volume
logsPersistentVolume:
enabled: false
accessModes:
- "ReadWriteOnce"
storageClassName: "-"
storage: "20Gi"
alert:
strategy:
type: "RollingUpdate"
rollingUpdate:
maxSurge: "25%"
maxUnavailable: "25%"
replicas: "1"
# nodeSelector must match a node's labels for the pod to be scheduled on that node.
# More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
nodeSelector: {}
# Tolerations allow the pod to be scheduled onto nodes with matching taints.
tolerations: []
# Affinity is a group of affinity scheduling rules.
# If specified, the pod's scheduling constraints.
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
configmap:
XLS_FILE_PATH: "/tmp/xls"
MAIL_SERVER_HOST: ""
MAIL_SERVER_PORT: ""
MAIL_SENDER: ""
MAIL_USER: ""
MAIL_PASSWD: ""
MAIL_SMTP_STARTTLS_ENABLE: false
MAIL_SMTP_SSL_ENABLE: false
MAIL_SMTP_SSL_TRUST: ""
ENTERPRISE_WECHAT_ENABLE: false
ENTERPRISE_WECHAT_CORP_ID: ""
ENTERPRISE_WECHAT_SECRET: ""
ENTERPRISE_WECHAT_AGENT_ID: ""
ENTERPRISE_WECHAT_USERS: ""
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
livenessProbe:
enabled: true
initialDelaySeconds: "30"
periodSeconds: "30"
timeoutSeconds: "5"
failureThreshold: "3"
successThreshold: "1"
## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
readinessProbe:
enabled: true
initialDelaySeconds: "30"
periodSeconds: "30"
timeoutSeconds: "5"
failureThreshold: "3"
successThreshold: "1"
## PersistentVolumeClaim used by the alert Deployment; unlike the master/worker StatefulSets, no volumeClaimTemplates are involved here.
persistentVolumeClaim:
enabled: false
accessModes:
- "ReadWriteOnce"
storageClassName: "-"
storage: "20Gi"
api:
strategy:
type: "RollingUpdate"
rollingUpdate:
maxSurge: "25%"
maxUnavailable: "25%"
replicas: "1"
# nodeSelector must match a node's labels for the pod to be scheduled on that node.
# More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
nodeSelector: {}
# Tolerations allow the pod to be scheduled onto nodes with matching taints.
tolerations: []
# Affinity is a group of affinity scheduling rules.
# If specified, the pod's scheduling constraints.
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
livenessProbe:
enabled: true
initialDelaySeconds: "30"
periodSeconds: "30"
timeoutSeconds: "5"
failureThreshold: "3"
successThreshold: "1"
## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
readinessProbe:
enabled: true
initialDelaySeconds: "30"
periodSeconds: "30"
timeoutSeconds: "5"
failureThreshold: "3"
successThreshold: "1"
## PersistentVolumeClaim used by the api Deployment; unlike the master/worker StatefulSets, no volumeClaimTemplates are involved here.
persistentVolumeClaim:
enabled: false
accessModes:
- "ReadWriteOnce"
storageClassName: "-"
storage: "20Gi"
frontend:
strategy:
type: "RollingUpdate"
rollingUpdate:
maxSurge: "25%"
maxUnavailable: "25%"
replicas: "1"
# nodeSelector must match a node's labels for the pod to be scheduled on that node.
# More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
nodeSelector: {}
# Tolerations allow the pod to be scheduled onto nodes with matching taints.
tolerations: []
# Affinity is a group of affinity scheduling rules.
# If specified, the pod's scheduling constraints.
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
livenessProbe:
enabled: true
initialDelaySeconds: "30"
periodSeconds: "30"
timeoutSeconds: "5"
failureThreshold: "3"
successThreshold: "1"
## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
readinessProbe:
enabled: true
initialDelaySeconds: "30"
periodSeconds: "30"
timeoutSeconds: "5"
failureThreshold: "3"
successThreshold: "1"
## PersistentVolumeClaim used by the frontend Deployment; unlike the master/worker StatefulSets, no volumeClaimTemplates are involved here.
persistentVolumeClaim:
enabled: false
accessModes:
- "ReadWriteOnce"
storageClassName: "-"
storage: "20Gi"
ingress:
enabled: false
host: "dolphinscheduler.org"
path: "/"
tls:
enabled: false
hosts:
- "dolphinscheduler.org"
secretName: "dolphinscheduler-tls"
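
These defaults wire everything to the bundled PostgreSQL and ZooKeeper; the enabled/external* pairs above are the switch. A hedged install sketch (chart path, release name, and all endpoint values are placeholders; note that commas inside --set must be escaped):

```sh
# Minimal sketch: point the chart at an existing PostgreSQL and ZooKeeper
# and expose the frontend through an Ingress. All hosts/credentials are placeholders.
helm install dolphinscheduler ./charts/dolphinscheduler \
  --set postgresql.enabled=false \
  --set externalDatabase.host=pg.example.com \
  --set externalDatabase.username=dolphinscheduler \
  --set externalDatabase.password=changeme \
  --set zookeeper.enabled=false \
  --set externalZookeeper.zookeeperQuorum='zk-0.example.com:2181\,zk-1.example.com:2181' \
  --set ingress.enabled=true \
  --set ingress.host=dolphinscheduler.example.com
```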

15
docker/docker-compose.yml

@ -1,3 +1,18 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
version: '2'
services:
zookeeper:

10
dockerfile/hooks/check

@ -16,7 +16,7 @@
# limitations under the License.
#
echo "------ dolphinscheduler check - server - status -------"
sleep 20
sleep 60
server_num=$(docker top `docker container list | grep '/sbin/tini' | awk '{print $1}'`| grep java | grep "dolphinscheduler" | awk -F 'classpath ' '{print $2}' | awk '{print $2}' | sort | uniq -c | wc -l)
if [ $server_num -eq 5 ]
then
@ -25,3 +25,11 @@ else
echo "Server start failed "$server_num
exit 1
fi
ready=`curl http://127.0.0.1:8888/dolphinscheduler/login -d 'userName=admin&userPassword=dolphinscheduler123' -v | grep "login success" | wc -l`
if [ $ready -eq 1 ]
then
echo "Servers is ready"
else
echo "Servers is not ready"
exit 1
fi
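
The new block extends the container health check from "five java processes exist" to "the API actually answers a login". To re-run the probe by hand against a running stack (published on 127.0.0.1:8888 as in docker/docker-compose.yml):

```sh
# Hedged sketch: manual re-run of the readiness check added above.
# Prints 1 when the login succeeded, 0 otherwise.
curl -s http://127.0.0.1:8888/dolphinscheduler/login \
  -d 'userName=admin&userPassword=dolphinscheduler123' | grep -c "login success"
```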

5
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/ExcelUtils.java

@ -26,6 +26,7 @@ import org.apache.poi.ss.usermodel.HorizontalAlignment;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.*;
@ -102,7 +103,11 @@ public class ExcelUtils {
for (int i = 0; i < headerList.size(); i++) {
sheet.setColumnWidth(i, headerList.get(i).length() * 800);
}
File file = new File(xlsFilePath);
if (!file.exists()) {
file.mkdirs();
}
//setting file output

10
dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/utils/ExcelUtilsTest.java

@ -89,4 +89,14 @@ public class ExcelUtilsTest {
ExcelUtils.genExcelFile(incorrectContent1, title, xlsFilePath);
}
/**
* Test GenExcelFile (check directory)
*/
@Test
public void testGenExcelFileByCheckDir() {
ExcelUtils.genExcelFile("[{\"a\": \"a\"},{\"a\": \"a\"}]", "t", "/tmp/xls");
File file = new File("/tmp/xls" + Constants.SINGLE_SLASH + "t" + Constants.EXCEL_SUFFIX_XLS);
file.delete();
}
}

24
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/AlertGroupController.java

@ -93,11 +93,11 @@ public class AlertGroupController extends BaseController{
public Result list(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser) {
logger.info("login user {}, query all alertGroup",
loginUser.getUserName());
try{
try {
HashMap<String, Object> result = alertGroupService.queryAlertgroup();
return returnDataList(result);
}catch (Exception e){
logger.error(Status.QUERY_ALL_ALERTGROUP_ERROR.getMsg(),e);
} catch (Exception e) {
logger.error(Status.QUERY_ALL_ALERTGROUP_ERROR.getMsg(), e);
return error(Status.QUERY_ALL_ALERTGROUP_ERROR.getCode(), Status.QUERY_ALL_ALERTGROUP_ERROR.getMsg());
}
}
@ -214,12 +214,20 @@ public class AlertGroupController extends BaseController{
@GetMapping(value = "/verify-group-name")
@ResponseStatus(HttpStatus.OK)
public Result verifyGroupName(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value ="groupName") String groupName
) {
logger.info("login user {}, verfiy group name: {}",
loginUser.getUserName(),groupName);
@RequestParam(value ="groupName") String groupName) {
logger.info("login user {}, verify group name: {}", loginUser.getUserName(), groupName);
return alertGroupService.verifyGroupName(loginUser, groupName);
boolean exist= alertGroupService.existGroupName(groupName);
Result result = new Result();
if (exist) {
logger.error("group {} has exist, can't create again.", groupName);
result.setCode(Status.ALERT_GROUP_EXIST.getCode());
result.setMsg(Status.ALERT_GROUP_EXIST.getMsg());
} else {
result.setCode(Status.SUCCESS.getCode());
result.setMsg(Status.SUCCESS.getMsg());
}
return result;
}
/**
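
verifyGroupName now resolves the result in the controller via existGroupName instead of delegating to the service. A hedged curl sketch (the alert-group base path and cookie-based session handling are assumptions; log in first):

```sh
# Hedged sketch: log in, then probe the rewritten verify endpoint.
curl -s -c /tmp/ds-cookies http://127.0.0.1:12345/dolphinscheduler/login \
  -d 'userName=admin&userPassword=dolphinscheduler123'
curl -s -b /tmp/ds-cookies \
  'http://127.0.0.1:12345/dolphinscheduler/alert-group/verify-group-name?groupName=test-group'
```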

33
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/DataSourceController.java

@ -16,18 +16,19 @@
*/
package org.apache.dolphinscheduler.api.controller;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiImplicitParam;
import io.swagger.annotations.ApiImplicitParams;
import io.swagger.annotations.ApiOperation;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.service.DataSourceService;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.DbConnectType;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.utils.CommonUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.dao.entity.User;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiImplicitParam;
import io.swagger.annotations.ApiImplicitParams;
import io.swagger.annotations.ApiOperation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
@ -76,6 +77,7 @@ public class DataSourceController extends BaseController {
@ApiImplicitParam(name = "database", value = "DATABASE_NAME",required = true, dataType ="String"),
@ApiImplicitParam(name = "userName", value = "USER_NAME",required = true, dataType ="String"),
@ApiImplicitParam(name = "password", value = "PASSWORD", dataType ="String"),
@ApiImplicitParam(name = "connectType", value = "CONNECT_TYPE", dataType = "DbConnectType"),
@ApiImplicitParam(name = "other", value = "DATA_SOURCE_OTHER", dataType ="String")
})
@PostMapping(value = "/create")
@ -90,11 +92,12 @@ public class DataSourceController extends BaseController {
@RequestParam(value = "principal") String principal,
@RequestParam(value = "userName") String userName,
@RequestParam(value = "password") String password,
@RequestParam(value = "connectType") DbConnectType connectType,
@RequestParam(value = "other") String other) {
logger.info("login user {} create datasource name: {}, note: {}, type: {}, host: {},port: {},database : {},principal: {},userName : {} other: {}",
loginUser.getUserName(), name, note, type, host,port,database,principal,userName,other);
logger.info("login user {} create datasource name: {}, note: {}, type: {}, host: {}, port: {}, database : {}, principal: {}, userName : {}, connectType: {}, other: {}",
loginUser.getUserName(), name, note, type, host, port, database, principal, userName, connectType, other);
try {
String parameter = dataSourceService.buildParameter(name, note, type, host, port, database,principal,userName, password, other);
String parameter = dataSourceService.buildParameter(name, note, type, host, port, database, principal, userName, password, connectType, other);
Map<String, Object> result = dataSourceService.createDataSource(loginUser, name, note, type, parameter);
return returnDataList(result);
@ -133,6 +136,7 @@ public class DataSourceController extends BaseController {
@ApiImplicitParam(name = "database", value = "DATABASE_NAME",required = true, dataType ="String"),
@ApiImplicitParam(name = "userName", value = "USER_NAME",required = true, dataType ="String"),
@ApiImplicitParam(name = "password", value = "PASSWORD", dataType ="String"),
@ApiImplicitParam(name = "connectType", value = "CONNECT_TYPE", dataType = "DbConnectType"),
@ApiImplicitParam(name = "other", value = "DATA_SOURCE_OTHER", dataType ="String")
})
@PostMapping(value = "/update")
@ -148,11 +152,12 @@ public class DataSourceController extends BaseController {
@RequestParam(value = "principal") String principal,
@RequestParam(value = "userName") String userName,
@RequestParam(value = "password") String password,
@RequestParam(value = "connectType") DbConnectType connectType,
@RequestParam(value = "other") String other) {
logger.info("login user {} updateProcessInstance datasource name: {}, note: {}, type: {}, other: {}",
loginUser.getUserName(), name, note, type, other);
logger.info("login user {} updateProcessInstance datasource name: {}, note: {}, type: {}, connectType: {}, other: {}",
loginUser.getUserName(), name, note, type, connectType, other);
try {
String parameter = dataSourceService.buildParameter(name, note, type, host, port, database,principal, userName, password, other);
String parameter = dataSourceService.buildParameter(name, note, type, host, port, database,principal, userName, password, connectType, other);
Map<String, Object> dataSource = dataSourceService.updateDataSource(id, loginUser, name, note, type, parameter);
return returnDataList(dataSource);
} catch (Exception e) {
@ -277,6 +282,7 @@ public class DataSourceController extends BaseController {
@ApiImplicitParam(name = "database", value = "DATABASE_NAME",required = true, dataType ="String"),
@ApiImplicitParam(name = "userName", value = "USER_NAME",required = true, dataType ="String"),
@ApiImplicitParam(name = "password", value = "PASSWORD", dataType ="String"),
@ApiImplicitParam(name = "connectType", value = "CONNECT_TYPE", dataType = "DbConnectType"),
@ApiImplicitParam(name = "other", value = "DATA_SOURCE_OTHER", dataType ="String")
})
@PostMapping(value = "/connect")
@ -291,11 +297,12 @@ public class DataSourceController extends BaseController {
@RequestParam(value = "principal") String principal,
@RequestParam(value = "userName") String userName,
@RequestParam(value = "password") String password,
@RequestParam(value = "connectType") DbConnectType connectType,
@RequestParam(value = "other") String other) {
logger.info("login user {}, connect datasource: {} failure, note: {}, type: {}, other: {}",
loginUser.getUserName(), name, note, type, other);
logger.info("login user {}, connect datasource: {} failure, note: {}, type: {}, connectType: {}, other: {}",
loginUser.getUserName(), name, note, type, connectType, other);
try {
String parameter = dataSourceService.buildParameter(name, note, type, host, port, database,principal,userName, password, other);
String parameter = dataSourceService.buildParameter(name, note, type, host, port, database, principal, userName, password, connectType, other);
Boolean isConnection = dataSourceService.checkConnection(type, parameter);
Result result = new Result();
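
All three datasource endpoints (create, update, connect) now take a required connectType of type DbConnectType. A hedged sketch against the connect endpoint (base path, enum value, and the Oracle host are assumptions; reuses the session cookie from the login sketch above):

```sh
# Hedged sketch: verify connectivity for a datasource using the new connectType parameter.
curl -s -b /tmp/ds-cookies http://127.0.0.1:12345/dolphinscheduler/datasources/connect \
  -d 'name=ora-test&note=&type=ORACLE&host=ora.example.com&port=1521&database=orcl' \
  -d 'principal=&userName=test&password=changeme&connectType=ORACLE_SERVICE_NAME&other='
```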

2
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessInstanceController.java

@ -391,7 +391,7 @@ public class ProcessInstanceController extends BaseController{
}
}
}
if(deleteFailedIdList.size() > 0){
if(!deleteFailedIdList.isEmpty()){
putMsg(result, Status.BATCH_DELETE_PROCESS_INSTANCE_BY_IDS_ERROR, String.join(",", deleteFailedIdList));
}else{
putMsg(result, Status.SUCCESS);

163
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java

@ -60,6 +60,50 @@ public class ResourcesController extends BaseController{
@Autowired
private UdfFuncService udfFuncService;
/**
* create directory
*
* @param loginUser login user
* @param type type
* @param alias alias
* @param description description
* @param pid parent id
* @param currentDir current directory
* @return create result code
*/
@ApiOperation(value = "createDirectory", notes= "CREATE_RESOURCE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType ="ResourceType"),
@ApiImplicitParam(name = "name", value = "RESOURCE_NAME", required = true, dataType ="String"),
@ApiImplicitParam(name = "description", value = "RESOURCE_DESC", dataType ="String"),
@ApiImplicitParam(name = "file", value = "RESOURCE_FILE", required = true, dataType = "MultipartFile")
})
@PostMapping(value = "/directory/create")
public Result createDirectory(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "type") ResourceType type,
@RequestParam(value ="name") String alias,
@RequestParam(value = "description", required = false) String description,
@RequestParam(value ="pid") int pid,
@RequestParam(value ="currentDir") String currentDir) {
try {
logger.info("login user {}, create resource, type: {}, resource alias: {}, desc: {}, file: {},{}",
loginUser.getUserName(),type, alias, description,pid,currentDir);
return resourceService.createDirectory(loginUser,alias, description,type ,pid,currentDir);
} catch (Exception e) {
logger.error(CREATE_RESOURCE_ERROR.getMsg(),e);
return error(CREATE_RESOURCE_ERROR.getCode(), CREATE_RESOURCE_ERROR.getMsg());
}
}
/**
* create resource
*
@ -80,13 +124,15 @@ public class ResourcesController extends BaseController{
@PostMapping(value = "/create")
public Result createResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "type") ResourceType type,
@RequestParam(value ="name")String alias,
@RequestParam(value ="name") String alias,
@RequestParam(value = "description", required = false) String description,
@RequestParam("file") MultipartFile file) {
@RequestParam("file") MultipartFile file,
@RequestParam(value ="pid") int pid,
@RequestParam(value ="currentDir") String currentDir) {
try {
logger.info("login user {}, create resource, type: {}, resource alias: {}, desc: {}, file: {},{}",
loginUser.getUserName(),type, alias, description, file.getName(), file.getOriginalFilename());
return resourceService.createResource(loginUser,alias, description,type ,file);
return resourceService.createResource(loginUser,alias, description,type ,file,pid,currentDir);
} catch (Exception e) {
logger.error(CREATE_RESOURCE_ERROR.getMsg(),e);
return error(CREATE_RESOURCE_ERROR.getCode(), CREATE_RESOURCE_ERROR.getMsg());
@ -108,8 +154,7 @@ public class ResourcesController extends BaseController{
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType ="Int", example = "100"),
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType ="ResourceType"),
@ApiImplicitParam(name = "name", value = "RESOURCE_NAME", required = true, dataType ="String"),
@ApiImplicitParam(name = "description", value = "RESOURCE_DESC", dataType ="String"),
@ApiImplicitParam(name = "file", value = "RESOURCE_FILE", required = true,dataType = "MultipartFile")
@ApiImplicitParam(name = "description", value = "RESOURCE_DESC", dataType ="String")
})
@PostMapping(value = "/update")
public Result updateResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@ -120,7 +165,7 @@ public class ResourcesController extends BaseController{
try {
logger.info("login user {}, update resource, type: {}, resource alias: {}, desc: {}",
loginUser.getUserName(),type, alias, description);
return resourceService.updateResource(loginUser,resourceId,alias, description,type);
return resourceService.updateResource(loginUser,resourceId,alias,description,type);
} catch (Exception e) {
logger.error(UPDATE_RESOURCE_ERROR.getMsg(),e);
return error(Status.UPDATE_RESOURCE_ERROR.getCode(), Status.UPDATE_RESOURCE_ERROR.getMsg());
@ -144,7 +189,7 @@ public class ResourcesController extends BaseController{
@RequestParam(value ="type") ResourceType type
){
try{
logger.info("query resource list, login user:{}, resource type:{}", loginUser.getUserName(), type.toString());
logger.info("query resource list, login user:{}, resource type:{}", loginUser.getUserName(), type);
Map<String, Object> result = resourceService.queryResourceList(loginUser, type);
return returnDataList(result);
}catch (Exception e){
@ -166,6 +211,7 @@ public class ResourcesController extends BaseController{
@ApiOperation(value = "queryResourceListPaging", notes= "QUERY_RESOURCE_LIST_PAGING_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType ="ResourceType"),
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType ="int"),
@ApiImplicitParam(name = "searchVal", value = "SEARCH_VAL", dataType ="String"),
@ApiImplicitParam(name = "pageNo", value = "PAGE_NO", dataType = "Int", example = "1"),
@ApiImplicitParam(name = "pageSize", value = "PAGE_SIZE", dataType ="Int",example = "20")
@ -174,20 +220,21 @@ public class ResourcesController extends BaseController{
@ResponseStatus(HttpStatus.OK)
public Result queryResourceListPaging(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value ="type") ResourceType type,
@RequestParam(value ="id") int id,
@RequestParam("pageNo") Integer pageNo,
@RequestParam(value = "searchVal", required = false) String searchVal,
@RequestParam("pageSize") Integer pageSize
){
try{
logger.info("query resource list, login user:{}, resource type:{}, search value:{}",
loginUser.getUserName(), type.toString(), searchVal);
loginUser.getUserName(), type, searchVal);
Map<String, Object> result = checkPageParams(pageNo, pageSize);
if(result.get(Constants.STATUS) != Status.SUCCESS){
return returnDataListPaging(result);
}
searchVal = ParameterUtils.handleEscapes(searchVal);
result = resourceService.queryResourceListPaging(loginUser,type,searchVal,pageNo, pageSize);
result = resourceService.queryResourceListPaging(loginUser,id,type,searchVal,pageNo, pageSize);
return returnDataListPaging(result);
}catch (Exception e){
logger.error(QUERY_RESOURCES_LIST_PAGING.getMsg(),e);
@ -227,32 +274,89 @@ public class ResourcesController extends BaseController{
* verify resource by alias and type
*
* @param loginUser login user
* @param alias resource name
* @param type resource type
* @param fullName resource full name
* @param type resource type
* @return true if the resource name not exists, otherwise return false
*/
@ApiOperation(value = "verifyResourceName", notes= "VERIFY_RESOURCE_NAME_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType ="ResourceType"),
@ApiImplicitParam(name = "name", value = "RESOURCE_NAME", required = true, dataType ="String")
@ApiImplicitParam(name = "fullName", value = "RESOURCE_FULL_NAME", required = true, dataType ="String")
})
@GetMapping(value = "/verify-name")
@ResponseStatus(HttpStatus.OK)
public Result verifyResourceName(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value ="name") String alias,
@RequestParam(value ="fullName") String fullName,
@RequestParam(value ="type") ResourceType type
) {
try {
logger.info("login user {}, verfiy resource alias: {},resource type: {}",
loginUser.getUserName(), alias,type);
loginUser.getUserName(), fullName,type);
return resourceService.verifyResourceName(alias,type,loginUser);
return resourceService.verifyResourceName(fullName,type,loginUser);
} catch (Exception e) {
logger.error(VERIFY_RESOURCE_BY_NAME_AND_TYPE_ERROR.getMsg(), e);
return error(Status.VERIFY_RESOURCE_BY_NAME_AND_TYPE_ERROR.getCode(), Status.VERIFY_RESOURCE_BY_NAME_AND_TYPE_ERROR.getMsg());
}
}
/**
* query resources jar list
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
@ApiOperation(value = "queryResourceJarList", notes= "QUERY_RESOURCE_LIST_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType ="ResourceType")
})
@GetMapping(value="/list/jar")
@ResponseStatus(HttpStatus.OK)
public Result queryResourceJarList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value ="type") ResourceType type
){
try{
logger.info("query resource list, login user:{}, resource type:{}", loginUser.getUserName(), type.toString());
Map<String, Object> result = resourceService.queryResourceJarList(loginUser, type);
return returnDataList(result);
}catch (Exception e){
logger.error(QUERY_RESOURCES_LIST_ERROR.getMsg(),e);
return error(Status.QUERY_RESOURCES_LIST_ERROR.getCode(), Status.QUERY_RESOURCES_LIST_ERROR.getMsg());
}
}
/**
* query resource by full name and type
*
* @param loginUser login user
* @param fullName resource full name
* @param type resource type
* @return true if the resource name not exists, otherwise return false
*/
@ApiOperation(value = "queryResource", notes= "QUERY_BY_RESOURCE_NAME")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType ="ResourceType"),
@ApiImplicitParam(name = "fullName", value = "RESOURCE_FULL_NAME", required = true, dataType ="String")
})
@GetMapping(value = "/queryResource")
@ResponseStatus(HttpStatus.OK)
public Result queryResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value ="fullName",required = false) String fullName,
@RequestParam(value ="id",required = false) Integer id,
@RequestParam(value ="type") ResourceType type
) {
try {
logger.info("login user {}, query resource by full name: {} or id: {},resource type: {}",
loginUser.getUserName(), fullName,id,type);
return resourceService.queryResource(fullName,id,type);
} catch (Exception e) {
logger.error(RESOURCE_NOT_EXIST.getMsg(), e);
return error(Status.RESOURCE_NOT_EXIST.getCode(), Status.RESOURCE_NOT_EXIST.getMsg());
}
}
/**
* view resource file online
*
@ -310,16 +414,18 @@ public class ResourcesController extends BaseController{
@RequestParam(value ="fileName")String fileName,
@RequestParam(value ="suffix")String fileSuffix,
@RequestParam(value = "description", required = false) String description,
@RequestParam(value = "content") String content
@RequestParam(value = "content") String content,
@RequestParam(value ="pid") int pid,
@RequestParam(value ="currentDir") String currentDir
) {
try{
logger.info("login user {}, online create resource! fileName : {}, type : {}, suffix : {},desc : {},content : {}",
loginUser.getUserName(),fileName,type,fileSuffix,description,content);
loginUser.getUserName(),fileName,type,fileSuffix,description,content,pid,currentDir);
if(StringUtils.isEmpty(content)){
logger.error("resource file contents are not allowed to be empty");
return error(Status.RESOURCE_FILE_IS_EMPTY.getCode(), RESOURCE_FILE_IS_EMPTY.getMsg());
}
return resourceService.onlineCreateResource(loginUser,type,fileName,fileSuffix,description,content);
return resourceService.onlineCreateResource(loginUser,type,fileName,fileSuffix,description,content,pid,currentDir);
}catch (Exception e){
logger.error(CREATE_RESOURCE_FILE_ON_LINE_ERROR.getMsg(),e);
return error(Status.CREATE_RESOURCE_FILE_ON_LINE_ERROR.getCode(), Status.CREATE_RESOURCE_FILE_ON_LINE_ERROR.getMsg());
@ -384,6 +490,9 @@ public class ResourcesController extends BaseController{
.ok()
.header(HttpHeaders.CONTENT_DISPOSITION, "attachment; filename=\"" + file.getFilename() + "\"")
.body(file);
}catch (RuntimeException e){
logger.error(e.getMessage(),e);
return ResponseEntity.status(HttpStatus.BAD_REQUEST).body(e.getMessage());
}catch (Exception e){
logger.error(DOWNLOAD_RESOURCE_FILE_ERROR.getMsg(),e);
return ResponseEntity.status(HttpStatus.BAD_REQUEST).body(Status.DOWNLOAD_RESOURCE_FILE_ERROR.getMsg());
@ -426,8 +535,6 @@ public class ResourcesController extends BaseController{
@RequestParam(value = "resourceId") int resourceId) {
logger.info("login user {}, create udf function, type: {}, funcName: {},argTypes: {} ,database: {},desc: {},resourceId: {}",
loginUser.getUserName(),type, funcName, argTypes,database,description, resourceId);
Result result = new Result();
try {
return udfFuncService.createUdfFunction(loginUser,funcName,className,argTypes,database,description,type,resourceId);
} catch (Exception e) {
@ -563,7 +670,7 @@ public class ResourcesController extends BaseController{
public Result queryResourceList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("type") UdfType type){
try{
logger.info("query datasource list, user:{}, type:{}", loginUser.getUserName(), type.toString());
logger.info("query datasource list, user:{}, type:{}", loginUser.getUserName(), type);
Map<String, Object> result = udfFuncService.queryResourceList(loginUser,type.ordinal());
return returnDataList(result);
}catch (Exception e){
@ -660,21 +767,21 @@ public class ResourcesController extends BaseController{
* @param userId user id
* @return unauthorized result code
*/
@ApiOperation(value = "unauthorizedFile", notes= "UNAUTHORIZED_FILE_NOTES")
@ApiOperation(value = "authorizeResourceTree", notes= "AUTHORIZE_RESOURCE_TREE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType ="Int", example = "100")
})
@GetMapping(value = "/unauth-file")
@GetMapping(value = "/authorize-resource-tree")
@ResponseStatus(HttpStatus.CREATED)
public Result unauthorizedFile(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
public Result authorizeResourceTree(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("userId") Integer userId) {
try{
logger.info("resource unauthorized file, user:{}, unauthorized user id:{}", loginUser.getUserName(), userId);
Map<String, Object> result = resourceService.unauthorizedFile(loginUser, userId);
logger.info("all resource file, user:{}, user id:{}", loginUser.getUserName(), userId);
Map<String, Object> result = resourceService.authorizeResourceTree(loginUser, userId);
return returnDataList(result);
}catch (Exception e){
logger.error(UNAUTHORIZED_FILE_RESOURCE_ERROR.getMsg(),e);
return error(Status.UNAUTHORIZED_FILE_RESOURCE_ERROR.getCode(), Status.UNAUTHORIZED_FILE_RESOURCE_ERROR.getMsg());
logger.error(AUTHORIZE_RESOURCE_TREE.getMsg(),e);
return error(Status.AUTHORIZE_RESOURCE_TREE.getCode(), Status.AUTHORIZE_RESOURCE_TREE.getMsg());
}
}
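
The resource-tree rework threads pid and currentDir through the create, online-create, and paging-query routes, and replaces unauth-file with authorize-resource-tree. A hedged sketch of the two new routes (base path and the root pid convention are assumptions; reuses the session cookie from the login sketch above):

```sh
# Hedged sketch: create a directory, then fetch the authorizable resource tree.
curl -s -b /tmp/ds-cookies http://127.0.0.1:12345/dolphinscheduler/resources/directory/create \
  -d 'type=FILE&name=etl&description=etl scripts&pid=-1&currentDir=/'
curl -s -b /tmp/ds-cookies \
  'http://127.0.0.1:12345/dolphinscheduler/resources/authorize-resource-tree?userId=1'
```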

3
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/ProcessMeta.java

@ -106,9 +106,6 @@ public class ProcessMeta {
*/
private String scheduleWorkerGroupName;
public ProcessMeta() {
}
public String getProjectName() {
return projectName;
}

36
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/TaskCountDto.java

@ -43,36 +43,36 @@ public class TaskCountDto {
}
private void countTaskDtos(List<ExecuteStatusCount> taskInstanceStateCounts){
int submitted_success = 0;
int running_exeution = 0;
int ready_pause = 0;
int submittedSuccess = 0;
int runningExeution = 0;
int readyPause = 0;
int pause = 0;
int ready_stop = 0;
int readyStop = 0;
int stop = 0;
int failure = 0;
int success = 0;
int need_fault_tolerance = 0;
int needFaultTolerance = 0;
int kill = 0;
int waitting_thread = 0;
int waittingThread = 0;
for(ExecuteStatusCount taskInstanceStateCount : taskInstanceStateCounts){
ExecutionStatus status = taskInstanceStateCount.getExecutionStatus();
totalCount += taskInstanceStateCount.getCount();
switch (status){
case SUBMITTED_SUCCESS:
submitted_success += taskInstanceStateCount.getCount();
submittedSuccess += taskInstanceStateCount.getCount();
break;
case RUNNING_EXEUTION:
running_exeution += taskInstanceStateCount.getCount();
runningExeution += taskInstanceStateCount.getCount();
break;
case READY_PAUSE:
ready_pause += taskInstanceStateCount.getCount();
readyPause += taskInstanceStateCount.getCount();
break;
case PAUSE:
pause += taskInstanceStateCount.getCount();
break;
case READY_STOP:
ready_stop += taskInstanceStateCount.getCount();
readyStop += taskInstanceStateCount.getCount();
break;
case STOP:
stop += taskInstanceStateCount.getCount();
@ -84,13 +84,13 @@ public class TaskCountDto {
success += taskInstanceStateCount.getCount();
break;
case NEED_FAULT_TOLERANCE:
need_fault_tolerance += taskInstanceStateCount.getCount();
needFaultTolerance += taskInstanceStateCount.getCount();
break;
case KILL:
kill += taskInstanceStateCount.getCount();
break;
case WAITTING_THREAD:
waitting_thread += taskInstanceStateCount.getCount();
waittingThread += taskInstanceStateCount.getCount();
break;
default:
@ -98,17 +98,17 @@ public class TaskCountDto {
}
}
this.taskCountDtos = new ArrayList<>();
this.taskCountDtos.add(new TaskStateCount(ExecutionStatus.SUBMITTED_SUCCESS, submitted_success));
this.taskCountDtos.add(new TaskStateCount(ExecutionStatus.RUNNING_EXEUTION, running_exeution));
this.taskCountDtos.add(new TaskStateCount(ExecutionStatus.READY_PAUSE, ready_pause));
this.taskCountDtos.add(new TaskStateCount(ExecutionStatus.SUBMITTED_SUCCESS, submittedSuccess));
this.taskCountDtos.add(new TaskStateCount(ExecutionStatus.RUNNING_EXEUTION, runningExeution));
this.taskCountDtos.add(new TaskStateCount(ExecutionStatus.READY_PAUSE, readyPause));
this.taskCountDtos.add(new TaskStateCount(ExecutionStatus.PAUSE, pause));
this.taskCountDtos.add(new TaskStateCount(ExecutionStatus.READY_STOP, ready_stop));
this.taskCountDtos.add(new TaskStateCount(ExecutionStatus.READY_STOP, readyStop));
this.taskCountDtos.add(new TaskStateCount(ExecutionStatus.STOP, stop));
this.taskCountDtos.add(new TaskStateCount(ExecutionStatus.FAILURE, failure));
this.taskCountDtos.add(new TaskStateCount(ExecutionStatus.SUCCESS, success));
this.taskCountDtos.add(new TaskStateCount(ExecutionStatus.NEED_FAULT_TOLERANCE, need_fault_tolerance));
this.taskCountDtos.add(new TaskStateCount(ExecutionStatus.NEED_FAULT_TOLERANCE, needFaultTolerance));
this.taskCountDtos.add(new TaskStateCount(ExecutionStatus.KILL, kill));
this.taskCountDtos.add(new TaskStateCount(ExecutionStatus.WAITTING_THREAD, waitting_thread));
this.taskCountDtos.add(new TaskStateCount(ExecutionStatus.WAITTING_THREAD, waittingThread));
}

29
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/Directory.java

@ -0,0 +1,29 @@
package org.apache.dolphinscheduler.api.dto.resources;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* directory
*/
public class Directory extends ResourceComponent{
@Override
public boolean isDirctory() {
return true;
}
}

10
dolphinscheduler-ui/src/sass/common/_mixin.scss → dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/FileLeaf.java

@ -1,3 +1,5 @@
package org.apache.dolphinscheduler.api.dto.resources;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
@ -13,4 +15,10 @@
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
*/
/**
* file leaf
*/
public class FileLeaf extends ResourceComponent{
}

193
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/ResourceComponent.java

@ -0,0 +1,193 @@
package org.apache.dolphinscheduler.api.dto.resources;
import com.alibaba.fastjson.annotation.JSONField;
import com.alibaba.fastjson.annotation.JSONType;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import java.util.ArrayList;
import java.util.List;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* resource component
*/
@JSONType(orders={"id","pid","name","fullName","description","isDirctory","children","type"})
public abstract class ResourceComponent {
public ResourceComponent() {
}
public ResourceComponent(int id, int pid, String name, String fullName, String description, boolean isDirctory) {
this.id = id;
this.pid = pid;
this.name = name;
this.fullName = fullName;
this.description = description;
this.isDirctory = isDirctory;
int directoryFlag = isDirctory ? 1:0;
this.idValue = String.format("%s_%s",id,directoryFlag);
}
/**
* id
*/
@JSONField(ordinal = 1)
protected int id;
/**
* parent id
*/
@JSONField(ordinal = 2)
protected int pid;
/**
* name
*/
@JSONField(ordinal = 3)
protected String name;
/**
* current directory
*/
protected String currentDir;
/**
* full name
*/
@JSONField(ordinal = 4)
protected String fullName;
/**
* description
*/
@JSONField(ordinal = 5)
protected String description;
/**
* is directory
*/
@JSONField(ordinal = 6)
protected boolean isDirctory;
/**
* id value
*/
@JSONField(ordinal = 7)
protected String idValue;
/**
* resource type
*/
@JSONField(ordinal = 8)
protected ResourceType type;
/**
* children
*/
@JSONField(ordinal = 9)
protected List<ResourceComponent> children = new ArrayList<>();
/**
* add resource component
* @param resourceComponent resource component
*/
public void add(ResourceComponent resourceComponent){
children.add(resourceComponent);
}
public String getName(){
return this.name;
}
public String getDescription(){
return this.description;
}
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
public int getPid() {
return pid;
}
public void setPid(int pid) {
this.pid = pid;
}
public void setName(String name) {
this.name = name;
}
public String getFullName() {
return fullName;
}
public void setFullName(String fullName) {
this.fullName = fullName;
}
public void setDescription(String description) {
this.description = description;
}
public boolean isDirctory() {
return isDirctory;
}
public void setDirctory(boolean dirctory) {
isDirctory = dirctory;
}
public String getIdValue() {
return idValue;
}
public void setIdValue(int id,boolean isDirctory) {
int directoryFlag = isDirctory ? 1:0;
this.idValue = String.format("%s_%s",id,directoryFlag);
}
public ResourceType getType() {
return type;
}
public void setType(ResourceType type) {
this.type = type;
}
public List<ResourceComponent> getChildren() {
return children;
}
public void setChildren(List<ResourceComponent> children) {
this.children = children;
}
@Override
public String toString() {
return "ResourceComponent{" +
"id=" + id +
", pid=" + pid +
", name='" + name + '\'' +
", currentDir='" + currentDir + '\'' +
", fullName='" + fullName + '\'' +
", description='" + description + '\'' +
", isDirctory=" + isDirctory +
", idValue='" + idValue + '\'' +
", type=" + type +
", children=" + children +
'}';
}
}
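Editorial sketch (not part of the commit): Directory and FileLeaf above form a classic composite over ResourceComponent, and fastjson serializes the tree according to the @JSONType/@JSONField ordering. A minimal, hypothetical driver in the same package, using only setters that appear in the class:

import com.alibaba.fastjson.JSON;

public class CompositeSketch {
    public static void main(String[] args) {
        ResourceComponent root = new Directory();    // isDirctory() -> true
        root.setId(1);
        root.setPid(-1);
        root.setName("udf");
        ResourceComponent leaf = new FileLeaf();
        leaf.setId(2);
        leaf.setPid(1);
        leaf.setName("my-udf.jar");
        root.add(leaf);                              // a directory holds children
        System.out.println(JSON.toJSONString(root)); // fields ordered per annotations
    }
}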

28
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/filter/IFilter.java

@ -0,0 +1,28 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.dto.resources.filter;
import org.apache.dolphinscheduler.dao.entity.Resource;
import java.util.List;
/**
* interface filter
*/
public interface IFilter {
List<Resource> filter();
}

100
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/filter/ResourceFilter.java

@ -0,0 +1,100 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.dto.resources.filter;
import org.apache.dolphinscheduler.dao.entity.Resource;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
/**
* resource filter
*/
public class ResourceFilter implements IFilter {
/**
* resource suffix
*/
private String suffix;
/**
* resource list
*/
private List<Resource> resourceList;
/**
* constructor
* @param suffix resource suffix
* @param resourceList resource list
*/
public ResourceFilter(String suffix, List<Resource> resourceList) {
this.suffix = suffix;
this.resourceList = resourceList;
}
/**
* file filter
* @return file filtered by suffix
*/
public Set<Resource> fileFilter(){
Set<Resource> resources = resourceList.stream().filter(t -> {
String alias = t.getAlias();
return alias.endsWith(suffix);
}).collect(Collectors.toSet());
return resources;
}
/**
* list all parent dir
* @return parent resource dir set
*/
Set<Resource> listAllParent(){
Set<Resource> parentList = new HashSet<>();
Set<Resource> filterFileList = fileFilter();
for(Resource file:filterFileList){
parentList.add(file);
setAllParent(file,parentList);
}
return parentList;
}
/**
* recursively collect all parent dirs of the given resource
* @param resource resource
* @param parentList parent resource dir set
*/
private void setAllParent(Resource resource,Set<Resource> parentList){
for (Resource resourceTemp : resourceList) {
if (resourceTemp.getId() == resource.getPid()) {
parentList.add(resourceTemp);
setAllParent(resourceTemp,parentList);
}
}
}
@Override
public List<Resource> filter() {
return new ArrayList<>(listAllParent());
}
}
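Hedged usage sketch (editorial): filter() keeps every resource whose alias ends with the suffix plus all of its ancestor directories, following pid -> id links. It assumes the dao Resource entity has a no-arg constructor and plain bean setters matching the getters used above:

Resource dir = new Resource();                  // hypothetical data
dir.setId(1);  dir.setPid(-1); dir.setAlias("lib");
Resource jar = new Resource();
jar.setId(2);  jar.setPid(1);  jar.setAlias("udf.jar");
Resource txt = new Resource();
txt.setId(3);  txt.setPid(1);  txt.setAlias("notes.txt");

List<Resource> kept = new ResourceFilter(".jar", java.util.Arrays.asList(dir, jar, txt)).filter();
// kept contains udf.jar and its parent "lib"; notes.txt is dropped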

130
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/visitor/ResourceTreeVisitor.java

@ -0,0 +1,130 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.dto.resources.visitor;
import org.apache.dolphinscheduler.api.dto.resources.Directory;
import org.apache.dolphinscheduler.api.dto.resources.FileLeaf;
import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
import org.apache.dolphinscheduler.dao.entity.Resource;
import java.util.ArrayList;
import java.util.List;
/**
* resource tree visitor
*/
public class ResourceTreeVisitor implements Visitor{
/**
* resource list
*/
private List<Resource> resourceList;
public ResourceTreeVisitor() {
}
/**
* constructor
* @param resourceList resource list
*/
public ResourceTreeVisitor(List<Resource> resourceList) {
this.resourceList = resourceList;
}
/**
* visit
* @return resource component
*/
public ResourceComponent visit() {
ResourceComponent rootDirectory = new Directory();
for (Resource resource : resourceList) {
// judge whether is root node
if (rootNode(resource)){
ResourceComponent tempResourceComponent = getResourceComponent(resource);
rootDirectory.add(tempResourceComponent);
tempResourceComponent.setChildren(setChildren(tempResourceComponent.getId(),resourceList));
}
}
return rootDirectory;
}
/**
* set children
* @param id id
* @param list resource list
* @return resource component list
*/
public static List<ResourceComponent> setChildren(int id, List<Resource> list ){
List<ResourceComponent> childList = new ArrayList<>();
for (Resource resource : list) {
if (id == resource.getPid()){
ResourceComponent tempResourceComponent = getResourceComponent(resource);
childList.add(tempResourceComponent);
}
}
for (ResourceComponent resourceComponent : childList) {
resourceComponent.setChildren(setChildren(resourceComponent.getId(),list));
}
return childList;
}
/**
* Determine whether it is the root node
* @param resource resource
* @return true if it is the root node
*/
public boolean rootNode(Resource resource) {
boolean isRootNode = true;
if(resource.getPid() != -1 ){
for (Resource parent : resourceList) {
if (resource.getPid() == parent.getId()) {
isRootNode = false;
break;
}
}
}
return isRootNode;
}
/**
* get resource component by resource
* @param resource resource
* @return resource component
*/
private static ResourceComponent getResourceComponent(Resource resource) {
ResourceComponent tempResourceComponent;
if(resource.isDirectory()){
tempResourceComponent = new Directory();
}else{
tempResourceComponent = new FileLeaf();
}
tempResourceComponent.setName(resource.getAlias());
tempResourceComponent.setFullName(resource.getFullName().replaceFirst("/",""));
tempResourceComponent.setId(resource.getId());
tempResourceComponent.setPid(resource.getPid());
tempResourceComponent.setIdValue(resource.getId(),resource.isDirectory());
tempResourceComponent.setDescription(resource.getDescription());
tempResourceComponent.setType(resource.getType());
return tempResourceComponent;
}
}
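Editorial sketch of the visitor in action: a flat Resource list becomes a nested ResourceComponent tree, where a pid that matches no id in the list marks a root. Bean setters on Resource are assumed; fullName must be set because getResourceComponent() calls getFullName():

List<Resource> flat = new ArrayList<>();
Resource dir = new Resource();
dir.setId(1); dir.setPid(-1); dir.setAlias("etl"); dir.setFullName("/etl"); dir.setDirectory(true);
Resource file = new Resource();
file.setId(2); file.setPid(1); file.setAlias("job.sh"); file.setFullName("/etl/job.sh"); file.setDirectory(false);
flat.add(dir); flat.add(file);

ResourceComponent tree = new ResourceTreeVisitor(flat).visit();
// tree.getChildren() -> [etl]; etl's children -> [job.sh]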

31
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/visitor/Visitor.java

@ -0,0 +1,31 @@
package org.apache.dolphinscheduler.api.dto.resources.visitor;
import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Visitor
*/
public interface Visitor {
/**
* visit
* @return resource component
*/
ResourceComponent visit();
}

12
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java

@ -97,7 +97,7 @@ public enum Status {
VERIFY_UDF_FUNCTION_NAME_ERROR( 10070,"verify udf function name error", "UDF函数名称验证错误"),
DELETE_UDF_FUNCTION_ERROR( 10071,"delete udf function error", "删除UDF函数错误"),
AUTHORIZED_FILE_RESOURCE_ERROR( 10072,"authorized file resource error", "授权资源文件错误"),
UNAUTHORIZED_FILE_RESOURCE_ERROR( 10073,"unauthorized file resource error", "查询未授权资源错误"),
AUTHORIZE_RESOURCE_TREE( 10073,"authorize resource tree display error","授权资源目录树错误"),
UNAUTHORIZED_UDF_FUNCTION_ERROR( 10074,"unauthorized udf function error", "查询未授权UDF函数错误"),
AUTHORIZED_UDF_FUNCTION_ERROR(10075,"authorized udf function error", "授权UDF函数错误"),
CREATE_SCHEDULE_ERROR(10076,"create schedule error", "创建调度配置错误"),
@ -184,10 +184,12 @@ public enum Status {
RESOURCE_SIZE_EXCEED_LIMIT(20007, "upload resource file size exceeds limit", "上传资源文件大小超过限制"),
RESOURCE_SUFFIX_FORBID_CHANGE(20008, "resource suffix not allowed to be modified", "资源文件后缀不支持修改"),
UDF_RESOURCE_SUFFIX_NOT_JAR(20009, "UDF resource suffix name must be jar", "UDF资源文件后缀名只支持[jar]"),
HDFS_COPY_FAIL(20009, "hdfs copy {0} -> {1} fail", "hdfs复制失败:[{0}] -> [{1}]"),
RESOURCE_FILE_EXIST(20010, "resource file {0} already exists in hdfs,please delete it or change name!", "资源文件[{0}]在hdfs中已存在,请删除或修改资源名"),
RESOURCE_FILE_NOT_EXIST(20011, "resource file {0} not exists in hdfs!", "资源文件[{0}]在hdfs中不存在"),
HDFS_COPY_FAIL(20010, "hdfs copy {0} -> {1} fail", "hdfs复制失败:[{0}] -> [{1}]"),
RESOURCE_FILE_EXIST(20011, "resource file {0} already exists in hdfs,please delete it or change name!", "资源文件[{0}]在hdfs中已存在,请删除或修改资源名"),
RESOURCE_FILE_NOT_EXIST(20012, "resource file {0} not exists in hdfs!", "资源文件[{0}]在hdfs中不存在"),
UDF_RESOURCE_IS_BOUND(20013, "udf resource file is bound by UDF functions:{0}","udf函数绑定了资源文件[{0}]"),
RESOURCE_IS_USED(20014, "resource file is used by process definition","资源文件被上线的流程定义使用了"),
PARENT_RESOURCE_NOT_EXIST(20015, "parent resource not exist","父资源文件不存在"),
USER_NO_OPERATION_PERM(30001, "user has no operation privilege", "当前用户没有操作权限"),
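Editorial note: the numbered placeholders in these messages read as java.text.MessageFormat patterns, e.g. for the renumbered HDFS_COPY_FAIL entry (illustrative values):

String msg = java.text.MessageFormat.format(
        "hdfs copy {0} -> {1} fail", "/tmp/a.jar", "/dolphinscheduler/a.jar");
// -> "hdfs copy /tmp/a.jar -> /dolphinscheduler/a.jar fail"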

43
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/AlertGroupService.java

@ -16,17 +16,17 @@
*/
package org.apache.dolphinscheduler.api.service;
import java.util.*;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.AlertType;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.AlertGroup;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.entity.UserAlertGroup;
import org.apache.dolphinscheduler.dao.mapper.AlertGroupMapper;
import org.apache.dolphinscheduler.dao.mapper.UserAlertGroupMapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.slf4j.Logger;
@ -35,11 +35,6 @@ import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* alert group service
*/
@ -52,8 +47,7 @@ public class AlertGroupService extends BaseService{
private AlertGroupMapper alertGroupMapper;
@Autowired
private UserAlertGroupMapper userAlertGroupMapper;
private UserAlertGroupService userAlertGroupService;
/**
* query alert group list
*
@ -122,7 +116,7 @@ public class AlertGroupService extends BaseService{
alertGroup.setCreateTime(now);
alertGroup.setUpdateTime(now);
// insert
int insert = alertGroupMapper.insert(alertGroup);
if (insert > 0) {
@ -199,7 +193,7 @@ public class AlertGroupService extends BaseService{
return result;
}
userAlertGroupMapper.deleteByAlertgroupId(id);
userAlertGroupService.deleteByAlertGroupId(id);
alertGroupMapper.deleteById(id);
putMsg(result, Status.SUCCESS);
return result;
@ -223,22 +217,26 @@ public class AlertGroupService extends BaseService{
return result;
}
userAlertGroupMapper.deleteByAlertgroupId(alertgroupId);
userAlertGroupService.deleteByAlertGroupId(alertgroupId);
if (StringUtils.isEmpty(userIds)) {
putMsg(result, Status.SUCCESS);
return result;
}
String[] userIdsArr = userIds.split(",");
Date now = new Date();
List<UserAlertGroup> alertGroups = new ArrayList<>(userIdsArr.length);
for (String userId : userIdsArr) {
Date now = new Date();
UserAlertGroup userAlertGroup = new UserAlertGroup();
userAlertGroup.setAlertgroupId(alertgroupId);
userAlertGroup.setUserId(Integer.parseInt(userId));
userAlertGroup.setCreateTime(now);
userAlertGroup.setUpdateTime(now);
userAlertGroupMapper.insert(userAlertGroup);
alertGroups.add(userAlertGroup);
}
if (CollectionUtils.isNotEmpty(alertGroups)) {
userAlertGroupService.saveBatch(alertGroups);
}
putMsg(result, Status.SUCCESS);
@ -248,22 +246,11 @@ public class AlertGroupService extends BaseService{
/**
* verify group name exists
*
* @param loginUser login user
* @param groupName group name
* @return check result code
*/
public Result verifyGroupName(User loginUser, String groupName) {
Result result = new Result();
public boolean existGroupName(String groupName) {
List<AlertGroup> alertGroup = alertGroupMapper.queryByGroupName(groupName);
if (alertGroup != null && alertGroup.size() > 0) {
logger.error("group {} has exist, can't create again.", groupName);
result.setCode(Status.ALERT_GROUP_EXIST.getCode());
result.setMsg(Status.ALERT_GROUP_EXIST.getMsg());
} else {
result.setCode(Status.SUCCESS.getCode());
result.setMsg(Status.SUCCESS.getMsg());
}
return result;
return CollectionUtils.isNotEmpty(alertGroup);
}
}
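The refactor above replaces one INSERT per user inside the loop with a single batched save; saveBatch(Collection) is the stock MyBatis-Plus IService API, on which UserAlertGroupService is presumably built. The shape of the pattern in isolation (hypothetical input):

List<UserAlertGroup> batch = new ArrayList<>();
for (String userId : "1,2,3".split(",")) {
    UserAlertGroup uag = new UserAlertGroup();
    uag.setUserId(Integer.parseInt(userId));
    batch.add(uag);                      // collect first...
}
userAlertGroupService.saveBatch(batch);  // ...persist once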

32
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/DataSourceService.java

@ -17,10 +17,15 @@
package org.apache.dolphinscheduler.api.service;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.fastjson.TypeReference;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.DbConnectType;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.utils.CommonUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
@ -30,10 +35,6 @@ import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.DataSourceMapper;
import org.apache.dolphinscheduler.dao.mapper.DataSourceUserMapper;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.fastjson.TypeReference;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger;
@ -473,12 +474,19 @@ public class DataSourceService extends BaseService{
* @return datasource parameter
*/
public String buildParameter(String name, String desc, DbType type, String host,
String port, String database,String principal,String userName,
String password, String other) {
String port, String database, String principal, String userName,
String password, DbConnectType connectType, String other) {
String address = buildAddress(type, host, port, connectType);
String address = buildAddress(type, host, port);
String jdbcUrl;
if (Constants.ORACLE.equals(type.name())
&& connectType == DbConnectType.ORACLE_SID) {
jdbcUrl = address + ":" + database;
} else {
jdbcUrl = address + "/" + database;
}
String jdbcUrl = address + "/" + database;
if (CommonUtils.getKerberosStartupState() &&
(type == DbType.HIVE || type == DbType.SPARK)){
jdbcUrl += ";principal=" + principal;
@ -531,7 +539,7 @@ public class DataSourceService extends BaseService{
}
private String buildAddress(DbType type, String host, String port) {
private String buildAddress(DbType type, String host, String port, DbConnectType connectType) {
StringBuilder sb = new StringBuilder();
if (Constants.MYSQL.equals(type.name())) {
sb.append(Constants.JDBC_MYSQL);
@ -552,7 +560,11 @@ public class DataSourceService extends BaseService{
sb.append(Constants.JDBC_CLICKHOUSE);
sb.append(host).append(":").append(port);
} else if (Constants.ORACLE.equals(type.name())) {
sb.append(Constants.JDBC_ORACLE);
if (connectType == DbConnectType.ORACLE_SID) {
sb.append(Constants.JDBC_ORACLE_SID);
} else {
sb.append(Constants.JDBC_ORACLE_SERVICE_NAME);
}
sb.append(host).append(":").append(port);
} else if (Constants.SQLSERVER.equals(type.name())) {
sb.append(Constants.JDBC_SQLSERVER);
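Context for the Oracle branch above: the thin driver uses different URL shapes for SID and service-name connections, which is what the ORACLE_SID check selects between. A self-contained sketch with the standard formats inlined (the real code reads the prefixes from Constants.JDBC_ORACLE_SID / JDBC_ORACLE_SERVICE_NAME):

static String oracleUrl(boolean useSid, String host, String port, String db) {
    // SID:          jdbc:oracle:thin:@host:port:SID
    // service name: jdbc:oracle:thin:@//host:port/service
    return useSid ? "jdbc:oracle:thin:@"   + host + ":" + port + ":" + db
                  : "jdbc:oracle:thin:@//" + host + ":" + port + "/" + db;
}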

7
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ExecutorService.java

@ -98,7 +98,7 @@ public class ExecutorService extends BaseService{
String receivers, String receiversCc, RunMode runMode,
Priority processInstancePriority, int workerGroupId, Integer timeout) throws ParseException {
Map<String, Object> result = new HashMap<>(5);
// timeout is valid
// timeout is invalid
if (timeout <= 0 || timeout > MAX_TASK_TIMEOUT) {
putMsg(result,Status.TASK_TIMEOUT_PARAMS_ERROR);
return result;
@ -453,7 +453,7 @@ public class ExecutorService extends BaseService{
TaskDependType nodeDep, FailureStrategy failureStrategy,
String startNodeList, String schedule, WarningType warningType,
int excutorId, int warningGroupId,
RunMode runMode,Priority processInstancePriority, int workerGroupId) throws ParseException {
RunMode runMode,Priority processInstancePriority, int workerGroupId){
/**
* instantiate command schedule instance
@ -496,6 +496,7 @@ public class ExecutorService extends BaseService{
}
}
// determine whether to complement
if(commandType == CommandType.COMPLEMENT_DATA){
runMode = (runMode == null) ? RunMode.RUN_MODE_SERIAL : runMode;
if(null != start && null != end && start.before(end)){
@ -540,7 +541,7 @@ public class ExecutorService extends BaseService{
processDefineId, schedule);
}
}else{
command.setCommandParam(JSONUtils.toJson(cmdParam));
return processService.createCommand(command);
}

56
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java

@ -38,11 +38,9 @@ import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.process.ProcessDag;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.entity.*;
import org.apache.dolphinscheduler.dao.mapper.*;
import org.apache.dolphinscheduler.dao.utils.DagHelper;
@ -148,7 +146,7 @@ public class ProcessDefinitionService extends BaseDAGService {
//custom global params
List<Property> globalParamsList = processData.getGlobalParams();
if (globalParamsList != null && globalParamsList.size() > 0) {
if (CollectionUtils.isNotEmpty(globalParamsList)) {
Set<Property> globalParamsSet = new HashSet<>(globalParamsList);
globalParamsList = new ArrayList<>(globalParamsSet);
processDefine.setGlobalParamList(globalParamsList);
@ -162,6 +160,31 @@ public class ProcessDefinitionService extends BaseDAGService {
return result;
}
/**
* get resource ids
* @param processData process data
* @return resource ids
*/
private String getResourceIds(ProcessData processData) {
List<TaskNode> tasks = processData.getTasks();
Set<Integer> resourceIds = new HashSet<>();
for(TaskNode taskNode : tasks){
String taskParameter = taskNode.getParams();
AbstractParameters params = TaskParametersUtils.getParameters(taskNode.getType(),taskParameter);
Set<Integer> tempSet = params.getResourceFilesList().stream().map(t->t.getId()).collect(Collectors.toSet());
resourceIds.addAll(tempSet);
}
StringBuilder sb = new StringBuilder();
for(int i : resourceIds) {
if (sb.length() > 0) {
sb.append(",");
}
sb.append(i);
}
return sb.toString();
}
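Editorial aside: the comma join in getResourceIds could equivalently be written with streams; behavior matches the StringBuilder loop above:

String resourceIdStr = resourceIds.stream()
        .map(String::valueOf)
        .collect(java.util.stream.Collectors.joining(","));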
/**
* query proccess definition list
@ -284,20 +307,19 @@ public class ProcessDefinitionService extends BaseDAGService {
if ((checkProcessJson.get(Constants.STATUS) != Status.SUCCESS)) {
return checkProcessJson;
}
ProcessDefinition processDefinition = processService.findProcessDefineById(id);
if (processDefinition == null) {
ProcessDefinition processDefine = processService.findProcessDefineById(id);
if (processDefine == null) {
// check process definition exists
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, id);
return result;
} else if (processDefinition.getReleaseState() == ReleaseState.ONLINE) {
} else if (processDefine.getReleaseState() == ReleaseState.ONLINE) {
// online can not permit edit
putMsg(result, Status.PROCESS_DEFINE_NOT_ALLOWED_EDIT, processDefinition.getName());
putMsg(result, Status.PROCESS_DEFINE_NOT_ALLOWED_EDIT, processDefine.getName());
return result;
} else {
putMsg(result, Status.SUCCESS);
}
ProcessDefinition processDefine = processService.findProcessDefineById(id);
Date now = new Date();
processDefine.setId(id);
@ -314,7 +336,7 @@ public class ProcessDefinitionService extends BaseDAGService {
//custom global params
List<Property> globalParamsList = new ArrayList<>();
if (processData.getGlobalParams() != null && processData.getGlobalParams().size() > 0) {
if (CollectionUtils.isNotEmpty(processData.getGlobalParams())) {
Set<Property> userDefParamsSet = new HashSet<>(processData.getGlobalParams());
globalParamsList = new ArrayList<>(userDefParamsSet);
}
@ -453,12 +475,11 @@ public class ProcessDefinitionService extends BaseDAGService {
ProcessDefinition processDefinition = processDefineMapper.selectById(id);
switch (state) {
case ONLINE: {
case ONLINE:
processDefinition.setReleaseState(state);
processDefineMapper.updateById(processDefinition);
break;
}
case OFFLINE: {
case OFFLINE:
processDefinition.setReleaseState(state);
processDefineMapper.updateById(processDefinition);
List<Schedule> scheduleList = scheduleMapper.selectAllByProcessDefineArray(
@ -473,11 +494,9 @@ public class ProcessDefinitionService extends BaseDAGService {
SchedulerService.deleteSchedule(project.getId(), schedule.getId());
}
break;
}
default: {
default:
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, "releaseState");
return result;
}
}
putMsg(result, Status.SUCCESS);
@ -949,7 +968,9 @@ public class ProcessDefinitionService extends BaseDAGService {
return result;
}
String processDefinitionJson = processDefinition.getProcessDefinitionJson();
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
//process data check
@ -1166,6 +1187,7 @@ public class ProcessDefinitionService extends BaseDAGService {
private DAG<String, TaskNode, TaskNodeRelation> genDagGraph(ProcessDefinition processDefinition) throws Exception {
String processDefinitionJson = processDefinition.getProcessDefinitionJson();
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
//check process data

23
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessInstanceService.java

@ -204,14 +204,8 @@ public class ProcessInstanceService extends BaseDAGService {
}
}
Set<String> exclusionSet = new HashSet<>();
exclusionSet.add(Constants.CLASS);
exclusionSet.add("locations");
exclusionSet.add("connects");
exclusionSet.add("processInstanceJson");
pageInfo.setTotalCount((int) processInstanceList.getTotal());
pageInfo.setLists(CollectionUtils.getListByExclusion(processInstances, exclusionSet));
pageInfo.setLists(processInstances);
result.put(Constants.DATA_LIST, pageInfo);
putMsg(result, Status.SUCCESS);
return result;
@ -239,7 +233,7 @@ public class ProcessInstanceService extends BaseDAGService {
}
ProcessInstance processInstance = processService.findProcessInstanceDetailById(processId);
List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(processId);
AddDependResultForTaskList(taskInstanceList);
addDependResultForTaskList(taskInstanceList);
Map<String, Object> resultMap = new HashMap<>();
resultMap.put(PROCESS_INSTANCE_STATE, processInstance.getState().toString());
resultMap.put(TASK_LIST, taskInstanceList);
@ -253,9 +247,9 @@ public class ProcessInstanceService extends BaseDAGService {
* add dependent result for dependent task
* @param taskInstanceList
*/
private void AddDependResultForTaskList(List<TaskInstance> taskInstanceList) throws IOException {
private void addDependResultForTaskList(List<TaskInstance> taskInstanceList) throws IOException {
for(TaskInstance taskInstance: taskInstanceList){
if(taskInstance.getTaskType().toUpperCase().equals(TaskType.DEPENDENT.toString())){
if(taskInstance.getTaskType().equalsIgnoreCase(TaskType.DEPENDENT.toString())){
Result logResult = loggerService.queryLog(
taskInstance.getId(), 0, 4098);
if(logResult.getCode() == Status.SUCCESS.ordinal()){
@ -414,11 +408,10 @@ public class ProcessInstanceService extends BaseDAGService {
processInstance.setProcessInstanceJson(processInstanceJson);
processInstance.setGlobalParams(globalParams);
}
// int update = processDao.updateProcessInstance(processInstanceId, processInstanceJson,
// globalParams, schedule, flag, locations, connects);
int update = processService.updateProcessInstance(processInstance);
int updateDefine = 1;
if (syncDefine && StringUtils.isNotEmpty(processInstanceJson)) {
if (Boolean.TRUE.equals(syncDefine) && StringUtils.isNotEmpty(processInstanceJson)) {
processDefinition.setProcessDefinitionJson(processInstanceJson);
processDefinition.setGlobalParams(originDefParams);
processDefinition.setLocations(locations);
@ -544,7 +537,7 @@ public class ProcessInstanceService extends BaseDAGService {
nodeValueSb.append(ipSb);
}
logger.info("delete task queue node : {}",nodeValueSb.toString());
logger.info("delete task queue node : {}",nodeValueSb);
tasksQueue.removeNode(org.apache.dolphinscheduler.common.Constants.DOLPHINSCHEDULER_TASKS_QUEUE, nodeValueSb.toString());
}
@ -621,7 +614,7 @@ public class ProcessInstanceService extends BaseDAGService {
Map<String,Object> localParamsMap = new HashMap<>();
localParamsMap.put("taskType",taskNode.getType());
localParamsMap.put("localParamsList",localParamsList);
if (localParamsList.size() > 0) {
if (CollectionUtils.isNotEmpty(localParamsList)) {
localUserDefParams.put(taskNode.getName(), localParamsMap);
}
}
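Why the diff switches to Boolean.TRUE.equals(syncDefine): with a boxed Boolean, a bare if (syncDefine) auto-unboxes and throws NullPointerException when the value is null, while the equals form simply yields false. Minimal illustration:

Boolean syncDefine = null;                  // e.g. an absent request parameter
if (Boolean.TRUE.equals(syncDefine)) { }    // safe: false for null
// if (syncDefine) { }                      // would throw NullPointerException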

26
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/QueueService.java

@ -20,6 +20,7 @@ import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.dao.entity.Queue;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.QueueMapper;
@ -43,7 +44,7 @@ import java.util.Map;
@Service
public class QueueService extends BaseService {
private static final Logger logger = LoggerFactory.getLogger(TenantService.class);
private static final Logger logger = LoggerFactory.getLogger(QueueService.class);
@Autowired
private QueueMapper queueMapper;
@ -186,19 +187,16 @@ public class QueueService extends BaseService {
}
// check queue name is exist
if (!queueName.equals(queueObj.getQueueName())) {
if (checkQueueNameExist(queueName)) {
putMsg(result, Status.QUEUE_NAME_EXIST, queueName);
return result;
}
if (!queueName.equals(queueObj.getQueueName())
&& checkQueueNameExist(queueName)) {
putMsg(result, Status.QUEUE_NAME_EXIST, queueName);
return result;
}
// check queue value is exist
if (!queue.equals(queueObj.getQueue())) {
if (checkQueueExist(queue)) {
putMsg(result, Status.QUEUE_VALUE_EXIST, queue);
return result;
}
if (!queue.equals(queueObj.getQueue()) && checkQueueExist(queue)) {
putMsg(result, Status.QUEUE_VALUE_EXIST, queue);
return result;
}
// check old queue using by any user
@ -267,7 +265,7 @@ public class QueueService extends BaseService {
* @return true if the queue not exists, otherwise return false
*/
private boolean checkQueueExist(String queue) {
return queueMapper.queryAllQueueList(queue, null).size() > 0;
return CollectionUtils.isNotEmpty(queueMapper.queryAllQueueList(queue, null));
}
/**
@ -278,7 +276,7 @@ public class QueueService extends BaseService {
* @return true if the queue name not exists, otherwise return false
*/
private boolean checkQueueNameExist(String queueName) {
return queueMapper.queryAllQueueList(null, queueName).size() > 0;
return CollectionUtils.isNotEmpty(queueMapper.queryAllQueueList(null, queueName));
}
/**
@ -290,7 +288,7 @@ public class QueueService extends BaseService {
* @return true if need to update user
*/
private boolean checkIfQueueIsInUsing (String oldQueue, String newQueue) {
return !oldQueue.equals(newQueue) && userMapper.queryUserListByQueue(oldQueue).size() > 0;
return !oldQueue.equals(newQueue) && CollectionUtils.isNotEmpty(userMapper.queryUserListByQueue(oldQueue));
}
}
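The size() > 0 to CollectionUtils.isNotEmpty swaps in this class are not purely cosmetic: isNotEmpty is null-safe where a direct size() call is not, assuming this CollectionUtils mirrors commons-collections semantics. Sketch:

List<Queue> queues = null;                        // hypothetical mapper result
boolean any = CollectionUtils.isNotEmpty(queues); // false, no NPE
// boolean bad = queues.size() > 0;               // NullPointerException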

566
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java

@ -16,18 +16,21 @@
*/
package org.apache.dolphinscheduler.api.service;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.serializer.SerializerFeature;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.commons.collections.BeanMap;
import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
import org.apache.dolphinscheduler.api.dto.resources.filter.ResourceFilter;
import org.apache.dolphinscheduler.api.dto.resources.visitor.ResourceTreeVisitor;
import org.apache.dolphinscheduler.api.dto.resources.visitor.Visitor;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.utils.FileUtils;
import org.apache.dolphinscheduler.common.utils.HadoopUtils;
import org.apache.dolphinscheduler.common.utils.PropertyUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.UdfFunc;
@ -42,6 +45,7 @@ import org.springframework.web.multipart.MultipartFile;
import java.text.MessageFormat;
import java.util.*;
import java.util.stream.Collectors;
import static org.apache.dolphinscheduler.common.Constants.*;
@ -68,6 +72,82 @@ public class ResourcesService extends BaseService {
@Autowired
private ResourceUserMapper resourceUserMapper;
@Autowired
private ProcessDefinitionMapper processDefinitionMapper;
/**
* create directory
*
* @param loginUser login user
* @param name alias
* @param description description
* @param type type
* @param pid parent id
* @param currentDir current directory
* @return create directory result
*/
@Transactional(rollbackFor = Exception.class)
public Result createDirectory(User loginUser,
String name,
String description,
ResourceType type,
int pid,
String currentDir) {
Result result = new Result();
// if hdfs not startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name):String.format("%s/%s",currentDir,name);
if (pid != -1) {
Resource parentResource = resourcesMapper.selectById(pid);
if (parentResource == null) {
putMsg(result, Status.PARENT_RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, parentResource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
}
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource directory {} has exist, can't recreate", fullName);
putMsg(result, Status.RESOURCE_EXIST);
return result;
}
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,true,description,name,loginUser.getId(),type,0,now,now);
try {
resourcesMapper.insert(resource);
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<String, Object>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!"class".equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
} catch (Exception e) {
logger.error("resource already exists, can't recreate ", e);
throw new RuntimeException("resource already exists, can't recreate");
}
//create directory in hdfs
createDirecotry(loginUser,fullName,type,result);
return result;
}
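One subtlety in the fullName expression above: only the root directory already ends with "/", so it is concatenated directly; any other parent needs a separator. Standalone sketch:

String currentDir = "/etl";                          // hypothetical parent dir
String name = "jobs";
String fullName = currentDir.equals("/")
        ? String.format("%s%s", currentDir, name)    // "/" + "jobs" -> "/jobs"
        : String.format("%s/%s", currentDir, name);  // -> "/etl/jobs"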
/**
* create resource
*
@ -76,6 +156,8 @@ public class ResourcesService extends BaseService {
* @param desc description
* @param file file
* @param type type
* @param pid parent id
* @param currentDir current directory
* @return create result code
*/
@Transactional(rollbackFor = Exception.class)
@ -83,7 +165,9 @@ public class ResourcesService extends BaseService {
String name,
String desc,
ResourceType type,
MultipartFile file) {
MultipartFile file,
int pid,
String currentDir) {
Result result = new Result();
// if hdfs not startup
@ -126,7 +210,8 @@ public class ResourcesService extends BaseService {
}
// check resource name exists
if (checkResourceExists(name, 0, type.ordinal())) {
String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name):String.format("%s/%s",currentDir,name);
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource {} has exist, can't recreate", name);
putMsg(result, Status.RESOURCE_EXIST);
return result;
@ -134,14 +219,16 @@ public class ResourcesService extends BaseService {
Date now = new Date();
Resource resource = new Resource(name,file.getOriginalFilename(),desc,loginUser.getId(),type,file.getSize(),now,now);
Resource resource = new Resource(pid,name,fullName,false,desc,file.getOriginalFilename(),loginUser.getId(),type,file.getSize(),now,now);
try {
resourcesMapper.insert(resource);
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<String, Object>();
Map<String, Object> resultMap = new HashMap<>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!"class".equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
@ -154,7 +241,7 @@ public class ResourcesService extends BaseService {
}
// fail upload
if (!upload(loginUser, name, file, type)) {
if (!upload(loginUser, fullName, file, type)) {
logger.error("upload resource: {} file: {} failed.", name, file.getOriginalFilename());
putMsg(result, Status.HDFS_OPERATION_ERROR);
throw new RuntimeException(String.format("upload resource: %s file: %s failed.", name, file.getOriginalFilename()));
@ -165,14 +252,14 @@ public class ResourcesService extends BaseService {
/**
* check resource is exists
*
* @param alias alias
* @param fullName fullName
* @param userId user id
* @param type type
* @return true if resource exists
*/
private boolean checkResourceExists(String alias, int userId, int type ){
private boolean checkResourceExists(String fullName, int userId, int type ){
List<Resource> resources = resourcesMapper.queryResourceList(alias, userId, type);
List<Resource> resources = resourcesMapper.queryResourceList(fullName, userId, type);
if (resources != null && resources.size() > 0) {
return true;
}
@ -180,16 +267,14 @@ public class ResourcesService extends BaseService {
}
/**
* update resource
*
* @param loginUser login user
* @param name alias
* @param resourceId resource id
* @param type resource type
* @param desc description
* @return update result code
* @param loginUser login user
* @param resourceId resource id
* @param name name
* @param desc description
* @param type resource type
* @return update result code
*/
@Transactional(rollbackFor = Exception.class)
public Result updateResource(User loginUser,
@ -223,7 +308,10 @@ public class ResourcesService extends BaseService {
}
//check resource already exists
if (!resource.getAlias().equals(name) && checkResourceExists(name, 0, type.ordinal())) {
String originFullName = resource.getFullName();
String fullName = String.format("%s%s",originFullName.substring(0,originFullName.lastIndexOf("/")+1),name);
if (!resource.getAlias().equals(name) && checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource {} already exists, can't recreate", name);
putMsg(result, Status.RESOURCE_EXIST);
return result;
@ -234,25 +322,41 @@ public class ResourcesService extends BaseService {
if (StringUtils.isEmpty(tenantCode)){
return result;
}
//get the file suffix
String nameWithSuffix = name;
String originResourceName = resource.getAlias();
String suffix = originResourceName.substring(originResourceName.lastIndexOf("."));
if (!resource.isDirectory()) {
//get the file suffix
//if the name has no suffix then add it, else use the original name
String nameWithSuffix = name;
if(!name.endsWith(suffix)){
nameWithSuffix = nameWithSuffix + suffix;
String suffix = originResourceName.substring(originResourceName.lastIndexOf("."));
//if the name has no suffix then add it, else use the original name
if(!name.endsWith(suffix)){
nameWithSuffix = nameWithSuffix + suffix;
}
}
// updateResource data
List<Integer> childrenResource = listAllChildren(resource);
String oldFullName = resource.getFullName();
Date now = new Date();
resource.setAlias(nameWithSuffix);
resource.setFullName(fullName);
resource.setDescription(desc);
resource.setUpdateTime(now);
try {
resourcesMapper.updateById(resource);
if (resource.isDirectory() && CollectionUtils.isNotEmpty(childrenResource)) {
List<Resource> childResourceList = new ArrayList<>();
List<Resource> resourceList = resourcesMapper.listResourceByIds(childrenResource.toArray(new Integer[childrenResource.size()]));
childResourceList = resourceList.stream().map(t -> {
t.setFullName(t.getFullName().replaceFirst(oldFullName, fullName));
t.setUpdateTime(now);
return t;
}).collect(Collectors.toList());
resourcesMapper.batchUpdateResource(childResourceList);
}
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
@ -274,15 +378,9 @@ public class ResourcesService extends BaseService {
// get file hdfs path
// delete hdfs file by type
String originHdfsFileName = "";
String destHdfsFileName = "";
if (resource.getType().equals(ResourceType.FILE)) {
originHdfsFileName = HadoopUtils.getHdfsFilename(tenantCode, originResourceName);
destHdfsFileName = HadoopUtils.getHdfsFilename(tenantCode, name);
} else if (resource.getType().equals(ResourceType.UDF)) {
originHdfsFileName = HadoopUtils.getHdfsUdfFilename(tenantCode, originResourceName);
destHdfsFileName = HadoopUtils.getHdfsUdfFilename(tenantCode, name);
}
String originHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,originFullName);
String destHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,fullName);
try {
if (HadoopUtils.getInstance().exists(originHdfsFileName)) {
logger.info("hdfs copy {} -> {}", originHdfsFileName, destHdfsFileName);
@ -310,7 +408,7 @@ public class ResourcesService extends BaseService {
* @param pageSize page size
* @return resource list page
*/
public Map<String, Object> queryResourceListPaging(User loginUser, ResourceType type, String searchVal, Integer pageNo, Integer pageSize) {
public Map<String, Object> queryResourceListPaging(User loginUser, int direcotryId, ResourceType type, String searchVal, Integer pageNo, Integer pageSize) {
HashMap<String, Object> result = new HashMap<>(5);
Page<Resource> page = new Page(pageNo, pageSize);
@ -319,7 +417,7 @@ public class ResourcesService extends BaseService {
userId= 0;
}
IPage<Resource> resourceIPage = resourcesMapper.queryResourcePaging(page,
userId, type.ordinal(), searchVal);
userId,direcotryId, type.ordinal(), searchVal);
PageInfo pageInfo = new PageInfo<Resource>(pageNo, pageSize);
pageInfo.setTotalCount((int)resourceIPage.getTotal());
pageInfo.setLists(resourceIPage.getRecords());
@ -328,17 +426,46 @@ public class ResourcesService extends BaseService {
return result;
}
/**
* create directory
* @param loginUser login user
* @param fullName full name
* @param type resource type
* @param result Result
*/
private void createDirecotry(User loginUser,String fullName,ResourceType type,Result result){
// query tenant
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
String directoryName = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
String resourceRootPath = HadoopUtils.getHdfsDir(type,tenantCode);
try {
if (!HadoopUtils.getInstance().exists(resourceRootPath)) {
createTenantDirIfNotExists(tenantCode);
}
if (!HadoopUtils.getInstance().mkdir(directoryName)) {
logger.error("create resource directory {} of hdfs failed",directoryName);
putMsg(result,Status.HDFS_OPERATION_ERROR);
throw new RuntimeException(String.format("create resource directory: %s failed.", directoryName));
}
} catch (Exception e) {
logger.error("create resource directory {} of hdfs failed",directoryName);
putMsg(result,Status.HDFS_OPERATION_ERROR);
throw new RuntimeException(String.format("create resource directory: %s failed.", directoryName));
}
}
/**
* upload file to hdfs
*
* @param loginUser
* @param name
* @param file
* @param loginUser login user
* @param fullName full name
* @param file file
*/
private boolean upload(User loginUser, String name, MultipartFile file, ResourceType type) {
private boolean upload(User loginUser, String fullName, MultipartFile file, ResourceType type) {
// save to local
String fileSuffix = FileUtils.suffix(file.getOriginalFilename());
String nameSuffix = FileUtils.suffix(name);
String nameSuffix = FileUtils.suffix(fullName);
// determine file suffix
if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
@ -351,15 +478,8 @@ public class ResourcesService extends BaseService {
// save file to hdfs, and delete original file
String hdfsFilename = "";
String resourcePath = "";
if (type.equals(ResourceType.FILE)) {
hdfsFilename = HadoopUtils.getHdfsFilename(tenantCode, name);
resourcePath = HadoopUtils.getHdfsResDir(tenantCode);
} else if (type.equals(ResourceType.UDF)) {
hdfsFilename = HadoopUtils.getHdfsUdfFilename(tenantCode, name);
resourcePath = HadoopUtils.getHdfsUdfDir(tenantCode);
}
String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
String resourcePath = HadoopUtils.getHdfsDir(type,tenantCode);
try {
// if tenant dir not exists
if (!HadoopUtils.getInstance().exists(resourcePath)) {
@ -384,13 +504,59 @@ public class ResourcesService extends BaseService {
public Map<String, Object> queryResourceList(User loginUser, ResourceType type) {
Map<String, Object> result = new HashMap<>(5);
List<Resource> resourceList;
Set<Resource> allResourceList = getAllResources(loginUser, type);
Visitor resourceTreeVisitor = new ResourceTreeVisitor(new ArrayList<>(allResourceList));
result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* get all resources
* @param loginUser login user
* @return all resource set
*/
private Set<Resource> getAllResources(User loginUser, ResourceType type) {
int userId = loginUser.getId();
boolean listChildren = true;
if(isAdmin(loginUser)){
userId = 0;
listChildren = false;
}
List<Resource> resourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal());
Set<Resource> allResourceList = new HashSet<>(resourceList);
if (listChildren) {
Set<Integer> authorizedIds = new HashSet<>();
List<Resource> authorizedDirecoty = resourceList.stream().filter(t->t.getUserId() != loginUser.getId() && t.isDirectory()).collect(Collectors.toList());
if (CollectionUtils.isNotEmpty(authorizedDirecoty)) {
for(Resource resource : authorizedDirecoty){
authorizedIds.addAll(listAllChildren(resource));
}
List<Resource> childrenResources = resourcesMapper.listResourceByIds(authorizedIds.toArray(new Integer[authorizedIds.size()]));
allResourceList.addAll(childrenResources);
}
}
resourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal());
result.put(Constants.DATA_LIST, resourceList);
return allResourceList;
}
/**
* query resource list
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
public Map<String, Object> queryResourceJarList(User loginUser, ResourceType type) {
Map<String, Object> result = new HashMap<>(5);
Set<Resource> allResourceList = getAllResources(loginUser, type);
List<Resource> resources = new ResourceFilter(".jar",new ArrayList<>(allResourceList)).filter();
Visitor resourceTreeVisitor = new ResourceTreeVisitor(resources);
result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
@ -426,23 +592,51 @@ public class ResourcesService extends BaseService {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
//if resource type is UDF, need to check whether it is bound by UDF functions
if (resource.getType() == (ResourceType.UDF)) {
List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(new int[]{resourceId});
if (CollectionUtils.isNotEmpty(udfFuncs)) {
logger.error("can't be deleted,because it is bound by UDF functions:{}",udfFuncs.toString());
putMsg(result,Status.UDF_RESOURCE_IS_BOUND,udfFuncs.get(0).getFuncName());
return result;
}
}
Tenant tenant = tenantMapper.queryById(loginUser.getTenantId());
if (tenant == null){
putMsg(result, Status.TENANT_NOT_EXIST);
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)){
return result;
}
// get all resource id of process definitions those is released
Map<Integer, Set<Integer>> resourceProcessMap = getResourceProcessMap();
Set<Integer> resourceIdSet = resourceProcessMap.keySet();
// get all children of the resource
List<Integer> allChildren = listAllChildren(resource);
if (resourceIdSet.contains(resource.getPid())) {
logger.error("can't be deleted,because it is used of process definition");
putMsg(result, Status.RESOURCE_IS_USED);
return result;
}
resourceIdSet.retainAll(allChildren);
if (CollectionUtils.isNotEmpty(resourceIdSet)) {
logger.error("can't be deleted,because it is used of process definition");
for (Integer resId : resourceIdSet) {
logger.error("resource id:{} is used of process definition {}",resId,resourceProcessMap.get(resId));
}
putMsg(result, Status.RESOURCE_IS_USED);
return result;
}
String hdfsFilename = "";
// delete hdfs file by type
String tenantCode = tenant.getTenantCode();
hdfsFilename = getHdfsFileName(resource, tenantCode, hdfsFilename);
// get hdfs file by type
String hdfsFilename = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getFullName());
//delete data in database
resourcesMapper.deleteById(resourceId);
resourcesMapper.deleteIds(allChildren.toArray(new Integer[allChildren.size()]));
resourceUserMapper.deleteResourceUser(0, resourceId);
//delete file on hdfs
HadoopUtils.getInstance().delete(hdfsFilename, false);
HadoopUtils.getInstance().delete(hdfsFilename, true);
putMsg(result, Status.SUCCESS);
return result;
@ -451,15 +645,15 @@ public class ResourcesService extends BaseService {
/**
* verify resource by name and type
* @param loginUser login user
* @param name resource alias
* @param type resource type
* @param fullName resource full name
* @param type resource type
* @return true if the resource name not exists, otherwise return false
*/
public Result verifyResourceName(String name, ResourceType type,User loginUser) {
public Result verifyResourceName(String fullName, ResourceType type,User loginUser) {
Result result = new Result();
putMsg(result, Status.SUCCESS);
if (checkResourceExists(name, 0, type.ordinal())) {
logger.error("resource type:{} name:{} has exist, can't create again.", type, name);
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource type:{} name:{} has exist, can't create again.", type, fullName);
putMsg(result, Status.RESOURCE_EXIST);
} else {
// query tenant
@ -468,9 +662,9 @@ public class ResourcesService extends BaseService {
String tenantCode = tenant.getTenantCode();
try {
String hdfsFilename = getHdfsFileName(type,tenantCode,name);
String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
if(HadoopUtils.getInstance().exists(hdfsFilename)){
logger.error("resource type:{} name:{} has exist in hdfs {}, can't create again.", type, name,hdfsFilename);
logger.error("resource type:{} name:{} has exist in hdfs {}, can't create again.", type, fullName,hdfsFilename);
putMsg(result, Status.RESOURCE_FILE_EXIST,hdfsFilename);
}
@ -487,6 +681,48 @@ public class ResourcesService extends BaseService {
return result;
}
/**
* query resource by full name or id and type
* @param fullName resource full name
* @param id resource id
* @param type resource type
* @return the resource queried by full name, or its parent resource when queried by id
*/
public Result queryResource(String fullName,Integer id,ResourceType type) {
Result result = new Result();
if (StringUtils.isBlank(fullName) && id == null) {
logger.error("You must input one of fullName and pid");
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR);
return result;
}
if (StringUtils.isNotBlank(fullName)) {
List<Resource> resourceList = resourcesMapper.queryResource(fullName,type.ordinal());
if (CollectionUtils.isEmpty(resourceList)) {
logger.error("resource file not exist, resource full name {} ", fullName);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
putMsg(result, Status.SUCCESS);
result.setData(resourceList.get(0));
} else {
Resource resource = resourcesMapper.selectById(id);
if (resource == null) {
logger.error("resource file not exist, resource id {}", id);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
Resource parentResource = resourcesMapper.selectById(resource.getPid());
if (parentResource == null) {
logger.error("parent resource file not exist, resource id {}", id);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
putMsg(result, Status.SUCCESS);
result.setData(parentResource);
}
return result;
}
/**
* view resource file online
*
@ -508,7 +744,7 @@ public class ResourcesService extends BaseService {
// get resource by id
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("resouce file not exist, resource id {}", resourceId);
logger.error("resource file not exist, resource id {}", resourceId);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
@ -518,7 +754,7 @@ public class ResourcesService extends BaseService {
if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
if (!strList.contains(nameSuffix)) {
logger.error("resouce suffix {} not support view, resource id {}", nameSuffix, resourceId);
logger.error("resource suffix {} not support view, resource id {}", nameSuffix, resourceId);
putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
return result;
}
@ -530,7 +766,7 @@ public class ResourcesService extends BaseService {
}
// hdfs path
String hdfsFileName = HadoopUtils.getHdfsFilename(tenantCode, resource.getAlias());
String hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resource.getFullName());
logger.info("resource hdfs path is {} ", hdfsFileName);
try {
if(HadoopUtils.getInstance().exists(hdfsFileName)){
@ -566,7 +802,7 @@ public class ResourcesService extends BaseService {
* @return create result code
*/
@Transactional(rollbackFor = Exception.class)
public Result onlineCreateResource(User loginUser, ResourceType type, String fileName, String fileSuffix, String desc, String content) {
public Result onlineCreateResource(User loginUser, ResourceType type, String fileName, String fileSuffix, String desc, String content,int pid,String currentDirectory) {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
@ -588,15 +824,16 @@ public class ResourcesService extends BaseService {
}
String name = fileName.trim() + "." + nameSuffix;
String fullName = currentDirectory.equals("/") ? String.format("%s%s",currentDirectory,name):String.format("%s/%s",currentDirectory,name);
result = verifyResourceName(name,type,loginUser);
result = verifyResourceName(fullName,type,loginUser);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
// save data
Date now = new Date();
Resource resource = new Resource(name,name,desc,loginUser.getId(),type,content.getBytes().length,now,now);
Resource resource = new Resource(pid,name,fullName,false,desc,name,loginUser.getId(),type,content.getBytes().length,now,now);
resourcesMapper.insert(resource);
@ -612,7 +849,7 @@ public class ResourcesService extends BaseService {
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
result = uploadContentToHdfs(name, tenantCode, content);
result = uploadContentToHdfs(fullName, tenantCode, content);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
throw new RuntimeException(result.getMsg());
}
@ -664,7 +901,7 @@ public class ResourcesService extends BaseService {
resourcesMapper.updateById(resource);
result = uploadContentToHdfs(resource.getAlias(), tenantCode, content);
result = uploadContentToHdfs(resource.getFullName(), tenantCode, content);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
throw new RuntimeException(result.getMsg());
}
@ -672,10 +909,10 @@ public class ResourcesService extends BaseService {
}
/**
* @param resourceName
* @param tenantCode
* @param content
* @return
* @param resourceName resource name
* @param tenantCode tenant code
* @param content content
* @return result
*/
private Result uploadContentToHdfs(String resourceName, String tenantCode, String content) {
Result result = new Result();
@ -691,8 +928,8 @@ public class ResourcesService extends BaseService {
return result;
}
// get file hdfs path
hdfsFileName = HadoopUtils.getHdfsFilename(tenantCode, resourceName);
// get resource file hdfs path
hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resourceName);
String resourcePath = HadoopUtils.getHdfsResDir(tenantCode);
logger.info("resource hdfs path is {} ", hdfsFileName);
@ -736,21 +973,50 @@ public class ResourcesService extends BaseService {
logger.error("download file not exist, resource id {}", resourceId);
return null;
}
if (resource.isDirectory()) {
logger.error("resource id {} is directory,can't download it", resourceId);
throw new RuntimeException("cant't download directory");
}
User user = userMapper.queryDetailsById(resource.getUserId());
String tenantCode = tenantMapper.queryById(user.getTenantId()).getTenantCode();
String hdfsFileName = "";
hdfsFileName = getHdfsFileName(resource, tenantCode, hdfsFileName);
String hdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getAlias());
String localFileName = FileUtils.getDownloadFilename(resource.getAlias());
logger.info("resource hdfs path is {} ", hdfsFileName);
HadoopUtils.getInstance().copyHdfsToLocal(hdfsFileName, localFileName, false, true);
org.springframework.core.io.Resource file = org.apache.dolphinscheduler.api.utils.FileUtils.file2Resource(localFileName);
return file;
return org.apache.dolphinscheduler.api.utils.FileUtils.file2Resource(localFileName);
}
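A minimal usage sketch of the hardened download path, assuming the surrounding method is the service's download entry point (the name downloadResource is an assumption; it is not visible in this hunk):
// hypothetical caller in a controller
try {
    org.springframework.core.io.Resource file = resourcesService.downloadResource(resourceId);
    // stream the file back to the client
} catch (RuntimeException e) {
    // raised by the new guard when resourceId points at a directory
}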
/**
* list the resource tree that can be authorized to the specified user
*
* @param loginUser login user
* @param userId user id
* @return authorizable resource tree
*/
public Map<String, Object> authorizeResourceTree(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (checkAdmin(loginUser, result)) {
return result;
}
List<Resource> resourceList = resourcesMapper.queryResourceExceptUserId(userId);
List<ResourceComponent> list ;
if (CollectionUtils.isNotEmpty(resourceList)) {
Visitor visitor = new ResourceTreeVisitor(resourceList);
list = visitor.visit().getChildren();
}else {
list = new ArrayList<>(0);
}
result.put(Constants.DATA_LIST, list);
putMsg(result,Status.SUCCESS);
return result;
}
/**
* unauthorized file
*
@ -765,7 +1031,7 @@ public class ResourcesService extends BaseService {
return result;
}
List<Resource> resourceList = resourcesMapper.queryResourceExceptUserId(userId);
List<Object> list ;
List<Resource> list ;
if (resourceList != null && resourceList.size() > 0) {
Set<Resource> resourceSet = new HashSet<>(resourceList);
List<Resource> authedResourceList = resourcesMapper.queryAuthorizedResourceList(userId);
@ -775,15 +1041,12 @@ public class ResourcesService extends BaseService {
}else {
list = new ArrayList<>(0);
}
result.put(Constants.DATA_LIST, list);
Visitor visitor = new ResourceTreeVisitor(list);
result.put(Constants.DATA_LIST, visitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* unauthorized udf function
*
@ -801,7 +1064,7 @@ public class ResourcesService extends BaseService {
List<UdfFunc> udfFuncList = udfFunctionMapper.queryUdfFuncExceptUserId(userId);
List<UdfFunc> resultList = new ArrayList<>();
Set<UdfFunc> udfFuncSet = null;
if (udfFuncList != null && udfFuncList.size() > 0) {
if (CollectionUtils.isNotEmpty(udfFuncList)) {
udfFuncSet = new HashSet<>(udfFuncList);
List<UdfFunc> authedUDFFuncList = udfFunctionMapper.queryAuthedUdfFunc(userId);
@ -849,46 +1112,15 @@ public class ResourcesService extends BaseService {
return result;
}
List<Resource> authedResources = resourcesMapper.queryAuthorizedResourceList(userId);
result.put(Constants.DATA_LIST, authedResources);
Visitor visitor = new ResourceTreeVisitor(authedResources);
ResourceComponent tree = visitor.visit();
logger.info(JSON.toJSONString(tree, SerializerFeature.SortField));
result.put(Constants.DATA_LIST, tree.getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* get hdfs file name
*
* @param resource resource
* @param tenantCode tenant code
* @param hdfsFileName hdfs file name
* @return hdfs file name
*/
private String getHdfsFileName(Resource resource, String tenantCode, String hdfsFileName) {
if (resource.getType().equals(ResourceType.FILE)) {
hdfsFileName = HadoopUtils.getHdfsFilename(tenantCode, resource.getAlias());
} else if (resource.getType().equals(ResourceType.UDF)) {
hdfsFileName = HadoopUtils.getHdfsUdfFilename(tenantCode, resource.getAlias());
}
return hdfsFileName;
}
/**
* get hdfs file name
*
* @param resourceType resource type
* @param tenantCode tenant code
* @param hdfsFileName hdfs file name
* @return hdfs file name
*/
private String getHdfsFileName(ResourceType resourceType, String tenantCode, String hdfsFileName) {
if (resourceType.equals(ResourceType.FILE)) {
hdfsFileName = HadoopUtils.getHdfsFilename(tenantCode, hdfsFileName);
} else if (resourceType.equals(ResourceType.UDF)) {
hdfsFileName = HadoopUtils.getHdfsUdfFilename(tenantCode, hdfsFileName);
}
return hdfsFileName;
}
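The two removed overloads above are folded into a single type-dispatching call on HadoopUtils (the call site appears earlier in this diff as HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getAlias())). A sketch of the presumed shape; getHdfsUdfFileName is an assumed rename of the old getHdfsUdfFilename:
public static String getHdfsFileName(ResourceType resourceType, String tenantCode, String fileName) {
    // UDF resources live under the tenant's udf dir, plain files under the resource dir
    return resourceType.equals(ResourceType.UDF)
            ? getHdfsUdfFileName(tenantCode, fileName)
            : getHdfsResourceFileName(tenantCode, fileName);
}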
/**
* get authorized resource list
*
@ -897,10 +1129,9 @@ public class ResourcesService extends BaseService {
*/
private void getAuthorizedResourceList(Set<?> resourceSet, List<?> authedResourceList) {
Set<?> authedResourceSet = null;
if (authedResourceList != null && authedResourceList.size() > 0) {
if (CollectionUtils.isNotEmpty(authedResourceList)) {
authedResourceSet = new HashSet<>(authedResourceList);
resourceSet.removeAll(authedResourceSet);
}
}
@ -929,4 +1160,69 @@ public class ResourcesService extends BaseService {
return tenant.getTenantCode();
}
/**
* list all children id
* @param resource resource
* @return all children id
*/
List<Integer> listAllChildren(Resource resource){
List<Integer> childList = new ArrayList<>();
if (resource.getId() != -1) {
childList.add(resource.getId());
}
if(resource.isDirectory()){
listAllChildren(resource.getId(),childList);
}
return childList;
}
/**
* list all children id
* @param resourceId resource id
* @param childList child list
*/
void listAllChildren(int resourceId,List<Integer> childList){
List<Integer> children = resourcesMapper.listChildren(resourceId);
for (int childId : children) {
childList.add(childId);
listAllChildren(childId, childList);
}
}
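A minimal usage sketch: before deleting a directory, a caller can collect the directory's own id plus every descendant id in one call (variable names are illustrative):
Resource dir = resourcesMapper.selectById(dirId);
List<Integer> idsToDelete = listAllChildren(dir);
// a directory with one file and one nested dir yields [dirId, fileId, nestedDirId, ...],
// which the caller can then remove in a single batch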
/**
* get resource process map: key is resource id, value is the set of process definitions that reference it
* @return resource process definition map
*/
private Map<Integer,Set<Integer>> getResourceProcessMap(){
Map<Integer, String> map = new HashMap<>();
Map<Integer, Set<Integer>> result = new HashMap<>();
List<Map<String, Object>> list = processDefinitionMapper.listResources();
if (CollectionUtils.isNotEmpty(list)) {
for (Map<String, Object> tempMap : list) {
map.put((Integer) tempMap.get("id"), (String)tempMap.get("resource_ids"));
}
}
for (Map.Entry<Integer, String> entry : map.entrySet()) {
Integer mapKey = entry.getKey();
String[] arr = entry.getValue().split(",");
Set<Integer> mapValues = Arrays.stream(arr).map(Integer::parseInt).collect(Collectors.toSet());
for (Integer value : mapValues) {
if (result.containsKey(value)) {
Set<Integer> set = result.get(value);
set.add(mapKey);
result.put(value, set);
} else {
Set<Integer> set = new HashSet<>();
set.add(mapKey);
result.put(value, set);
}
}
}
return result;
}
}
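The inversion loop in getResourceProcessMap can be restated compactly with computeIfAbsent; this is only an equivalent sketch of the same logic, not a suggested change to the commit:
for (Map.Entry<Integer, String> entry : map.entrySet()) {
    for (String resourceId : entry.getValue().split(",")) {
        result.computeIfAbsent(Integer.parseInt(resourceId), k -> new HashSet<>())
              .add(entry.getKey()); // process definition id
    }
}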

4
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/TaskInstanceService.java

@ -32,8 +32,6 @@ import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
@ -46,8 +44,6 @@ import java.util.*;
@Service
public class TaskInstanceService extends BaseService {
private static final Logger logger = LoggerFactory.getLogger(TaskInstanceService.class);
@Autowired
ProjectMapper projectMapper;

4
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/TaskRecordService.java

@ -21,8 +21,6 @@ import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.dao.TaskRecordDao;
import org.apache.dolphinscheduler.dao.entity.TaskRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Service;
import java.util.HashMap;
@ -37,8 +35,6 @@ import static org.apache.dolphinscheduler.common.Constants.*;
@Service
public class TaskRecordService extends BaseService{
private static final Logger logger = LoggerFactory.getLogger(TaskRecordService.class);
/**
* query task record list paging
*

4
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/TenantService.java

@ -310,7 +310,7 @@ public class TenantService extends BaseService{
Map<String, Object> result = new HashMap<>(5);
List<Tenant> resourceList = tenantMapper.queryByTenantCode(tenantCode);
if (resourceList != null && resourceList.size() > 0) {
if (CollectionUtils.isNotEmpty(resourceList)) {
result.put(Constants.DATA_LIST, resourceList);
putMsg(result, Status.SUCCESS);
} else {
@ -346,6 +346,6 @@ public class TenantService extends BaseService{
*/
private boolean checkTenantExists(String tenantCode) {
List<Tenant> tenants = tenantMapper.queryByTenantCode(tenantCode);
return (tenants != null && tenants.size() > 0);
return CollectionUtils.isNotEmpty(tenants);
}
}

4
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/UdfFuncService.java

@ -118,7 +118,7 @@ public class UdfFuncService extends BaseService{
}
udf.setDescription(desc);
udf.setResourceId(resourceId);
udf.setResourceName(resource.getAlias());
udf.setResourceName(resource.getFullName());
udf.setType(type);
udf.setCreateTime(now);
@ -226,7 +226,7 @@ public class UdfFuncService extends BaseService{
}
udf.setDescription(desc);
udf.setResourceId(resourceId);
udf.setResourceName(resource.getAlias());
udf.setResourceName(resource.getFullName());
udf.setType(type);
udf.setUpdateTime(now);

38
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/UserAlertGroupService.java

@ -0,0 +1,38 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.service;
import com.baomidou.mybatisplus.extension.service.impl.ServiceImpl;
import org.apache.dolphinscheduler.dao.entity.UserAlertGroup;
import org.apache.dolphinscheduler.dao.mapper.UserAlertGroupMapper;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
/**
* user alert group service
*/
@Service
public class UserAlertGroupService extends ServiceImpl<UserAlertGroupMapper, UserAlertGroup> {
@Autowired
private UserAlertGroupMapper userAlertGroupMapper;
boolean deleteByAlertGroupId(Integer groupId) {
return userAlertGroupMapper.deleteByAlertgroupId(groupId) >= 1;
}
}
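A sketch of how AlertGroupService#grantUser presumably drives this new service, inferred from the test changes later in this diff (deleteByAlertGroupId is verified there; the insert loop and the UserAlertGroup setters are hypothetical):
userAlertGroupService.deleteByAlertGroupId(alertGroupId); // drop old bindings first
for (String userId : userIds.split(",")) {
    UserAlertGroup binding = new UserAlertGroup();      // hypothetical shape
    binding.setAlertgroupId(alertGroupId);
    binding.setUserId(Integer.parseInt(userId));
    userAlertGroupService.save(binding);                // IService#save from mybatis-plus
}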

2
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/WorkerGroupService.java

@ -111,7 +111,7 @@ public class WorkerGroupService extends BaseService {
List<WorkerGroup> workerGroupList = workerGroupMapper.queryWorkerGroupByName(workerGroup.getName());
if(workerGroupList.size() > 0 ){
if(CollectionUtils.isNotEmpty(workerGroupList)){
// a group with the same name already exists
if(workerGroup.getId() == 0){
return true;

4
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/DataSourceControllerTest.java

@ -39,6 +39,7 @@ import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.
* data source controller test
*/
public class DataSourceControllerTest extends AbstractControllerTest{
private static Logger logger = LoggerFactory.getLogger(DataSourceControllerTest.class);
@Ignore
@ -95,6 +96,7 @@ public class DataSourceControllerTest extends AbstractControllerTest{
@Ignore
@Test
public void testQueryDataSource() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
@ -169,6 +171,7 @@ public class DataSourceControllerTest extends AbstractControllerTest{
}
@Ignore
@Test
public void testConnectionTest() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
@ -248,6 +251,7 @@ public class DataSourceControllerTest extends AbstractControllerTest{
@Ignore
@Test
public void testDelete() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();

2
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/LoginControllerTest.java

@ -36,7 +36,7 @@ import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.
* login controller test
*/
public class LoginControllerTest extends AbstractControllerTest{
private static Logger logger = LoggerFactory.getLogger(SchedulerControllerTest.class);
private static Logger logger = LoggerFactory.getLogger(LoginControllerTest.class);
@Test

2
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/TaskRecordControllerTest.java

@ -33,7 +33,7 @@ import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status;
public class TaskRecordControllerTest extends AbstractControllerTest {
private static final Logger logger = LoggerFactory.getLogger(TaskInstanceController.class);
private static final Logger logger = LoggerFactory.getLogger(TaskRecordControllerTest.class);
@Test
public void testQueryTaskRecordListPaging() throws Exception {

2
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/UsersControllerTest.java

@ -37,7 +37,7 @@ import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.
* users controller test
*/
public class UsersControllerTest extends AbstractControllerTest{
private static Logger logger = LoggerFactory.getLogger(QueueControllerTest.class);
private static Logger logger = LoggerFactory.getLogger(UsersControllerTest.class);
@Test
public void testCreateUser() throws Exception {

58
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/dto/resources/filter/ResourceFilterTest.java

@ -0,0 +1,58 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.dto.resources.filter;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
/**
* resource filter test
*/
public class ResourceFilterTest {
private static Logger logger = LoggerFactory.getLogger(ResourceFilterTest.class);
@Test
public void filterTest(){
List<Resource> allList = new ArrayList<>();
Resource resource1 = new Resource(3,-1,"b","/b",true);
Resource resource2 = new Resource(4,2,"a1.txt","/a/a1.txt",false);
Resource resource3 = new Resource(5,3,"b1.txt","/b/b1.txt",false);
Resource resource4 = new Resource(6,3,"b2.jar","/b/b2.jar",false);
Resource resource5 = new Resource(7,-1,"b2","/b2",true);
Resource resource6 = new Resource(8,-1,"b2","/b/b2",true);
Resource resource7 = new Resource(9,8,"c2.jar","/b/b2/c2.jar",false);
allList.add(resource1);
allList.add(resource2);
allList.add(resource3);
allList.add(resource4);
allList.add(resource5);
allList.add(resource6);
allList.add(resource7);
ResourceFilter resourceFilter = new ResourceFilter(".jar",allList);
List<Resource> resourceList = resourceFilter.filter();
Assert.assertNotNull(resourceList);
resourceList.stream().forEach(t-> logger.info(t.toString()));
}
}
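The test only asserts a non-null result; judging from the fixture, the filter presumably keeps resources whose full name ends with the suffix together with the directories on their paths (an assumption about ResourceFilter's semantics, not stated in this diff):
ResourceFilter jarFilter = new ResourceFilter(".jar", allList);
List<Resource> jars = jarFilter.filter();
// expected to retain "/b/b2.jar" and "/b/b2/c2.jar",
// plus their ancestor directories "/b" and "/b/b2"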

82
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/dto/resources/visitor/ResourceTreeVisitorTest.java

@ -0,0 +1,82 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.dto.resources.visitor;
import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.junit.Assert;
import org.junit.Test;
import java.util.ArrayList;
import java.util.List;
/**
* resource tree visitor test
*/
public class ResourceTreeVisitorTest {
@Test
public void visit() throws Exception {
List<Resource> resourceList = new ArrayList<>();
Resource resource1 = new Resource(3,-1,"b","/b",true);
Resource resource2 = new Resource(4,2,"a1.txt","/a/a1.txt",false);
Resource resource3 = new Resource(5,3,"b1.txt","/b/b1.txt",false);
Resource resource4 = new Resource(6,3,"b2.jar","/b/b2.jar",false);
Resource resource5 = new Resource(7,-1,"b2","/b2",true);
Resource resource6 = new Resource(8,-1,"b2","/b/b2",true);
Resource resource7 = new Resource(9,8,"c2.jar","/b/b2/c2.jar",false);
resourceList.add(resource1);
resourceList.add(resource2);
resourceList.add(resource3);
resourceList.add(resource4);
resourceList.add(resource5);
resourceList.add(resource6);
resourceList.add(resource7);
ResourceTreeVisitor resourceTreeVisitor = new ResourceTreeVisitor(resourceList);
ResourceComponent resourceComponent = resourceTreeVisitor.visit();
Assert.assertNotNull(resourceComponent.getChildren());
}
@Test
public void rootNode() throws Exception {
List<Resource> resourceList = new ArrayList<>();
Resource resource1 = new Resource(3,-1,"b","/b",true);
Resource resource2 = new Resource(4,2,"a1.txt","/a/a1.txt",false);
Resource resource3 = new Resource(5,3,"b1.txt","/b/b1.txt",false);
Resource resource4 = new Resource(6,3,"b2.jar","/b/b2.jar",false);
Resource resource5 = new Resource(7,-1,"b2","/b2",true);
Resource resource6 = new Resource(8,-1,"b2","/b/b2",true);
Resource resource7 = new Resource(9,8,"c2.jar","/b/b2/c2.jar",false);
resourceList.add(resource1);
resourceList.add(resource2);
resourceList.add(resource3);
resourceList.add(resource4);
resourceList.add(resource5);
resourceList.add(resource6);
resourceList.add(resource7);
ResourceTreeVisitor resourceTreeVisitor = new ResourceTreeVisitor(resourceList);
Assert.assertTrue(resourceTreeVisitor.rootNode(resource1));
Assert.assertTrue(resourceTreeVisitor.rootNode(resource2));
Assert.assertFalse(resourceTreeVisitor.rootNode(resource3));
}
}
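From the assertions above one can infer rootNode's presumed rule: a resource is a tree root if its pid is -1 or its parent id does not occur in the visited list (resource2 has pid 2, and no resource with id 2 is in the fixture). A sketch of that rule, not the committed implementation:
boolean rootNode(Resource resource) {
    return resource.getPid() == -1
            || resourceList.stream().noneMatch(r -> r.getId() == resource.getPid());
}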

2
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/enums/StatusTest.java

@ -28,7 +28,7 @@ public class StatusTest {
@Test
public void testGetCode() {
assertEquals(Status.SUCCESS.getCode(), 0);
assertEquals(0, Status.SUCCESS.getCode());
assertNotEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR.getCode(), 0);
}

47
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/AlertGroupServiceTest.java

@ -18,9 +18,12 @@ package org.apache.dolphinscheduler.api.service;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.AlertType;
import org.apache.dolphinscheduler.common.enums.UserType;
@ -31,9 +34,12 @@ import org.apache.dolphinscheduler.dao.mapper.AlertGroupMapper;
import org.apache.dolphinscheduler.dao.mapper.UserAlertGroupMapper;
import org.junit.After;
import org.junit.Assert;
import static org.junit.Assert.assertEquals;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.ArgumentCaptor;
import static org.mockito.ArgumentMatchers.*;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
@ -41,14 +47,6 @@ import org.mockito.junit.MockitoJUnitRunner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
@RunWith(MockitoJUnitRunner.class)
public class AlertGroupServiceTest {
@ -60,6 +58,8 @@ public class AlertGroupServiceTest {
private AlertGroupMapper alertGroupMapper;
@Mock
private UserAlertGroupMapper userAlertGroupMapper;
@Mock
UserAlertGroupService userAlertGroupService;
private String groupName = "AlertGroupServiceTest";
@ -160,25 +160,34 @@ public class AlertGroupServiceTest {
}
@Test
public void testGrantUser(){
public void testGrantUser() {
Integer groupId = 1;
ArgumentCaptor<Integer> groupArgument = ArgumentCaptor.forClass(Integer.class);
Mockito.when(userAlertGroupService.deleteByAlertGroupId(anyInt())).thenReturn(true);
Map<String, Object> result = alertGroupService.grantUser(getLoginUser(), groupId, "123,321");
Mockito.verify(userAlertGroupService).deleteByAlertGroupId(groupArgument.capture());
Map<String, Object> result = alertGroupService.grantUser(getLoginUser(),1,"123,321");
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS,result.get(Constants.STATUS));
assertEquals(groupArgument.getValue(), groupId);
assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
}
@Test
public void testVerifyGroupName(){
public void testVerifyGroupName() {
//group name not exist
Result result = alertGroupService.verifyGroupName(getLoginUser(), groupName);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(),result.getMsg());
boolean result = alertGroupService.existGroupName(groupName);
Assert.assertFalse(result);
Mockito.when(alertGroupMapper.queryByGroupName(groupName)).thenReturn(getList());
//group name exist
result = alertGroupService.verifyGroupName(getLoginUser(), groupName);
logger.info(result.toString());
Assert.assertEquals(Status.ALERT_GROUP_EXIST.getMsg(),result.getMsg());
result = alertGroupService.existGroupName(groupName);
Assert.assertTrue(result);
}

112
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ResourcesServiceTest.java

@ -24,10 +24,7 @@ import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.FileUtils;
import org.apache.dolphinscheduler.common.utils.HadoopUtils;
import org.apache.dolphinscheduler.common.utils.PropertyUtils;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.UdfFunc;
@ -40,6 +37,7 @@ import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.omg.CORBA.Any;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PowerMockIgnore;
import org.powermock.core.classloader.annotations.PrepareForTest;
@ -73,6 +71,8 @@ public class ResourcesServiceTest {
private UserMapper userMapper;
@Mock
private UdfFuncMapper udfFunctionMapper;
@Mock
private ProcessDefinitionMapper processDefinitionMapper;
@Before
public void setUp() {
@ -96,14 +96,14 @@ public class ResourcesServiceTest {
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
User user = new User();
//HDFS_NOT_STARTUP
Result result = resourcesService.createResource(user,"ResourcesServiceTest","ResourcesServiceTest",ResourceType.FILE,null);
Result result = resourcesService.createResource(user,"ResourcesServiceTest","ResourcesServiceTest",ResourceType.FILE,null,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(),result.getMsg());
//RESOURCE_FILE_IS_EMPTY
MockMultipartFile mockMultipartFile = new MockMultipartFile("test.pdf",new String().getBytes());
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
result = resourcesService.createResource(user,"ResourcesServiceTest","ResourcesServiceTest",ResourceType.FILE,mockMultipartFile);
result = resourcesService.createResource(user,"ResourcesServiceTest","ResourcesServiceTest",ResourceType.FILE,mockMultipartFile,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_FILE_IS_EMPTY.getMsg(),result.getMsg());
@ -111,31 +111,42 @@ public class ResourcesServiceTest {
mockMultipartFile = new MockMultipartFile("test.pdf","test.pdf","pdf",new String("test").getBytes());
PowerMockito.when(FileUtils.suffix("test.pdf")).thenReturn("pdf");
PowerMockito.when(FileUtils.suffix("ResourcesServiceTest.jar")).thenReturn("jar");
result = resourcesService.createResource(user,"ResourcesServiceTest.jar","ResourcesServiceTest",ResourceType.FILE,mockMultipartFile);
result = resourcesService.createResource(user,"ResourcesServiceTest.jar","ResourcesServiceTest",ResourceType.FILE,mockMultipartFile,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_SUFFIX_FORBID_CHANGE.getMsg(),result.getMsg());
//UDF_RESOURCE_SUFFIX_NOT_JAR
mockMultipartFile = new MockMultipartFile("ResourcesServiceTest.pdf","ResourcesServiceTest.pdf","pdf",new String("test").getBytes());
PowerMockito.when(FileUtils.suffix("ResourcesServiceTest.pdf")).thenReturn("pdf");
result = resourcesService.createResource(user,"ResourcesServiceTest.pdf","ResourcesServiceTest",ResourceType.UDF,mockMultipartFile);
result = resourcesService.createResource(user,"ResourcesServiceTest.pdf","ResourcesServiceTest",ResourceType.UDF,mockMultipartFile,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.UDF_RESOURCE_SUFFIX_NOT_JAR.getMsg(),result.getMsg());
//UDF_RESOURCE_SUFFIX_NOT_JAR
Mockito.when(tenantMapper.queryById(0)).thenReturn(getTenant());
Mockito.when(resourcesMapper.queryResourceList("ResourcesServiceTest.jar", 0, 1)).thenReturn(getResourceList());
mockMultipartFile = new MockMultipartFile("ResourcesServiceTest.jar","ResourcesServiceTest.jar","pdf",new String("test").getBytes());
result = resourcesService.createResource(user,"ResourcesServiceTest.jar","ResourcesServiceTest",ResourceType.UDF,mockMultipartFile);
}
@Test
public void testCreateDirectory(){
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
User user = new User();
//HDFS_NOT_STARTUP
Result result = resourcesService.createDirectory(user,"directoryTest","directory test",ResourceType.FILE,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_EXIST.getMsg(),result.getMsg());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(),result.getMsg());
//SUCCESS
Mockito.when(resourcesMapper.queryResourceList("ResourcesServiceTest.jar", 0, 1)).thenReturn(new ArrayList<>());
result = resourcesService.createResource(user,"ResourcesServiceTest.jar","ResourcesServiceTest",ResourceType.UDF,mockMultipartFile);
//PARENT_RESOURCE_NOT_EXIST
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
Mockito.when(resourcesMapper.selectById(Mockito.anyInt())).thenReturn(null);
result = resourcesService.createDirectory(user,"directoryTest","directory test",ResourceType.FILE,1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(),result.getMsg());
Assert.assertEquals(Status.PARENT_RESOURCE_NOT_EXIST.getMsg(),result.getMsg());
//RESOURCE_EXIST
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
Mockito.when(resourcesMapper.queryResourceList("/directoryTest", 0, 0)).thenReturn(getResourceList());
result = resourcesService.createDirectory(user,"directoryTest","directory test",ResourceType.FILE,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_EXIST.getMsg(),result.getMsg());
}
@ -163,41 +174,46 @@ public class ResourcesServiceTest {
//SUCCESS
user.setId(1);
result = resourcesService.updateResource(user,1,"ResourcesServiceTest.jar","ResourcesServiceTest.jar",ResourceType.FILE);
Mockito.when(userMapper.queryDetailsById(1)).thenReturn(getUser());
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
result = resourcesService.updateResource(user,1,"ResourcesServiceTest.jar","ResourcesServiceTest",ResourceType.FILE);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(),result.getMsg());
//RESOURCE_EXIST
Mockito.when(resourcesMapper.queryResourceList("ResourcesServiceTest1.jar", 0, 0)).thenReturn(getResourceList());
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest1.jar",ResourceType.FILE);
Mockito.when(resourcesMapper.queryResourceList("/ResourcesServiceTest1.jar", 0, 0)).thenReturn(getResourceList());
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest",ResourceType.FILE);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_EXIST.getMsg(),result.getMsg());
//USER_NOT_EXIST
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest1.jar",ResourceType.UDF);
Mockito.when(userMapper.queryDetailsById(Mockito.anyInt())).thenReturn(null);
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest",ResourceType.UDF);
logger.info(result.toString());
Assert.assertTrue(Status.USER_NOT_EXIST.getCode() == result.getCode());
//TENANT_NOT_EXIST
Mockito.when(userMapper.queryDetailsById(1)).thenReturn(getUser());
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest1.jar",ResourceType.UDF);
Mockito.when(tenantMapper.queryById(Mockito.anyInt())).thenReturn(null);
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest",ResourceType.UDF);
logger.info(result.toString());
Assert.assertEquals(Status.TENANT_NOT_EXIST.getMsg(),result.getMsg());
//RESOURCE_NOT_EXIST
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
PowerMockito.when(HadoopUtils.getHdfsFilename(Mockito.any(), Mockito.any())).thenReturn("test1");
PowerMockito.when(HadoopUtils.getHdfsResourceFileName(Mockito.any(), Mockito.any())).thenReturn("test1");
try {
Mockito.when(hadoopUtils.exists("test")).thenReturn(true);
} catch (IOException e) {
e.printStackTrace();
}
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest1.jar",ResourceType.UDF);
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest",ResourceType.UDF);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(),result.getMsg());
//SUCCESS
PowerMockito.when(HadoopUtils.getHdfsFilename(Mockito.any(), Mockito.any())).thenReturn("test");
PowerMockito.when(HadoopUtils.getHdfsResourceFileName(Mockito.any(), Mockito.any())).thenReturn("test");
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest1.jar",ResourceType.UDF);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(),result.getMsg());
@ -212,8 +228,8 @@ public class ResourcesServiceTest {
resourcePage.setTotal(1);
resourcePage.setRecords(getResourceList());
Mockito.when(resourcesMapper.queryResourcePaging(Mockito.any(Page.class),
Mockito.eq(0), Mockito.eq(0), Mockito.eq("test"))).thenReturn(resourcePage);
Map<String, Object> result = resourcesService.queryResourceListPaging(loginUser,ResourceType.FILE,"test",1,10);
Mockito.eq(0),Mockito.eq(-1), Mockito.eq(0), Mockito.eq("test"))).thenReturn(resourcePage);
Map<String, Object> result = resourcesService.queryResourceListPaging(loginUser,-1,ResourceType.FILE,"test",1,10);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
PageInfo pageInfo = (PageInfo) result.get(Constants.DATA_LIST);
@ -263,6 +279,7 @@ public class ResourcesServiceTest {
//TENANT_NOT_EXIST
loginUser.setUserType(UserType.ADMIN_USER);
loginUser.setTenantId(2);
Mockito.when(userMapper.queryDetailsById(Mockito.anyInt())).thenReturn(loginUser);
result = resourcesService.delete(loginUser,1);
logger.info(result.toString());
Assert.assertEquals(Status.TENANT_NOT_EXIST.getMsg(), result.getMsg());
@ -285,14 +302,20 @@ public class ResourcesServiceTest {
User user = new User();
user.setId(1);
Mockito.when(resourcesMapper.queryResourceList("test", 0, 0)).thenReturn(getResourceList());
Result result = resourcesService.verifyResourceName("test",ResourceType.FILE,user);
Mockito.when(resourcesMapper.queryResourceList("/ResourcesServiceTest.jar", 0, 0)).thenReturn(getResourceList());
Result result = resourcesService.verifyResourceName("/ResourcesServiceTest.jar",ResourceType.FILE,user);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_EXIST.getMsg(), result.getMsg());
//TENANT_NOT_EXIST
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
result = resourcesService.verifyResourceName("test1",ResourceType.FILE,user);
String unExistFullName = "/test.jar";
try {
Mockito.when(hadoopUtils.exists(unExistFullName)).thenReturn(false);
} catch (IOException e) {
logger.error("hadoop error",e);
}
result = resourcesService.verifyResourceName("/test.jar",ResourceType.FILE,user);
logger.info(result.toString());
Assert.assertEquals(Status.TENANT_NOT_EXIST.getMsg(), result.getMsg());
@ -304,10 +327,10 @@ public class ResourcesServiceTest {
} catch (IOException e) {
logger.error("hadoop error",e);
}
PowerMockito.when(HadoopUtils.getHdfsFilename("123", "test1")).thenReturn("test");
result = resourcesService.verifyResourceName("test1",ResourceType.FILE,user);
PowerMockito.when(HadoopUtils.getHdfsResourceFileName("123", "test1")).thenReturn("test");
result = resourcesService.verifyResourceName("/ResourcesServiceTest.jar",ResourceType.FILE,user);
logger.info(result.toString());
Assert.assertTrue(Status.RESOURCE_FILE_EXIST.getCode()==result.getCode());
Assert.assertTrue(Status.RESOURCE_EXIST.getCode()==result.getCode());
//SUCCESS
result = resourcesService.verifyResourceName("test2",ResourceType.FILE,user);
@ -389,14 +412,14 @@ public class ResourcesServiceTest {
PowerMockito.when(HadoopUtils.getHdfsUdfDir("udfDir")).thenReturn("udfDir");
User user = getUser();
//HDFS_NOT_STARTUP
Result result = resourcesService.onlineCreateResource(user,ResourceType.FILE,"test","jar","desc","content");
Result result = resourcesService.onlineCreateResource(user,ResourceType.FILE,"test","jar","desc","content",-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(),result.getMsg());
//RESOURCE_SUFFIX_NOT_SUPPORT_VIEW
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("class");
result = resourcesService.onlineCreateResource(user,ResourceType.FILE,"test","jar","desc","content");
result = resourcesService.onlineCreateResource(user,ResourceType.FILE,"test","jar","desc","content",-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW.getMsg(),result.getMsg());
@ -404,7 +427,7 @@ public class ResourcesServiceTest {
try {
PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("jar");
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
result = resourcesService.onlineCreateResource(user, ResourceType.FILE, "test", "jar", "desc", "content");
result = resourcesService.onlineCreateResource(user, ResourceType.FILE, "test", "jar", "desc", "content",-1,"/");
}catch (RuntimeException ex){
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(), ex.getMessage());
@ -413,7 +436,7 @@ public class ResourcesServiceTest {
//SUCCESS
Mockito.when(FileUtils.getUploadFilename(Mockito.anyString(), Mockito.anyString())).thenReturn("test");
PowerMockito.when(FileUtils.writeContent2File(Mockito.anyString(), Mockito.anyString())).thenReturn(true);
result = resourcesService.onlineCreateResource(user,ResourceType.FILE,"test","jar","desc","content");
result = resourcesService.onlineCreateResource(user,ResourceType.FILE,"test","jar","desc","content",-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(),result.getMsg());
@ -584,13 +607,26 @@ public class ResourcesServiceTest {
private Resource getResource(){
Resource resource = new Resource();
resource.setPid(-1);
resource.setUserId(1);
resource.setDescription("ResourcesServiceTest.jar");
resource.setAlias("ResourcesServiceTest.jar");
resource.setFullName("/ResourcesServiceTest.jar");
resource.setType(ResourceType.FILE);
return resource;
}
private Resource getUdfResource(){
Resource resource = new Resource();
resource.setUserId(1);
resource.setDescription("udfTest");
resource.setAlias("udfTest.jar");
resource.setFullName("/udfTest.jar");
resource.setType(ResourceType.UDF);
return resource;
}
private UdfFunc getUdfFunc(){
UdfFunc udfFunc = new UdfFunc();

53
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/UserAlertGroupServiceTest.java

@ -0,0 +1,53 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.service;
import org.apache.dolphinscheduler.dao.mapper.UserAlertGroupMapper;
import static org.junit.Assert.assertEquals;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.ArgumentCaptor;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;
/**
* user alert group service test
*/
@RunWith(MockitoJUnitRunner.class)
public class UserAlertGroupServiceTest {
@InjectMocks
UserAlertGroupService userAlertGroupService;
@Mock
UserAlertGroupMapper userAlertGroupMapper;
@Test
public void deleteByAlertGroupId() {
Integer groupId = 1;
userAlertGroupService.deleteByAlertGroupId(groupId);
ArgumentCaptor<Integer> argumentCaptor = ArgumentCaptor.forClass(Integer.class);
Mockito.verify(userAlertGroupMapper).deleteByAlertgroupId(argumentCaptor.capture());
assertEquals(argumentCaptor.getValue(), groupId);
}
}

7
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/utils/CheckUtilsTest.java

@ -43,6 +43,7 @@ import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Map;
import static org.junit.Assert.*;
@ -173,7 +174,11 @@ public class CheckUtilsTest {
// MapreduceParameters
MapreduceParameters mapreduceParameters = new MapreduceParameters();
assertFalse(CheckUtils.checkTaskNodeParameters(JSONUtils.toJsonString(mapreduceParameters), TaskType.MR.toString()));
mapreduceParameters.setMainJar(new ResourceInfo());
ResourceInfo resourceInfoMapreduce = new ResourceInfo();
resourceInfoMapreduce.setId(1);
resourceInfoMapreduce.setRes("");
mapreduceParameters.setMainJar(resourceInfoMapreduce);
mapreduceParameters.setProgramType(ProgramType.JAVA);
assertTrue(CheckUtils.checkTaskNodeParameters(JSONUtils.toJsonString(mapreduceParameters), TaskType.MR.toString()));

3
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java

@ -972,7 +972,8 @@ public final class Constants {
public static final String JDBC_POSTGRESQL = "jdbc:postgresql://";
public static final String JDBC_HIVE_2 = "jdbc:hive2://";
public static final String JDBC_CLICKHOUSE = "jdbc:clickhouse://";
public static final String JDBC_ORACLE = "jdbc:oracle:thin:@//";
public static final String JDBC_ORACLE_SID = "jdbc:oracle:thin:@";
public static final String JDBC_ORACLE_SERVICE_NAME = "jdbc:oracle:thin:@//";
public static final String JDBC_SQLSERVER = "jdbc:sqlserver://";
public static final String JDBC_DB2 = "jdbc:db2://";

12
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/AuthorizationType.java

@ -23,13 +23,17 @@ import com.baomidou.mybatisplus.annotation.EnumValue;
*/
public enum AuthorizationType {
/**
* 0 RESOURCE_FILE;
* 0 RESOURCE_FILE_ID;
* 0 RESOURCE_FILE_NAME;
* 1 UDF_FILE;
* 1 DATASOURCE;
* 2 UDF;
*/
RESOURCE_FILE(0, "resource file"),
DATASOURCE(1, "data source"),
UDF(2, "udf function");
RESOURCE_FILE_ID(0, "resource file id"),
RESOURCE_FILE_NAME(1, "resource file name"),
UDF_FILE(2, "udf file"),
DATASOURCE(3, "data source"),
UDF(4, "udf function");
AuthorizationType(int code, String descp){
this.code = code;

44
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/DbConnectType.java

@ -0,0 +1,44 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.enums;
import com.baomidou.mybatisplus.annotation.EnumValue;
public enum DbConnectType {
ORACLE_SERVICE_NAME(0, "Oracle Service Name"),
ORACLE_SID(1, "Oracle SID");
DbConnectType(int code, String descp) {
this.code = code;
this.descp = descp;
}
@EnumValue
private final int code;
private final String descp;
public int getCode() {
return code;
}
public String getDescp() {
return descp;
}
}
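Together with the split JDBC_ORACLE_SID / JDBC_ORACLE_SERVICE_NAME constants above, this enum lets URL building branch per connect type. A minimal sketch (buildOracleJdbcUrl is a hypothetical helper, not part of the commit):
static String buildOracleJdbcUrl(DbConnectType type, String host, int port, String db) {
    if (type == DbConnectType.ORACLE_SID) {
        return Constants.JDBC_ORACLE_SID + host + ":" + port + ":" + db;      // jdbc:oracle:thin:@host:port:SID
    }
    return Constants.JDBC_ORACLE_SERVICE_NAME + host + ":" + port + "/" + db; // jdbc:oracle:thin:@//host:port/service
}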

2
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/TaskStateType.java

@ -60,7 +60,7 @@ public enum TaskStateType {
default:
break;
}
return null;
return new int[0];
}
}
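Returning an empty array instead of null spares every caller a null check; a sketch of the calling pattern this protects (the accessor name is hypothetical, since the method signature lies outside this hunk):
int[] states = taskStateType.convertToStateArray(); // hypothetical accessor
for (int state : states) {                          // no-op on new int[0]; NPE on null
    // handle state
}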

8
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/ZKNodeType.java

@ -22,10 +22,10 @@ package org.apache.dolphinscheduler.common.enums;
public enum ZKNodeType {
/**
* 0 do not send warning;
* 1 send if process success;
* 2 send if process failed;
* 3 send if process ending;
* 0 master node;
* 1 worker node;
* 2 dead_server node;
* 3 task_queue node;
*/
MASTER, WORKER, DEAD_SERVER, TASK_QUEUE;
}

6
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/model/TaskNode.java

@ -293,14 +293,14 @@ public class TaskNode {
public TaskTimeoutParameter getTaskTimeoutParameter() {
if(StringUtils.isNotEmpty(this.getTimeout())){
String formatStr = String.format("%s,%s", TaskTimeoutStrategy.WARN.name(), TaskTimeoutStrategy.FAILED.name());
String timeout = this.getTimeout().replace(formatStr,TaskTimeoutStrategy.WARNFAILED.name());
return JSON.parseObject(timeout,TaskTimeoutParameter.class);
String taskTimeout = this.getTimeout().replace(formatStr,TaskTimeoutStrategy.WARNFAILED.name());
return JSON.parseObject(taskTimeout,TaskTimeoutParameter.class);
}
return new TaskTimeoutParameter(false);
}
public boolean isConditionsTask(){
return this.getType().toUpperCase().equals(TaskType.CONDITIONS.toString());
return TaskType.CONDITIONS.toString().equalsIgnoreCase(this.getType());
}
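Flipping the comparison so the constant comes first makes the check both null-safe and case-insensitive; a minimal sketch of the difference:
String type = null;
// old form: type.toUpperCase().equals(...) -> NullPointerException
// new form: false, no NPE
boolean isConditions = TaskType.CONDITIONS.toString().equalsIgnoreCase(type);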
@Override

10
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/process/ResourceInfo.java

@ -23,6 +23,16 @@ public class ResourceInfo {
/**
* res the name of the resource that was uploaded
*/
private int id;
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
private String res;
public String getRes() {

3
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/AbstractParameters.java

@ -17,6 +17,7 @@
package org.apache.dolphinscheduler.common.task;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import java.util.LinkedHashMap;
import java.util.List;
@ -31,7 +32,7 @@ public abstract class AbstractParameters implements IParameters {
public abstract boolean checkParameters();
@Override
public abstract List<String> getResourceFilesList();
public abstract List<ResourceInfo> getResourceFilesList();
/**
* local parameters

4
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/IParameters.java

@ -16,6 +16,8 @@
*/
package org.apache.dolphinscheduler.common.task;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import java.util.List;
/**
@ -34,5 +36,5 @@ public interface IParameters {
*
* @return resource files list
*/
List<String> getResourceFilesList();
List<ResourceInfo> getResourceFilesList();
}

3
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/conditions/ConditionsParameters.java

@ -18,6 +18,7 @@ package org.apache.dolphinscheduler.common.task.conditions;
import org.apache.dolphinscheduler.common.enums.DependentRelation;
import org.apache.dolphinscheduler.common.model.DependentTaskModel;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import java.util.List;
@ -41,7 +42,7 @@ public class ConditionsParameters extends AbstractParameters {
}
@Override
public List<String> getResourceFilesList() {
public List<ResourceInfo> getResourceFilesList() {
return null;
}

3
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/datax/DataxParameters.java

@ -20,6 +20,7 @@ import java.util.ArrayList;
import java.util.List;
import org.apache.commons.lang.StringUtils;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
/**
@ -198,7 +199,7 @@ public class DataxParameters extends AbstractParameters {
}
@Override
public List<String> getResourceFilesList() {
public List<ResourceInfo> getResourceFilesList() {
return new ArrayList<>();
}

3
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/dependent/DependentParameters.java

@ -18,6 +18,7 @@ package org.apache.dolphinscheduler.common.task.dependent;
import org.apache.dolphinscheduler.common.enums.DependentRelation;
import org.apache.dolphinscheduler.common.model.DependentTaskModel;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import java.util.ArrayList;
@ -36,7 +37,7 @@ public class DependentParameters extends AbstractParameters {
}
@Override
public List<String> getResourceFilesList() {
public List<ResourceInfo> getResourceFilesList() {
return new ArrayList<>();
}

29
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/flink/FlinkParameters.java

@ -19,10 +19,10 @@ package org.apache.dolphinscheduler.common.task.flink;
import org.apache.dolphinscheduler.common.enums.ProgramType;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import java.util.Collections;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
/**
* flink parameters
@ -50,35 +50,35 @@ public class FlinkParameters extends AbstractParameters {
private String mainArgs;
/**
* slot个数
* slot count
*/
private int slot;
/**
*Yarn application的名字
*Yarn application name
*/
private String appName;
/**
* taskManager 数量
* taskManager count
*/
private int taskManager;
/**
* jobManagerMemory 内存大小
* job manager memory
*/
private String jobManagerMemory ;
/**
* taskManagerMemory内存大小
* task manager memory
*/
private String taskManagerMemory;
/**
* resource list
*/
private List<ResourceInfo> resourceList;
private List<ResourceInfo> resourceList = new ArrayList<>();
/**
* The YARN queue to submit to
@ -207,16 +207,11 @@ public class FlinkParameters extends AbstractParameters {
@Override
public List<String> getResourceFilesList() {
if(resourceList != null ) {
List<String> resourceFiles = resourceList.stream()
.map(ResourceInfo::getRes).collect(Collectors.toList());
if(mainJar != null) {
resourceFiles.add(mainJar.getRes());
}
return resourceFiles;
public List<ResourceInfo> getResourceFilesList() {
if (mainJar != null && !resourceList.contains(mainJar)) {
resourceList.add(mainJar);
}
return Collections.emptyList();
return resourceList;
}
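The same mainJar-folding pattern is applied to MapreduceParameters and SparkParameters below. A usage sketch of the contains() guard, assuming the usual getter/setter pair on FlinkParameters; note the dedup relies on ResourceInfo equality, which is reference equality unless equals() is overridden:
FlinkParameters params = new FlinkParameters();
ResourceInfo mainJar = new ResourceInfo();
mainJar.setId(1);
mainJar.setRes("main.jar");
params.setMainJar(mainJar);
params.getResourceFilesList(); // appends main.jar once
params.getResourceFilesList(); // still a single entry, thanks to the contains() check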

3
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/http/HttpParameters.java

@ -19,6 +19,7 @@ package org.apache.dolphinscheduler.common.task.http;
import org.apache.dolphinscheduler.common.enums.HttpCheckCondition;
import org.apache.dolphinscheduler.common.enums.HttpMethod;
import org.apache.dolphinscheduler.common.process.HttpProperty;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.commons.lang.StringUtils;
@ -62,7 +63,7 @@ public class HttpParameters extends AbstractParameters {
}
@Override
public List<String> getResourceFilesList() {
public List<ResourceInfo> getResourceFilesList() {
return new ArrayList<>();
}

20
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/mr/MapreduceParameters.java

@ -19,10 +19,10 @@ package org.apache.dolphinscheduler.common.task.mr;
import org.apache.dolphinscheduler.common.enums.ProgramType;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import java.util.Collections;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
public class MapreduceParameters extends AbstractParameters {
@ -54,7 +54,7 @@ public class MapreduceParameters extends AbstractParameters {
/**
* resource list
*/
private List<ResourceInfo> resourceList;
private List<ResourceInfo> resourceList = new ArrayList<>();
/**
* program type
@ -125,16 +125,12 @@ public class MapreduceParameters extends AbstractParameters {
}
@Override
public List<String> getResourceFilesList() {
if(resourceList != null ) {
List<String> resourceFiles = resourceList.stream()
.map(ResourceInfo::getRes).collect(Collectors.toList());
if(mainJar != null) {
resourceFiles.add(mainJar.getRes());
}
return resourceFiles;
public List<ResourceInfo> getResourceFilesList() {
if (mainJar != null && !resourceList.contains(mainJar)) {
resourceList.add(mainJar);
}
return Collections.emptyList();
return resourceList;
}
@Override

3
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/procedure/ProcedureParameters.java

@ -16,6 +16,7 @@
*/
package org.apache.dolphinscheduler.common.task.procedure;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.commons.lang.StringUtils;
@ -74,7 +75,7 @@ public class ProcedureParameters extends AbstractParameters {
}
@Override
public List<String> getResourceFilesList() {
public List<ResourceInfo> getResourceFilesList() {
return new ArrayList<>();
}

10
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/python/PythonParameters.java

@ -21,7 +21,6 @@ import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import java.util.List;
import java.util.stream.Collectors;
public class PythonParameters extends AbstractParameters {
/**
@ -56,12 +55,7 @@ public class PythonParameters extends AbstractParameters {
}
@Override
public List<String> getResourceFilesList() {
if (resourceList != null) {
return resourceList.stream()
.map(p -> p.getRes()).collect(Collectors.toList());
}
return null;
public List<ResourceInfo> getResourceFilesList() {
return this.resourceList;
}
}

9
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/shell/ShellParameters.java

@ -59,12 +59,7 @@ public class ShellParameters extends AbstractParameters {
}
@Override
public List<String> getResourceFilesList() {
if (resourceList != null) {
return resourceList.stream()
.map(p -> p.getRes()).collect(Collectors.toList());
}
return null;
public List<ResourceInfo> getResourceFilesList() {
return resourceList;
}
}

16
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/spark/SparkParameters.java

@ -19,9 +19,10 @@ package org.apache.dolphinscheduler.common.task.spark;
import org.apache.dolphinscheduler.common.enums.ProgramType;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
/**
* spark parameters
@ -77,7 +78,7 @@ public class SparkParameters extends AbstractParameters {
/**
* resource list
*/
private List<ResourceInfo> resourceList;
private List<ResourceInfo> resourceList = new ArrayList<>();
/**
* The YARN queue to submit to
@ -218,15 +219,12 @@ public class SparkParameters extends AbstractParameters {
return mainJar != null && programType != null && sparkVersion != null;
}
@Override
public List<String> getResourceFilesList() {
if(resourceList !=null ) {
this.resourceList.add(mainJar);
return resourceList.stream()
.map(ResourceInfo::getRes).collect(Collectors.toList());
public List<ResourceInfo> getResourceFilesList() {
if (mainJar != null && !resourceList.contains(mainJar)) {
resourceList.add(mainJar);
}
return null;
return resourceList;
}
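The contains(mainJar) deduplication used here and in the MapReduce version assumes ResourceInfo compares by value; if equals is not overridden, contains falls back to identity and mainJar would be appended on every call. A sketch of the override the check relies on, comparing by the same getRes() value the old code mapped (the hashCode pairing follows the usual contract; whether ResourceInfo already defines this is not shown in this diff):
import java.util.Objects;
@Override
public boolean equals(Object o) {
    if (this == o) {
        return true;
    }
    if (!(o instanceof ResourceInfo)) {
        return false;
    }
    // compare by resource name/path rather than object identity
    return Objects.equals(getRes(), ((ResourceInfo) o).getRes());
}
@Override
public int hashCode() {
    return Objects.hash(getRes());
}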

3
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/sql/SqlParameters.java

@@ -16,6 +16,7 @@
*/
package org.apache.dolphinscheduler.common.task.sql;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.commons.lang.StringUtils;
@@ -189,7 +190,7 @@ public class SqlParameters extends AbstractParameters {
}
@Override
public List<String> getResourceFilesList() {
public List<ResourceInfo> getResourceFilesList() {
return new ArrayList<>();
}

3
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/sqoop/SqoopParameters.java

@@ -16,6 +16,7 @@
*/
package org.apache.dolphinscheduler.common.task.sqoop;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.utils.StringUtils;
@@ -111,7 +112,7 @@ public class SqoopParameters extends AbstractParameters {
}
@Override
public List<String> getResourceFilesList() {
public List<ResourceInfo> getResourceFilesList() {
return new ArrayList<>();
}
}

3
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/subprocess/SubProcessParameters.java

@@ -15,6 +15,7 @@
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.task.subprocess;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import java.util.ArrayList;
@@ -42,7 +43,7 @@ public class SubProcessParameters extends AbstractParameters {
}
@Override
public List<String> getResourceFilesList() {
public List<ResourceInfo> getResourceFilesList() {
return new ArrayList<>();
}
}

2
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/thread/Stopper.java

@@ -23,7 +23,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
*/
public class Stopper {
private static volatile AtomicBoolean signal = new AtomicBoolean(false);
private static AtomicBoolean signal = new AtomicBoolean(false);
public static final boolean isStopped(){
return signal.get();
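Dropping volatile is safe here because the reference is assigned once during class initialization, and all visibility guarantees for the flag's value come from the AtomicBoolean itself. Marking the field final would state that single assignment explicitly; a one-line sketch (a suggestion, not part of this change):
private static final AtomicBoolean signal = new AtomicBoolean(false);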

36
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/thread/ThreadPoolExecutors.java

@@ -74,21 +74,21 @@ public class ThreadPoolExecutors {
* @param event
*/
public void execute(final Runnable event) {
Executor executor = getExecutor();
if (executor == null) {
logger.error("Cannot execute [" + event + "] because the executor is missing.");
Executor eventExecutor = getExecutor();
if (eventExecutor == null) {
logger.error("Cannot execute [{}}] because the executor is missing.", event);
} else {
executor.execute(event);
eventExecutor.execute(event);
}
}
public Future<?> submit(Runnable event) {
Executor executor = getExecutor();
if (executor == null) {
logger.error("Cannot submit [" + event + "] because the executor is missing.");
Executor eventExecutor = getExecutor();
if (eventExecutor == null) {
logger.error("Cannot submit [{}}] because the executor is missing.", event);
} else {
return executor.submit(event);
return eventExecutor.submit(event);
}
return null;
@@ -97,11 +97,11 @@
public Future<?> submit(Callable<?> task) {
Executor executor = getExecutor();
if (executor == null) {
logger.error("Cannot submit [" + task + "] because the executor is missing.");
Executor taskExecutor = getExecutor();
if (taskExecutor == null) {
logger.error("Cannot submit [{}] because the executor is missing.", task);
} else {
return executor.submit(task);
return taskExecutor.submit(task);
}
return null;
@@ -110,8 +110,8 @@
public void printStatus() {
Executor executor = getExecutor();
executor.getStatus().dumpInfo();
Executor printExecutor = getExecutor();
printExecutor.getStatus().dumpInfo();
}
@@ -125,7 +125,7 @@
List<Runnable> wasRunning = executor.threadPoolExecutor
.shutdownNow();
if (!wasRunning.isEmpty()) {
logger.info(executor + " had " + wasRunning + " on shutdown");
logger.info("{} had {} on shutdown", executor, wasRunning);
}
}
}
@@ -138,7 +138,7 @@
/**
* how long to retain excess threads
*/
final long keepAliveTimeInMillis = 1000;
static final long KEEP_ALIVE_TIME_IN_MILLIS = 1000;
/**
* the thread pool executor that services the requests
*/
@@ -146,7 +146,7 @@
/**
* work queue to use - unbounded queue
*/
final BlockingQueue<Runnable> q = new LinkedBlockingQueue<Runnable>();
final BlockingQueue<Runnable> q = new LinkedBlockingQueue<>();
private final String name;
private static final AtomicLong seqids = new AtomicLong(0);
private final long id;
@@ -156,7 +156,7 @@
this.name = name;
//create the thread pool executor
this.threadPoolExecutor = new TrackingThreadPoolExecutor(
maxThreads, maxThreads, keepAliveTimeInMillis,
maxThreads, maxThreads, KEEP_ALIVE_TIME_IN_MILLIS,
TimeUnit.MILLISECONDS, q);
// name the threads for this threadpool
ThreadFactoryBuilder tfb = new ThreadFactoryBuilder();
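The logging changes in this file swap string concatenation for SLF4J {} placeholders, which defers building the message (including event.toString()) until the statement is actually rendered. A minimal sketch of the pattern, assuming an SLF4J logger; the class name is illustrative:
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class PlaceholderLoggingSketch {
    private static final Logger logger = LoggerFactory.getLogger(PlaceholderLoggingSketch.class);
    void reject(Runnable event) {
        // event.toString() runs only if the message is actually emitted
        logger.error("Cannot execute [{}] because the executor is missing.", event);
    }
}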

103
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/ConnectionUtils.java

@@ -16,86 +16,35 @@
*/
package org.apache.dolphinscheduler.common.utils;
import java.util.Arrays;
import java.util.Objects;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.sql.*;
public class ConnectionUtils {
public static final Logger logger = LoggerFactory.getLogger(ConnectionUtils.class);
private static ConnectionUtils instance;
ConnectionUtils() {
}
public static ConnectionUtils getInstance() {
if (null == instance) {
syncInit();
}
return instance;
}
private static synchronized void syncInit() {
if (instance == null) {
instance = new ConnectionUtils();
}
}
public void release(ResultSet rs, Statement stmt, Connection conn) {
try {
if (rs != null) {
rs.close();
rs = null;
}
} catch (SQLException e) {
logger.error(e.getMessage(),e);
} finally {
try {
if (stmt != null) {
stmt.close();
stmt = null;
}
} catch (SQLException e) {
logger.error(e.getMessage(),e);
} finally {
try {
if (conn != null) {
conn.close();
conn = null;
}
} catch (SQLException e) {
logger.error(e.getMessage(),e);
}
}
}
}
public static void releaseResource(ResultSet rs, PreparedStatement ps, Connection conn) {
ConnectionUtils.getInstance().release(rs,ps,conn);
if (null != rs) {
try {
rs.close();
} catch (SQLException e) {
logger.error(e.getMessage(),e);
}
}
if (null != ps) {
try {
ps.close();
} catch (SQLException e) {
logger.error(e.getMessage(),e);
}
}
if (null != conn) {
try {
conn.close();
} catch (SQLException e) {
logger.error(e.getMessage(),e);
}
}
}
public static final Logger logger = LoggerFactory.getLogger(ConnectionUtils.class);
private ConnectionUtils() {
throw new IllegalStateException("ConnectionUtils class");
}
/**
* release resource
* @param resources resources
*/
public static void releaseResource(AutoCloseable... resources) {
if (resources == null || resources.length == 0) {
return;
}
Arrays.stream(resources).filter(Objects::nonNull)
.forEach(resource -> {
try {
resource.close();
} catch (Exception e) {
logger.error(e.getMessage(), e);
}
});
}
}
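The varargs overload lets a caller release any number of JDBC resources in one call: null entries are filtered out, each close() is attempted independently, and failures are only logged. A hedged usage sketch; dataSource and the query are illustrative, not from this diff:
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import javax.sql.DataSource;
// hypothetical caller: resources are passed in close-first order
public static void pingDatabase(DataSource dataSource) throws SQLException {
    Connection conn = null;
    PreparedStatement ps = null;
    ResultSet rs = null;
    try {
        conn = dataSource.getConnection();
        ps = conn.prepareStatement("SELECT 1");
        rs = ps.executeQuery();
    } finally {
        // one call replaces the old nested try/finally ladder
        ConnectionUtils.releaseResource(rs, ps, conn);
    }
}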

49
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java

@@ -26,6 +26,7 @@ import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONException;
import com.alibaba.fastjson.JSONObject;
import org.apache.commons.io.IOUtils;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.FileSystem;
@@ -415,6 +416,22 @@ public class HadoopUtils implements Closeable {
}
}
/**
* hdfs resource dir
*
* @param resourceType resource type
* @param tenantCode tenant code
* @return hdfs resource dir
*/
public static String getHdfsDir(ResourceType resourceType,String tenantCode) {
String hdfsDir = "";
if (resourceType.equals(ResourceType.FILE)) {
hdfsDir = getHdfsResDir(tenantCode);
} else if (resourceType.equals(ResourceType.UDF)) {
hdfsDir = getHdfsUdfDir(tenantCode);
}
return hdfsDir;
}
/**
* hdfs resource dir
*
@@ -450,22 +467,42 @@
* get absolute path and name for file on hdfs
*
* @param tenantCode tenant code
* @param filename file name
* @param fileName file name
* @return get absolute path and name for file on hdfs
*/
/**
* get hdfs file name
*
* @param resourceType resource type
* @param tenantCode tenant code
* @param fileName file name
* @return hdfs file name
*/
public static String getHdfsFileName(ResourceType resourceType, String tenantCode, String fileName) {
return String.format("%s/%s", getHdfsDir(resourceType,tenantCode), fileName);
}
/**
* get absolute path and name for resource file on hdfs
*
* @param tenantCode tenant code
* @param fileName file name
* @return get absolute path and name for file on hdfs
*/
public static String getHdfsFilename(String tenantCode, String filename) {
return String.format("%s/%s", getHdfsResDir(tenantCode), filename);
public static String getHdfsResourceFileName(String tenantCode, String fileName) {
return String.format("%s/%s", getHdfsResDir(tenantCode), fileName);
}
/**
* get absolute path and name for udf file on hdfs
*
* @param tenantCode tenant code
* @param filename file name
* @param fileName file name
* @return get absolute path and name for udf file on hdfs
*/
public static String getHdfsUdfFilename(String tenantCode, String filename) {
return String.format("%s/%s", getHdfsUdfDir(tenantCode), filename);
public static String getHdfsUdfFileName(String tenantCode, String fileName) {
return String.format("%s/%s", getHdfsUdfDir(tenantCode), fileName);
}
/**
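Taken together, getHdfsDir and getHdfsFileName resolve a tenant-scoped HDFS path by resource type, while getHdfsResourceFileName and getHdfsUdfFileName remain as type-specific shortcuts. An illustrative sketch; the tenant code and file names are example values, not from this diff:
import org.apache.dolphinscheduler.common.enums.ResourceType;
// resolves to <resource dir for tenant "default">/wordcount.txt
String dataFile = HadoopUtils.getHdfsFileName(ResourceType.FILE, "default", "wordcount.txt");
// resolves to <udf dir for tenant "default">/my_udf.jar
String udfJar = HadoopUtils.getHdfsFileName(ResourceType.UDF, "default", "my_udf.jar");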

16
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/OSUtils.java

@@ -352,13 +352,7 @@ public class OSUtils {
return sb.toString();
} finally {
if (br != null) {
try {
br.close();
} catch (Exception e) {
logger.error(e.getMessage(), e);
}
}
IOUtils.closeQuietly(br);
}
}
@@ -408,7 +402,7 @@
* whether is windows
* @return true if windows
*/
public static boolean isWindows() { ;
public static boolean isWindows() {
return getOSName().startsWith("Windows");
}
@@ -425,13 +419,13 @@
* @return check memory and cpu usage
*/
public static Boolean checkResource(double systemCpuLoad, double systemReservedMemory){
// judging usage
// system load average
double loadAverage = OSUtils.loadAverage();
//
// system available physical memory
double availablePhysicalMemorySize = OSUtils.availablePhysicalMemorySize();
if(loadAverage > systemCpuLoad || availablePhysicalMemorySize < systemReservedMemory){
logger.warn("load or availablePhysicalMemorySize(G) is too high, it's availablePhysicalMemorySize(G):{},loadAvg:{}", availablePhysicalMemorySize , loadAverage);
logger.warn("load is too high or availablePhysicalMemorySize(G) is too low, it's availablePhysicalMemorySize(G):{},loadAvg:{}", availablePhysicalMemorySize , loadAverage);
return false;
}else{
return true;
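An illustrative call site for checkResource; the thresholds below are example values, not defaults from this codebase:
// refuse new work above this load average, or below this free-memory floor (GB)
double maxCpuLoadAvg = 8.0;
double reservedMemoryGb = 0.3;
if (!OSUtils.checkResource(maxCpuLoadAvg, reservedMemoryGb)) {
    // node is under pressure: delay or reject task dispatch
}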

2
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/PropertyUtils.java

@@ -71,7 +71,7 @@ public class PropertyUtils {
*
* @return judge whether resource upload startup
*/
public static Boolean getResUploadStartupState(){
public static boolean getResUploadStartupState(){
String resUploadStartupType = PropertyUtils.getString(Constants.RES_UPLOAD_STARTUP_TYPE);
ResUploadType resUploadType = ResUploadType.valueOf(resUploadStartupType);
return resUploadType == ResUploadType.HDFS || resUploadType == ResUploadType.S3;
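Returning the primitive boolean instead of the Boolean wrapper removes any auto-unboxing NullPointerException risk at call sites; a sketch of the guarded call:
// with a Boolean return type, a null result would throw on unboxing here;
// the primitive return makes the condition unconditionally safe
if (PropertyUtils.getResUploadStartupState()) {
    // resource upload is backed by HDFS or S3
}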

8
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/placeholder/TimePlaceholderUtils.java

@@ -35,12 +35,12 @@ public class TimePlaceholderUtils {
/**
* Prefix of the position to be replaced
*/
public static final String placeholderPrefix = "$[";
public static final String PLACEHOLDER_PREFIX = "$[";
/**
* The suffix of the position to be replaced
*/
public static final String placeholderSuffix = "]";
public static final String PLACEHOLDER_SUFFIX = "]";
/**
* Replaces all placeholders of format {@code ${name}} with the value returned
@@ -66,7 +66,7 @@
* be ignored ({@code true}) or cause an exception ({@code false})
*/
private static PropertyPlaceholderHelper getPropertyPlaceholderHelper(boolean ignoreUnresolvablePlaceholders) {
return new PropertyPlaceholderHelper(placeholderPrefix, placeholderSuffix, null, ignoreUnresolvablePlaceholders);
return new PropertyPlaceholderHelper(PLACEHOLDER_PREFIX, PLACEHOLDER_SUFFIX, null, ignoreUnresolvablePlaceholders);
}
/**
@@ -503,7 +503,7 @@
* @return calculate need minutes
*/
public static Integer calcMinutes(String minuteExpression) {
int index = minuteExpression.indexOf("/");
int index = minuteExpression.indexOf('/');
String calcExpression;
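For context, a sketch of the placeholder shape the renamed constants delimit; the date pattern is illustrative, and indexOf('/') is simply the single-character variant of the old indexOf("/"):
// "$[yyyy-MM-dd]" is an example placeholder, not taken from this diff
String expression = "$[yyyy-MM-dd]";
boolean looksLikePlaceholder = expression.startsWith(TimePlaceholderUtils.PLACEHOLDER_PREFIX)
        && expression.endsWith(TimePlaceholderUtils.PLACEHOLDER_SUFFIX);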

Some files were not shown because too many files have changed in this diff.