Commit 48c2cb7934 by oceanos, 5 years ago
  1. 226  charts/README.md
  2. 52  charts/dolphinscheduler/Chart.yaml
  3. 226  charts/dolphinscheduler/README.md
  4. 44  charts/dolphinscheduler/templates/NOTES.txt
  5. 149  charts/dolphinscheduler/templates/_helpers.tpl
  6. 41  charts/dolphinscheduler/templates/configmap-dolphinscheduler-alert.yaml
  7. 34  charts/dolphinscheduler/templates/configmap-dolphinscheduler-master.yaml
  8. 39  charts/dolphinscheduler/templates/configmap-dolphinscheduler-worker.yaml
  9. 228  charts/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml
  10. 161  charts/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml
  11. 102  charts/dolphinscheduler/templates/deployment-dolphinscheduler-frontend.yaml
  12. 43  charts/dolphinscheduler/templates/ingress.yaml
  13. 35  charts/dolphinscheduler/templates/pvc-dolphinscheduler-alert.yaml
  14. 35  charts/dolphinscheduler/templates/pvc-dolphinscheduler-api.yaml
  15. 35  charts/dolphinscheduler/templates/pvc-dolphinscheduler-frontend.yaml
  16. 29  charts/dolphinscheduler/templates/secret-external-postgresql.yaml
  17. 247  charts/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml
  18. 275  charts/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml
  19. 35  charts/dolphinscheduler/templates/svc-dolphinscheduler-api.yaml
  20. 35  charts/dolphinscheduler/templates/svc-dolphinscheduler-frontend.yaml
  21. 36  charts/dolphinscheduler/templates/svc-dolphinscheduler-master-headless.yaml
  22. 36  charts/dolphinscheduler/templates/svc-dolphinscheduler-worker-headless.yaml
  23. 355  charts/dolphinscheduler/values.yaml
  24. 33  dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/DataSourceController.java
  25. 152  dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java
  26. 29  dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/Directory.java
  27. 8  dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/FileLeaf.java
  28. 193  dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/ResourceComponent.java
  29. 28  dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/filter/IFilter.java
  30. 100  dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/filter/ResourceFilter.java
  31. 130  dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/visitor/ResourceTreeVisitor.java
  32. 31  dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/visitor/Visitor.java
  33. 12  dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java
  34. 32  dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/DataSourceService.java
  35. 34  dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java
  36. 8  dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessInstanceService.java
  37. 559  dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java
  38. 4  dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/UdfFuncService.java
  39. 4  dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/DataSourceControllerTest.java
  40. 58  dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/dto/resources/filter/ResourceFilterTest.java
  41. 82  dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/dto/resources/visitor/ResourceTreeVisitorTest.java
  42. 2  dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/enums/StatusTest.java
  43. 112  dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ResourcesServiceTest.java
  44. 7  dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/utils/CheckUtilsTest.java
  45. 3  dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
  46. 12  dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/AuthorizationType.java
  47. 44  dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/DbConnectType.java
  48. 10  dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/process/ResourceInfo.java
  49. 3  dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/AbstractParameters.java
  50. 4  dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/IParameters.java
  51. 3  dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/conditions/ConditionsParameters.java
  52. 3  dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/datax/DataxParameters.java
  53. 3  dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/dependent/DependentParameters.java
  54. 29  dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/flink/FlinkParameters.java
  55. 3  dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/http/HttpParameters.java
  56. 20  dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/mr/MapreduceParameters.java
  57. 3  dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/procedure/ProcedureParameters.java
  58. 10  dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/python/PythonParameters.java
  59. 9  dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/shell/ShellParameters.java
  60. 16  dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/spark/SparkParameters.java
  61. 3  dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/sql/SqlParameters.java
  62. 3  dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/sqoop/SqoopParameters.java
  63. 3  dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/subprocess/SubProcessParameters.java
  64. 2  dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/thread/Stopper.java
  65. 49  dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
  66. 14  dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/task/FlinkParametersTest.java
  67. 6  dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/StringTest.java
  68. 15  dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/OracleDataSource.java
  69. 15  dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/ProcessDefinition.java
  70. 68  dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/Resource.java
  71. 11  dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/ProcessDefinitionMapper.java
  72. 53  dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/ResourceMapper.java
  73. 15  dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/UdfFuncMapper.java
  74. 11  dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProcessDefinitionMapper.xml
  75. 7  dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProcessInstanceMapper.xml
  76. 79  dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ResourceMapper.xml
  77. 24  dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/UdfFuncMapper.xml
  78. 57  dolphinscheduler-dao/src/test/java/org/apache/dolphinscheduler/dao/mapper/ResourceMapperTest.java
  79. 6  dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/SubProcessTaskExecThread.java
  80. 50  dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/runner/TaskScheduleThread.java
  81. 5  dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractYarnTask.java
  82. 25  dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/flink/FlinkTask.java
  83. 26  dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/mr/MapReduceTask.java
  84. 17  dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/shell/ShellTask.java
  85. 26  dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/spark/SparkTask.java
  86. 31  dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java
  87. 4  dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sqoop/SqoopTask.java
  88. 60  dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/task/shell/ShellTaskTest.java
  89. 2  dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/task/spark/SparkTaskTest.java
  90. 31  dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/permission/PermissionCheck.java
  91. 26  dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java
  92. 1  dolphinscheduler-ui/package.json
  93. 90  dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/flink.vue
  94. 93  dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/mr.vue
  95. 50  dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/python.vue
  96. 80  dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/shell.vue
  97. 97  dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/spark.vue
  98. 38  dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/plugIn/jsPlumbHandle.js
  99. 2  dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/plugIn/util.js
  100. 4  dolphinscheduler-ui/src/js/conf/home/pages/dag/definitionDetails.vue
  Some files were not shown because too many files have changed in this diff.

226 charts/README.md
@@ -0,0 +1,226 @@
# Dolphin Scheduler
[Dolphin Scheduler](https://dolphinscheduler.apache.org) is a distributed, easy-to-extend visual DAG workflow scheduling system, dedicated to solving the complex dependencies in data processing and making the scheduling system work out of the box.
## Introduction
This chart bootstraps a [Dolphin Scheduler](https://dolphinscheduler.apache.org) distributed deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Kubernetes 1.10+
- PV provisioner support in the underlying infrastructure
## Installing the Chart
To install the chart with the release name `dolphinscheduler`:
```bash
$ git clone https://github.com/apache/incubator-dolphinscheduler.git
$ cd incubator-dolphinscheduler
$ helm install --name dolphinscheduler .
```
These commands deploy Dolphin Scheduler on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
> **Tip**: List all releases using `helm list`
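Chart parameters (documented in the [configuration](#configuration) section below) can be overridden at install time with `--set`; for example, to change the time zone and the master replica count (the values shown are purely illustrative):
```bash
$ helm install --name dolphinscheduler \
    --set timezone=UTC \
    --set master.replicas=2 \
    .
```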
## Uninstalling the Chart
To uninstall/delete the `dolphinscheduler` deployment:
```bash
$ helm delete --purge dolphinscheduler
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
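Note that `helm delete --purge` is Helm 2 syntax; on Helm 3, where `--purge` no longer exists, the equivalent would be:
```bash
$ helm uninstall dolphinscheduler
```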
## Configuration
The following table lists the configurable parameters of the Dolphin Scheduler chart and their default values.
| Parameter | Description | Default |
| --------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------- |
| `timezone` | The time zone used by all Dolphin Scheduler components | `Asia/Shanghai` |
| `image.registry` | Docker image registry for Dolphin Scheduler | `docker.io` |
| `image.repository` | Docker image repository for Dolphin Scheduler | `dolphinscheduler` |
| `image.tag` | Docker image version for Dolphin Scheduler | `1.2.1` |
| `image.imagePullPolicy` | Image pull policy. One of Always, Never, IfNotPresent | `IfNotPresent` |
| `imagePullSecrets` | ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images | `[]` |
| | | |
| `postgresql.enabled` | If there is no external PostgreSQL, Dolphin Scheduler will use the bundled internal PostgreSQL by default | `true` |
| `postgresql.postgresqlUsername` | The username for internal PostgreSQL | `root` |
| `postgresql.postgresqlPassword` | The password for internal PostgreSQL | `root` |
| `postgresql.postgresqlDatabase` | The database for internal PostgreSQL | `dolphinscheduler` |
| `postgresql.persistence.enabled` | Set `postgresql.persistence.enabled` to `true` to mount a new volume for internal PostgreSQL | `false` |
| `postgresql.persistence.size` | `PersistentVolumeClaim` Size | `20Gi` |
| `postgresql.persistence.storageClass` | PostgreSQL data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `externalDatabase.host` | The database host Dolphin Scheduler uses when an external PostgreSQL exists and `postgresql.enabled` is set to `false` | `localhost` |
| `externalDatabase.port` | The database port Dolphin Scheduler uses when an external PostgreSQL exists and `postgresql.enabled` is set to `false` | `5432` |
| `externalDatabase.username` | The database username Dolphin Scheduler uses when an external PostgreSQL exists and `postgresql.enabled` is set to `false` | `root` |
| `externalDatabase.password` | The database password Dolphin Scheduler uses when an external PostgreSQL exists and `postgresql.enabled` is set to `false` | `root` |
| `externalDatabase.database` | The database name Dolphin Scheduler uses when an external PostgreSQL exists and `postgresql.enabled` is set to `false` | `dolphinscheduler` |
| | | |
| `zookeeper.enabled` | If there is no external Zookeeper, Dolphin Scheduler will use the bundled internal Zookeeper by default | `true` |
| `zookeeper.taskQueue` | Specify task queue for `master` and `worker` | `zookeeper` |
| `zookeeper.persistence.enabled` | Set `zookeeper.persistence.enabled` to `true` to mount a new volume for internal Zookeeper | `false` |
| `zookeeper.persistence.size` | `PersistentVolumeClaim` Size | `20Gi` |
| `zookeeper.persistence.storageClass` | Zookeeper data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `externalZookeeper.taskQueue` | The task queue for `master` and `worker` when an external Zookeeper exists and `zookeeper.enabled` is set to `false` | `zookeeper` |
| `externalZookeeper.zookeeperQuorum` | The Zookeeper quorum to use when an external Zookeeper exists and `zookeeper.enabled` is set to `false` | `127.0.0.1:2181` |
| | | |
| `master.podManagementPolicy` | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down | `Parallel` |
| `master.replicas` | Replicas is the desired number of replicas of the given Template | `3` |
| `master.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `master.tolerations` | If specified, the pod's tolerations | `{}` |
| `master.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `master.configmap.MASTER_EXEC_THREADS` | Master execute thread num | `100` |
| `master.configmap.MASTER_EXEC_TASK_NUM` | Master execute task number in parallel | `20` |
| `master.configmap.MASTER_HEARTBEAT_INTERVAL` | Master heartbeat interval | `10` |
| `master.configmap.MASTER_TASK_COMMIT_RETRYTIMES` | Master commit task retry times | `5` |
| `master.configmap.MASTER_TASK_COMMIT_INTERVAL` | Master commit task interval | `1000` |
| `master.configmap.MASTER_MAX_CPULOAD_AVG` | The master server only schedules work when the CPU load average is below this value; default: the number of CPU cores * 2 | `100` |
| `master.configmap.MASTER_RESERVED_MEMORY` | The master server only schedules work when free memory is above this reserved amount; default: physical memory * 1/10, in GB | `0.1` |
| `master.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `master.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `master.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `master.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `master.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `master.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `master.persistentVolumeClaim.enabled` | Set `master.persistentVolumeClaim.enabled` to `true` to mount a new volume for `master` | `false` |
| `master.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `master.persistentVolumeClaim.storageClassName` | `Master` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `master.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `worker.podManagementPolicy` | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down | `Parallel` |
| `worker.replicas` | Replicas is the desired number of replicas of the given Template | `3` |
| `worker.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `worker.tolerations` | If specified, the pod's tolerations | `{}` |
| `worker.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `worker.configmap.WORKER_EXEC_THREADS` | Worker execute thread num | `100` |
| `worker.configmap.WORKER_HEARTBEAT_INTERVAL` | Worker heartbeat interval | `10` |
| `worker.configmap.WORKER_FETCH_TASK_NUM` | The number of tasks the worker fetches at a time | `3` |
| `worker.configmap.WORKER_MAX_CPULOAD_AVG` | The worker server only accepts tasks when the CPU load average is below this value; default: the number of CPU cores * 2 | `100` |
| `worker.configmap.WORKER_RESERVED_MEMORY` | The worker server only accepts tasks when free memory is above this reserved amount; default: physical memory * 1/10, in GB | `0.1` |
| `worker.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH` | User data directory path (user-configurable); make sure the directory exists and has read/write permissions | `/tmp/dolphinscheduler` |
| `worker.configmap.DOLPHINSCHEDULER_ENV` | System environment path (user-configurable); see `values.yaml` for details | `[]` |
| `worker.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `worker.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `worker.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `worker.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `worker.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `worker.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `worker.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `worker.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `worker.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `worker.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `worker.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `worker.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `worker.persistentVolumeClaim.enabled` | Set `worker.persistentVolumeClaim.enabled` to `true` to enable `persistentVolumeClaim` for `worker` | `false` |
| `worker.persistentVolumeClaim.dataPersistentVolume.enabled` | Set `worker.persistentVolumeClaim.dataPersistentVolume.enabled` to `true` to mount a data volume for `worker` | `false` |
| `worker.persistentVolumeClaim.dataPersistentVolume.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `worker.persistentVolumeClaim.dataPersistentVolume.storageClassName` | `Worker` data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `worker.persistentVolumeClaim.dataPersistentVolume.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| `worker.persistentVolumeClaim.logsPersistentVolume.enabled` | Set `worker.persistentVolumeClaim.logsPersistentVolume.enabled` to `true` to mount a logs volume for `worker` | `false` |
| `worker.persistentVolumeClaim.logsPersistentVolume.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `worker.persistentVolumeClaim.logsPersistentVolume.storageClassName` | `Worker` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `worker.persistentVolumeClaim.logsPersistentVolume.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `alert.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
| `alert.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
| `alert.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
| `alert.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
| `alert.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `alert.tolerations` | If specified, the pod's tolerations | `{}` |
| `alert.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `alert.configmap.XLS_FILE_PATH` | XLS file path | `/tmp/xls` |
| `alert.configmap.MAIL_SERVER_HOST` | Mail `SERVER HOST` | `nil` |
| `alert.configmap.MAIL_SERVER_PORT` | Mail `SERVER PORT` | `nil` |
| `alert.configmap.MAIL_SENDER` | Mail `SENDER` | `nil` |
| `alert.configmap.MAIL_USER` | Mail `USER` | `nil` |
| `alert.configmap.MAIL_PASSWD` | Mail `PASSWORD` | `nil` |
| `alert.configmap.MAIL_SMTP_STARTTLS_ENABLE` | Mail `SMTP STARTTLS` enable | `false` |
| `alert.configmap.MAIL_SMTP_SSL_ENABLE` | Mail `SMTP SSL` enable | `false` |
| `alert.configmap.MAIL_SMTP_SSL_TRUST` | Mail `SMTP SSL TRUST` | `nil` |
| `alert.configmap.ENTERPRISE_WECHAT_ENABLE` | `Enterprise Wechat` enable | `false` |
| `alert.configmap.ENTERPRISE_WECHAT_CORP_ID` | `Enterprise Wechat` corp id | `nil` |
| `alert.configmap.ENTERPRISE_WECHAT_SECRET` | `Enterprise Wechat` secret | `nil` |
| `alert.configmap.ENTERPRISE_WECHAT_AGENT_ID` | `Enterprise Wechat` agent id | `nil` |
| `alert.configmap.ENTERPRISE_WECHAT_USERS` | `Enterprise Wechat` users | `nil` |
| `alert.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `alert.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `alert.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `alert.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `alert.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `alert.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `alert.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `alert.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `alert.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `alert.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `alert.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `alert.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `alert.persistentVolumeClaim.enabled` | Set `alert.persistentVolumeClaim.enabled` to `true` to mount a new volume for `alert` | `false` |
| `alert.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `alert.persistentVolumeClaim.storageClassName` | `Alert` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `alert.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `api.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
| `api.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
| `api.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
| `api.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
| `api.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `api.tolerations` | If specified, the pod's tolerations | `{}` |
| `api.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `api.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `api.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `api.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `api.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `api.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `api.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `api.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `api.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `api.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `api.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `api.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `api.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `api.persistentVolumeClaim.enabled` | Set `api.persistentVolumeClaim.enabled` to `true` to mount a new volume for `api` | `false` |
| `api.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `api.persistentVolumeClaim.storageClassName` | `api` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `api.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `frontend.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
| `frontend.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
| `frontend.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
| `frontend.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
| `frontend.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `frontend.tolerations` | If specified, the pod's tolerations | `{}` |
| `frontend.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `frontend.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `frontend.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `frontend.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `frontend.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `frontend.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `frontend.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `frontend.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `frontend.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `frontend.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `frontend.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `frontend.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `frontend.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `frontend.persistentVolumeClaim.enabled` | Set `frontend.persistentVolumeClaim.enabled` to `true` to mount a new volume for `frontend` | `false` |
| `frontend.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `frontend.persistentVolumeClaim.storageClassName` | `frontend` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `frontend.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `ingress.enabled` | Enable ingress | `false` |
| `ingress.host` | Ingress host | `dolphinscheduler.org` |
| `ingress.path` | Ingress path | `/` |
| `ingress.tls.enabled` | Enable ingress tls | `false` |
| `ingress.tls.hosts` | Ingress tls hosts | `dolphinscheduler.org` |
| `ingress.tls.secretName` | Ingress tls secret name | `dolphinscheduler-tls` |
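As a worked example, the `externalDatabase` parameters above can point the scheduler at an existing PostgreSQL instance once the bundled one is disabled (`pg.example.org` is a placeholder host):
```bash
$ helm install --name dolphinscheduler \
    --set postgresql.enabled=false \
    --set externalDatabase.host=pg.example.org \
    --set externalDatabase.port=5432 \
    --set externalDatabase.username=root \
    --set externalDatabase.password=root \
    --set externalDatabase.database=dolphinscheduler \
    .
```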
For more information please refer to the [chart](https://github.com/apache/incubator-dolphinscheduler.git) documentation.

52 charts/dolphinscheduler/Chart.yaml
@@ -0,0 +1,52 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: v2
name: dolphinscheduler
description: Dolphin Scheduler is a distributed, easy-to-extend visual DAG workflow scheduling system, dedicated to solving the complex dependencies in data processing and making the scheduling system work out of the box.
home: https://dolphinscheduler.apache.org
icon: https://dolphinscheduler.apache.org/img/hlogo_colorful.svg
keywords:
- dolphinscheduler
- Scheduler
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 1.2.1
dependencies:
- name: postgresql
version: 8.x.x
repository: https://charts.bitnami.com/bitnami
condition: postgresql.enabled
- name: zookeeper
version: 5.x.x
repository: https://charts.bitnami.com/bitnami
  condition: zookeeper.enabled
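# Note: since the chart declares the postgresql and zookeeper dependencies above,
# they must be fetched into charts/ before a local install, e.g. (assuming a
# checkout of this repository): helm dependency update charts/dolphinscheduler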

226 charts/dolphinscheduler/README.md
@@ -0,0 +1,226 @@
# Dolphin Scheduler
[Dolphin Scheduler](https://dolphinscheduler.apache.org) is a distributed, easy-to-extend visual DAG workflow scheduling system, dedicated to solving the complex dependencies in data processing and making the scheduling system work out of the box.
## Introduction
This chart bootstraps a [Dolphin Scheduler](https://dolphinscheduler.apache.org) distributed deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Kubernetes 1.10+
- PV provisioner support in the underlying infrastructure
## Installing the Chart
To install the chart with the release name `dolphinscheduler`:
```bash
$ git clone https://github.com/apache/incubator-dolphinscheduler.git
$ cd incubator-dolphinscheduler
$ helm install --name dolphinscheduler .
```
These commands deploy Dolphin Scheduler on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
> **Tip**: List all releases using `helm list`
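The parameters documented in the [configuration](#configuration) section below can also be set at install time; for instance, to scale out the masters and workers (the replica counts shown are illustrative):
```bash
$ helm install --name dolphinscheduler \
    --set master.replicas=5 \
    --set worker.replicas=5 \
    .
```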
## Uninstalling the Chart
To uninstall/delete the `dolphinscheduler` deployment:
```bash
$ helm delete --purge dolphinscheduler
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following table lists the configurable parameters of the Dolphin Scheduler chart and their default values.
| Parameter | Description | Default |
| --------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------- |
| `timezone` | The time zone used by all Dolphin Scheduler components | `Asia/Shanghai` |
| `image.registry` | Docker image registry for Dolphin Scheduler | `docker.io` |
| `image.repository` | Docker image repository for Dolphin Scheduler | `dolphinscheduler` |
| `image.tag` | Docker image version for Dolphin Scheduler | `1.2.1` |
| `image.imagePullPolicy` | Image pull policy. One of Always, Never, IfNotPresent | `IfNotPresent` |
| `imagePullSecrets` | ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images | `[]` |
| | | |
| `postgresql.enabled` | If there is no external PostgreSQL, Dolphin Scheduler will use the bundled internal PostgreSQL by default | `true` |
| `postgresql.postgresqlUsername` | The username for internal PostgreSQL | `root` |
| `postgresql.postgresqlPassword` | The password for internal PostgreSQL | `root` |
| `postgresql.postgresqlDatabase` | The database for internal PostgreSQL | `dolphinscheduler` |
| `postgresql.persistence.enabled` | Set `postgresql.persistence.enabled` to `true` to mount a new volume for internal PostgreSQL | `false` |
| `postgresql.persistence.size` | `PersistentVolumeClaim` Size | `20Gi` |
| `postgresql.persistence.storageClass` | PostgreSQL data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `externalDatabase.host` | The database host Dolphin Scheduler uses when an external PostgreSQL exists and `postgresql.enabled` is set to `false` | `localhost` |
| `externalDatabase.port` | The database port Dolphin Scheduler uses when an external PostgreSQL exists and `postgresql.enabled` is set to `false` | `5432` |
| `externalDatabase.username` | The database username Dolphin Scheduler uses when an external PostgreSQL exists and `postgresql.enabled` is set to `false` | `root` |
| `externalDatabase.password` | The database password Dolphin Scheduler uses when an external PostgreSQL exists and `postgresql.enabled` is set to `false` | `root` |
| `externalDatabase.database` | The database name Dolphin Scheduler uses when an external PostgreSQL exists and `postgresql.enabled` is set to `false` | `dolphinscheduler` |
| | | |
| `zookeeper.enabled` | If there is no external Zookeeper, Dolphin Scheduler will use the bundled internal Zookeeper by default | `true` |
| `zookeeper.taskQueue` | Specify task queue for `master` and `worker` | `zookeeper` |
| `zookeeper.persistence.enabled` | Set `zookeeper.persistence.enabled` to `true` to mount a new volume for internal Zookeeper | `false` |
| `zookeeper.persistence.size` | `PersistentVolumeClaim` Size | `20Gi` |
| `zookeeper.persistence.storageClass` | Zookeeper data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `externalZookeeper.taskQueue` | The task queue for `master` and `worker` when an external Zookeeper exists and `zookeeper.enabled` is set to `false` | `zookeeper` |
| `externalZookeeper.zookeeperQuorum` | The Zookeeper quorum to use when an external Zookeeper exists and `zookeeper.enabled` is set to `false` | `127.0.0.1:2181` |
| | | |
| `master.podManagementPolicy` | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down | `Parallel` |
| `master.replicas` | Replicas is the desired number of replicas of the given Template | `3` |
| `master.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `master.tolerations` | If specified, the pod's tolerations | `{}` |
| `master.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `master.configmap.MASTER_EXEC_THREADS` | Master execute thread num | `100` |
| `master.configmap.MASTER_EXEC_TASK_NUM` | Master execute task number in parallel | `20` |
| `master.configmap.MASTER_HEARTBEAT_INTERVAL` | Master heartbeat interval | `10` |
| `master.configmap.MASTER_TASK_COMMIT_RETRYTIMES` | Master commit task retry times | `5` |
| `master.configmap.MASTER_TASK_COMMIT_INTERVAL` | Master commit task interval | `1000` |
| `master.configmap.MASTER_MAX_CPULOAD_AVG` | The master server only schedules work when the CPU load average is below this value; default: the number of CPU cores * 2 | `100` |
| `master.configmap.MASTER_RESERVED_MEMORY` | The master server only schedules work when free memory is above this reserved amount; default: physical memory * 1/10, in GB | `0.1` |
| `master.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `master.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `master.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `master.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `master.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `master.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `master.persistentVolumeClaim.enabled` | Set `master.persistentVolumeClaim.enabled` to `true` to mount a new volume for `master` | `false` |
| `master.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `master.persistentVolumeClaim.storageClassName` | `Master` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `master.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `worker.podManagementPolicy` | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down | `Parallel` |
| `worker.replicas` | Replicas is the desired number of replicas of the given Template | `3` |
| `worker.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `worker.tolerations` | If specified, the pod's tolerations | `{}` |
| `worker.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `worker.configmap.WORKER_EXEC_THREADS` | Worker execute thread num | `100` |
| `worker.configmap.WORKER_HEARTBEAT_INTERVAL` | Worker heartbeat interval | `10` |
| `worker.configmap.WORKER_FETCH_TASK_NUM` | The number of tasks the worker fetches at a time | `3` |
| `worker.configmap.WORKER_MAX_CPULOAD_AVG` | The worker server only accepts tasks when the CPU load average is below this value; default: the number of CPU cores * 2 | `100` |
| `worker.configmap.WORKER_RESERVED_MEMORY` | The worker server only accepts tasks when free memory is above this reserved amount; default: physical memory * 1/10, in GB | `0.1` |
| `worker.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH` | User data directory path (user-configurable); make sure the directory exists and has read/write permissions | `/tmp/dolphinscheduler` |
| `worker.configmap.DOLPHINSCHEDULER_ENV` | System environment path (user-configurable); see `values.yaml` for details | `[]` |
| `worker.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `worker.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `worker.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `worker.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `worker.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `worker.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `worker.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `worker.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `worker.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `worker.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `worker.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `worker.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `worker.persistentVolumeClaim.enabled` | Set `worker.persistentVolumeClaim.enabled` to `true` to enable `persistentVolumeClaim` for `worker` | `false` |
| `worker.persistentVolumeClaim.dataPersistentVolume.enabled` | Set `worker.persistentVolumeClaim.dataPersistentVolume.enabled` to `true` to mount a data volume for `worker` | `false` |
| `worker.persistentVolumeClaim.dataPersistentVolume.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `worker.persistentVolumeClaim.dataPersistentVolume.storageClassName` | `Worker` data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `worker.persistentVolumeClaim.dataPersistentVolume.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| `worker.persistentVolumeClaim.logsPersistentVolume.enabled` | Set `worker.persistentVolumeClaim.logsPersistentVolume.enabled` to `true` to mount a logs volume for `worker` | `false` |
| `worker.persistentVolumeClaim.logsPersistentVolume.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `worker.persistentVolumeClaim.logsPersistentVolume.storageClassName` | `Worker` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `worker.persistentVolumeClaim.logsPersistentVolume.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `alert.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
| `alert.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
| `alert.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
| `alert.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
| `alert.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `alert.tolerations` | If specified, the pod's tolerations | `{}` |
| `alert.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `alert.configmap.XLS_FILE_PATH` | XLS file path | `/tmp/xls` |
| `alert.configmap.MAIL_SERVER_HOST` | Mail `SERVER HOST` | `nil` |
| `alert.configmap.MAIL_SERVER_PORT` | Mail `SERVER PORT` | `nil` |
| `alert.configmap.MAIL_SENDER` | Mail `SENDER` | `nil` |
| `alert.configmap.MAIL_USER` | Mail `USER` | `nil` |
| `alert.configmap.MAIL_PASSWD` | Mail `PASSWORD` | `nil` |
| `alert.configmap.MAIL_SMTP_STARTTLS_ENABLE` | Mail `SMTP STARTTLS` enable | `false` |
| `alert.configmap.MAIL_SMTP_SSL_ENABLE` | Mail `SMTP SSL` enable | `false` |
| `alert.configmap.MAIL_SMTP_SSL_TRUST` | Mail `SMTP SSL TRUST` | `nil` |
| `alert.configmap.ENTERPRISE_WECHAT_ENABLE` | `Enterprise Wechat` enable | `false` |
| `alert.configmap.ENTERPRISE_WECHAT_CORP_ID` | `Enterprise Wechat` corp id | `nil` |
| `alert.configmap.ENTERPRISE_WECHAT_SECRET` | `Enterprise Wechat` secret | `nil` |
| `alert.configmap.ENTERPRISE_WECHAT_AGENT_ID` | `Enterprise Wechat` agent id | `nil` |
| `alert.configmap.ENTERPRISE_WECHAT_USERS` | `Enterprise Wechat` users | `nil` |
| `alert.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `alert.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `alert.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `alert.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `alert.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `alert.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `alert.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `alert.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `alert.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `alert.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `alert.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `alert.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `alert.persistentVolumeClaim.enabled` | Set `alert.persistentVolumeClaim.enabled` to `true` to mount a new volume for `alert` | `false` |
| `alert.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `alert.persistentVolumeClaim.storageClassName` | `Alert` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `alert.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `api.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
| `api.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
| `api.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
| `api.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
| `api.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `api.tolerations` | If specified, the pod's tolerations | `{}` |
| `api.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `api.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `api.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `api.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `api.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `api.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `api.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `api.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `api.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `api.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `api.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `api.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `api.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `api.persistentVolumeClaim.enabled` | Set `api.persistentVolumeClaim.enabled` to `true` to mount a new volume for `api` | `false` |
| `api.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `api.persistentVolumeClaim.storageClassName` | `api` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `api.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `frontend.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
| `frontend.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
| `frontend.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
| `frontend.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
| `frontend.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `frontend.tolerations` | If specified, the pod's tolerations | `{}` |
| `frontend.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `frontend.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `frontend.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `frontend.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `frontend.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `frontend.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `frontend.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `frontend.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `frontend.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `frontend.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `frontend.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `frontend.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `frontend.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `frontend.persistentVolumeClaim.enabled` | Set `frontend.persistentVolumeClaim.enabled` to `true` to mount a new volume for `frontend` | `false` |
| `frontend.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `frontend.persistentVolumeClaim.storageClassName` | `frontend` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `frontend.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `ingress.enabled` | Enable ingress | `false` |
| `ingress.host` | Ingress host | `dolphinscheduler.org` |
| `ingress.path` | Ingress path | `/` |
| `ingress.tls.enabled` | Enable ingress tls | `false` |
| `ingress.tls.hosts` | Ingress tls hosts | `dolphinscheduler.org` |
| `ingress.tls.secretName` | Ingress tls secret name | `dolphinscheduler-tls` |
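For example, the ingress parameters above can expose the frontend through a host name (shown here with the documented default host):
```bash
$ helm install --name dolphinscheduler \
    --set ingress.enabled=true \
    --set ingress.host=dolphinscheduler.org \
    .
```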
For more information please refer to the [chart](https://github.com/apache/incubator-dolphinscheduler.git) documentation.

44 charts/dolphinscheduler/templates/NOTES.txt
@@ -0,0 +1,44 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
** Please be patient while the chart is being deployed **
1. Get the Dolphinscheduler URL by running:
{{- if .Values.ingress.enabled }}
export HOSTNAME=$(kubectl get ingress --namespace {{ .Release.Namespace }} {{ template "dolphinscheduler.fullname" . }} -o jsonpath='{.spec.rules[0].host}')
echo "Dolphinscheduler URL: http://$HOSTNAME/"
{{- else }}
kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "dolphinscheduler.fullname" . }}-frontend 8888:8888
{{- end }}
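2. While the pods start up, their status can be followed with the chart's instance label (as defined by `dolphinscheduler.selectorLabels` in `_helpers.tpl`):
   kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} --watch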

149 charts/dolphinscheduler/templates/_helpers.tpl
@@ -0,0 +1,149 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "dolphinscheduler.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "dolphinscheduler.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "dolphinscheduler.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "dolphinscheduler.labels" -}}
helm.sh/chart: {{ include "dolphinscheduler.chart" . }}
{{ include "dolphinscheduler.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Selector labels
*/}}
{{- define "dolphinscheduler.selectorLabels" -}}
app.kubernetes.io/name: {{ include "dolphinscheduler.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "dolphinscheduler.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "dolphinscheduler.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Create a default docker image registry.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dolphinscheduler.image.registry" -}}
{{- $registry := default "docker.io" .Values.image.registry -}}
{{- printf "%s" $registry | trunc 63 | trimSuffix "/" -}}
{{- end -}}
{{/*
Create a default docker image repository.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dolphinscheduler.image.repository" -}}
{{- printf "%s/%s:%s" (include "dolphinscheduler.image.registry" .) .Values.image.repository .Values.image.tag -}}
{{- end -}}
{{/*
Create a default fully qualified postgresql name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dolphinscheduler.postgresql.fullname" -}}
{{- $name := default "postgresql" .Values.postgresql.nameOverride -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified zookeeper name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dolphinscheduler.zookeeper.fullname" -}}
{{- $name := default "zookeeper" .Values.zookeeper.nameOverride -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified zookeeper quorum.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dolphinscheduler.zookeeper.quorum" -}}
{{- $port := default "2181" (.Values.zookeeper.service.port | toString) -}}
{{- printf "%s:%s" (include "dolphinscheduler.zookeeper.fullname" .) $port | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default dolphinscheduler worker base dir.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dolphinscheduler.worker.base.dir" -}}
{{- $name := default "/tmp/dolphinscheduler" .Values.worker.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH -}}
{{- printf "%s" $name | trunc 63 | trimSuffix "/" -}}
{{- end -}}
{{/*
Create a default dolphinscheduler worker data download dir.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dolphinscheduler.worker.data.download.dir" -}}
{{- printf "%s%s" (include "dolphinscheduler.worker.base.dir" .) "/download" -}}
{{- end -}}
{{/*
Create a default dolphinscheduler worker process exec dir.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dolphinscheduler.worker.process.exec.dir" -}}
{{- printf "%s%s" (include "dolphinscheduler.worker.base.dir" .) "/exec" -}}
{{- end -}}
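A quick way to sanity-check these helpers is to render the chart offline. A minimal sketch, assuming Helm 3 syntax and a hypothetical release named ds; with the default values the helpers should resolve as shown in the comments:

    helm template ds ./charts/dolphinscheduler > rendered.yaml
    # dolphinscheduler.fullname            -> ds-dolphinscheduler
    # dolphinscheduler.postgresql.fullname -> ds-postgresql
    # dolphinscheduler.zookeeper.quorum    -> ds-zookeeper:2181
    # dolphinscheduler.image.repository    -> docker.io/dolphinscheduler:1.2.1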

41
charts/dolphinscheduler/templates/configmap-dolphinscheduler-alert.yaml

@@ -0,0 +1,41 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{{- if .Values.alert.configmap }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-alert
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-alert
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
data:
XLS_FILE_PATH: {{ .Values.alert.configmap.XLS_FILE_PATH | quote }}
MAIL_SERVER_HOST: {{ .Values.alert.configmap.MAIL_SERVER_HOST | quote }}
MAIL_SERVER_PORT: {{ .Values.alert.configmap.MAIL_SERVER_PORT | quote }}
MAIL_SENDER: {{ .Values.alert.configmap.MAIL_SENDER | quote }}
MAIL_USER: {{ .Values.alert.configmap.MAIL_USER | quote }}
MAIL_PASSWD: {{ .Values.alert.configmap.MAIL_PASSWD | quote }}
MAIL_SMTP_STARTTLS_ENABLE: {{ .Values.alert.configmap.MAIL_SMTP_STARTTLS_ENABLE | quote }}
MAIL_SMTP_SSL_ENABLE: {{ .Values.alert.configmap.MAIL_SMTP_SSL_ENABLE | quote }}
MAIL_SMTP_SSL_TRUST: {{ .Values.alert.configmap.MAIL_SMTP_SSL_TRUST | quote }}
ENTERPRISE_WECHAT_ENABLE: {{ .Values.alert.configmap.ENTERPRISE_WECHAT_ENABLE | quote }}
ENTERPRISE_WECHAT_CORP_ID: {{ .Values.alert.configmap.ENTERPRISE_WECHAT_CORP_ID | quote }}
ENTERPRISE_WECHAT_SECRET: {{ .Values.alert.configmap.ENTERPRISE_WECHAT_SECRET | quote }}
ENTERPRISE_WECHAT_AGENT_ID: {{ .Values.alert.configmap.ENTERPRISE_WECHAT_AGENT_ID | quote }}
ENTERPRISE_WECHAT_USERS: {{ .Values.alert.configmap.ENTERPRISE_WECHAT_USERS | quote }}
{{- end }}

34
charts/dolphinscheduler/templates/configmap-dolphinscheduler-master.yaml

@@ -0,0 +1,34 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{{- if .Values.master.configmap }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-master
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
data:
MASTER_EXEC_THREADS: {{ .Values.master.configmap.MASTER_EXEC_THREADS | quote }}
MASTER_EXEC_TASK_NUM: {{ .Values.master.configmap.MASTER_EXEC_TASK_NUM | quote }}
MASTER_HEARTBEAT_INTERVAL: {{ .Values.master.configmap.MASTER_HEARTBEAT_INTERVAL | quote }}
MASTER_TASK_COMMIT_RETRYTIMES: {{ .Values.master.configmap.MASTER_TASK_COMMIT_RETRYTIMES | quote }}
MASTER_TASK_COMMIT_INTERVAL: {{ .Values.master.configmap.MASTER_TASK_COMMIT_INTERVAL | quote }}
MASTER_MAX_CPULOAD_AVG: {{ .Values.master.configmap.MASTER_MAX_CPULOAD_AVG | quote }}
MASTER_RESERVED_MEMORY: {{ .Values.master.configmap.MASTER_RESERVED_MEMORY | quote }}
{{- end }}

39
charts/dolphinscheduler/templates/configmap-dolphinscheduler-worker.yaml

@@ -0,0 +1,39 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{{- if .Values.worker.configmap }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-worker
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
data:
WORKER_EXEC_THREADS: {{ .Values.worker.configmap.WORKER_EXEC_THREADS | quote }}
WORKER_HEARTBEAT_INTERVAL: {{ .Values.worker.configmap.WORKER_HEARTBEAT_INTERVAL | quote }}
WORKER_FETCH_TASK_NUM: {{ .Values.worker.configmap.WORKER_FETCH_TASK_NUM | quote }}
WORKER_MAX_CPULOAD_AVG: {{ .Values.worker.configmap.WORKER_MAX_CPULOAD_AVG | quote }}
WORKER_RESERVED_MEMORY: {{ .Values.worker.configmap.WORKER_RESERVED_MEMORY | quote }}
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: {{ include "dolphinscheduler.worker.base.dir" . | quote }}
DOLPHINSCHEDULER_DATA_DOWNLOAD_BASEDIR_PATH: {{ include "dolphinscheduler.worker.data.download.dir" . | quote }}
DOLPHINSCHEDULER_PROCESS_EXEC_BASEPATH: {{ include "dolphinscheduler.worker.process.exec.dir" . | quote }}
dolphinscheduler_env.sh: |-
{{- range .Values.worker.configmap.DOLPHINSCHEDULER_ENV }}
{{ . }}
{{- end }}
{{- end }}
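The dolphinscheduler_env.sh key above is assembled line by line from the DOLPHINSCHEDULER_ENV list in values.yaml. An illustrative mapping, with made-up entries:

    # values.yaml (illustrative):
    #   worker:
    #     configmap:
    #       DOLPHINSCHEDULER_ENV:
    #         - "export HADOOP_HOME=/opt/soft/hadoop"
    #         - "export JAVA_HOME=/opt/soft/java"
    # renders into the ConfigMap as:
    #   dolphinscheduler_env.sh: |-
    #     export HADOOP_HOME=/opt/soft/hadoop
    #     export JAVA_HOME=/opt/soft/java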

228
charts/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml

@@ -0,0 +1,228 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-alert
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-alert
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: alert
spec:
replicas: {{ .Values.alert.replicas }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-alert
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: alert
strategy:
type: {{ .Values.alert.strategy.type | quote }}
rollingUpdate:
maxSurge: {{ .Values.alert.strategy.rollingUpdate.maxSurge | quote }}
maxUnavailable: {{ .Values.alert.strategy.rollingUpdate.maxUnavailable | quote }}
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-alert
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: alert
spec:
{{- if .Values.alert.affinity }}
affinity: {{- toYaml .Values.alert.affinity | nindent 8 }}
{{- end }}
{{- if .Values.alert.nodeSelector }}
nodeSelector: {{- toYaml .Values.alert.nodeSelector | nindent 8 }}
{{- end }}
{{- if .Values.alert.tolerations }}
tolerations: {{- toYaml .Values.alert.tolerations | nindent 8 }}
{{- end }}
initContainers:
- name: init-postgresql
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
counter=$((counter+1))
if [ $counter == 5 ]; then
echo "Error: Couldn't connect to postgresql."
exit 1
fi
echo "Trying to connect to postgresql at ${POSTGRESQL_HOST}:${POSTGRESQL_PORT}. Attempt $counter."
sleep 60
done
env:
- name: POSTGRESQL_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
containers:
- name: {{ include "dolphinscheduler.fullname" . }}-alert
image: {{ include "dolphinscheduler.image.repository" . | quote }}
args:
- "alert-server"
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: TZ
value: {{ .Values.timezone }}
- name: XLS_FILE_PATH
valueFrom:
configMapKeyRef:
key: XLS_FILE_PATH
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SERVER_HOST
valueFrom:
configMapKeyRef:
key: MAIL_SERVER_HOST
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SERVER_PORT
valueFrom:
configMapKeyRef:
key: MAIL_SERVER_PORT
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SENDER
valueFrom:
configMapKeyRef:
key: MAIL_SENDER
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_USER
valueFrom:
configMapKeyRef:
key: MAIL_USER
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_PASSWD
valueFrom:
configMapKeyRef:
key: MAIL_PASSWD
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SMTP_STARTTLS_ENABLE
valueFrom:
configMapKeyRef:
key: MAIL_SMTP_STARTTLS_ENABLE
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SMTP_SSL_ENABLE
valueFrom:
configMapKeyRef:
key: MAIL_SMTP_SSL_ENABLE
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SMTP_SSL_TRUST
valueFrom:
configMapKeyRef:
key: MAIL_SMTP_SSL_TRUST
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: ENTERPRISE_WECHAT_ENABLE
valueFrom:
configMapKeyRef:
key: ENTERPRISE_WECHAT_ENABLE
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: ENTERPRISE_WECHAT_CORP_ID
valueFrom:
configMapKeyRef:
key: ENTERPRISE_WECHAT_CORP_ID
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: ENTERPRISE_WECHAT_SECRET
valueFrom:
configMapKeyRef:
key: ENTERPRISE_WECHAT_SECRET
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: ENTERPRISE_WECHAT_AGENT_ID
valueFrom:
configMapKeyRef:
key: ENTERPRISE_WECHAT_AGENT_ID
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: ENTERPRISE_WECHAT_USERS
valueFrom:
configMapKeyRef:
key: ENTERPRISE_WECHAT_USERS
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: POSTGRESQL_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
- name: POSTGRESQL_USERNAME
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlUsername }}
{{- else }}
value: {{ .Values.externalDatabase.username | quote }}
{{- end }}
- name: POSTGRESQL_PASSWORD
valueFrom:
secretKeyRef:
{{- if .Values.postgresql.enabled }}
name: {{ template "dolphinscheduler.postgresql.fullname" . }}
key: postgresql-password
{{- else }}
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
key: db-password
{{- end }}
{{- if .Values.alert.livenessProbe.enabled }}
livenessProbe:
exec:
command:
- sh
- /root/checkpoint.sh
- alert-server
initialDelaySeconds: {{ .Values.alert.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.alert.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.alert.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.alert.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.alert.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.alert.readinessProbe.enabled }}
readinessProbe:
exec:
command:
- sh
- /root/checkpoint.sh
- alert-server
initialDelaySeconds: {{ .Values.alert.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.alert.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.alert.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.alert.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.alert.readinessProbe.failureThreshold }}
{{- end }}
volumeMounts:
- mountPath: "/opt/dolphinscheduler/logs"
name: {{ include "dolphinscheduler.fullname" . }}-alert
volumes:
- name: {{ include "dolphinscheduler.fullname" . }}-alert
{{- if .Values.alert.persistentVolumeClaim.enabled }}
persistentVolumeClaim:
claimName: {{ include "dolphinscheduler.fullname" . }}-alert
{{- else }}
emptyDir: {}
{{- end }}
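The postgresql.enabled switch above recurs in every component, so pointing the whole chart at an external database only requires flipping it and supplying the externalDatabase values. A sketch, assuming Helm 3 syntax; the release name, host, and credentials are placeholders:

    helm install my-release ./charts/dolphinscheduler \
      --set postgresql.enabled=false \
      --set externalDatabase.host=pg.example.com \
      --set externalDatabase.port=5432 \
      --set externalDatabase.username=root \
      --set externalDatabase.password=changeit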

161
charts/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml

@@ -0,0 +1,161 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-api
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: api
spec:
replicas: {{ .Values.api.replicas }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: api
strategy:
type: {{ .Values.api.strategy.type | quote }}
rollingUpdate:
maxSurge: {{ .Values.api.strategy.rollingUpdate.maxSurge | quote }}
maxUnavailable: {{ .Values.api.strategy.rollingUpdate.maxUnavailable | quote }}
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: api
spec:
{{- if .Values.api.affinity }}
affinity: {{- toYaml .Values.api.affinity | nindent 8 }}
{{- end }}
{{- if .Values.api.nodeSelector }}
nodeSelector: {{- toYaml .Values.api.nodeSelector | nindent 8 }}
{{- end }}
{{- if .Values.api.tolerations }}
tolerations: {{- toYaml .Values.api.tolerations | nindent 8 }}
{{- end }}
initContainers:
- name: init-postgresql
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
counter=$((counter+1))
if [ $counter == 5 ]; then
echo "Error: Couldn't connect to postgresql."
exit 1
fi
echo "Trying to connect to postgresql at ${POSTGRESQL_HOST}:${POSTGRESQL_PORT}. Attempt $counter."
sleep 60
done
env:
- name: POSTGRESQL_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
containers:
- name: {{ include "dolphinscheduler.fullname" . }}-api
image: {{ include "dolphinscheduler.image.repository" . | quote }}
args:
- "api-server"
ports:
- containerPort: 12345
name: tcp-port
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: TZ
value: {{ .Values.timezone }}
- name: POSTGRESQL_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
- name: POSTGRESQL_USERNAME
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlUsername }}
{{- else }}
value: {{ .Values.externalDatabase.username | quote }}
{{- end }}
- name: POSTGRESQL_PASSWORD
valueFrom:
secretKeyRef:
{{- if .Values.postgresql.enabled }}
name: {{ template "dolphinscheduler.postgresql.fullname" . }}
key: postgresql-password
{{- else }}
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
key: db-password
{{- end }}
- name: ZOOKEEPER_QUORUM
{{- if .Values.zookeeper.enabled }}
value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"
{{- else }}
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
{{- end }}
{{- if .Values.api.livenessProbe.enabled }}
livenessProbe:
tcpSocket:
port: 12345
initialDelaySeconds: {{ .Values.api.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.api.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.api.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.api.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.api.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.api.readinessProbe.enabled }}
readinessProbe:
tcpSocket:
port: 12345
initialDelaySeconds: {{ .Values.api.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.api.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.api.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.api.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.api.readinessProbe.failureThreshold }}
{{- end }}
volumeMounts:
- mountPath: "/opt/dolphinscheduler/logs"
name: {{ include "dolphinscheduler.fullname" . }}-api
volumes:
- name: {{ include "dolphinscheduler.fullname" . }}-api
{{- if .Values.api.persistentVolumeClaim.enabled }}
persistentVolumeClaim:
claimName: {{ include "dolphinscheduler.fullname" . }}-api
{{- else }}
emptyDir: {}
{{- end }}

102
charts/dolphinscheduler/templates/deployment-dolphinscheduler-frontend.yaml

@@ -0,0 +1,102 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-frontend
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: frontend
spec:
replicas: {{ .Values.frontend.replicas }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: frontend
strategy:
type: {{ .Values.frontend.strategy.type | quote }}
rollingUpdate:
maxSurge: {{ .Values.frontend.strategy.rollingUpdate.maxSurge | quote }}
maxUnavailable: {{ .Values.frontend.strategy.rollingUpdate.maxUnavailable | quote }}
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: frontend
spec:
{{- if .Values.frontend.affinity }}
affinity: {{- toYaml .Values.frontend.affinity | nindent 8 }}
{{- end }}
{{- if .Values.frontend.nodeSelector }}
nodeSelector: {{- toYaml .Values.frontend.nodeSelector | nindent 8 }}
{{- end }}
{{- if .Values.frontend.tolerations }}
tolerations: {{- toYaml .Values.frontend.tolerations | nindent 8 }}
{{- end }}
containers:
- name: {{ include "dolphinscheduler.fullname" . }}-frontend
image: {{ include "dolphinscheduler.image.repository" . | quote }}
args:
- "frontend"
ports:
- containerPort: 8888
name: tcp-port
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: TZ
value: {{ .Values.timezone }}
- name: FRONTEND_API_SERVER_HOST
value: '{{ include "dolphinscheduler.fullname" . }}-api'
- name: FRONTEND_API_SERVER_PORT
value: "12345"
{{- if .Values.frontend.livenessProbe.enabled }}
livenessProbe:
tcpSocket:
port: 8888
initialDelaySeconds: {{ .Values.frontend.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.frontend.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.frontend.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.frontend.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.frontend.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.frontend.readinessProbe.enabled }}
readinessProbe:
tcpSocket:
port: 8888
initialDelaySeconds: {{ .Values.frontend.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.frontend.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.frontend.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.frontend.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.frontend.readinessProbe.failureThreshold }}
{{- end }}
volumeMounts:
- mountPath: "/var/log/nginx"
name: {{ include "dolphinscheduler.fullname" . }}-frontend
volumes:
- name: {{ include "dolphinscheduler.fullname" . }}-frontend
{{- if .Values.frontend.persistentVolumeClaim.enabled }}
persistentVolumeClaim:
claimName: {{ include "dolphinscheduler.fullname" . }}-frontend
{{- else }}
emptyDir: {}
{{- end }}

43
charts/dolphinscheduler/templates/ingress.yaml

@@ -0,0 +1,43 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{{- if .Values.ingress.enabled }}
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: {{ include "dolphinscheduler.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
rules:
- host: {{ .Values.ingress.host }}
http:
paths:
- path: {{ .Values.ingress.path }}
backend:
serviceName: {{ include "dolphinscheduler.fullname" . }}-frontend
servicePort: tcp-port
{{- if .Values.ingress.tls.enabled }}
tls:
hosts:
{{- range .Values.ingress.tls.hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .Values.ingress.tls.secretName }}
{{- end }}
{{- end }}
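A hedged example of enabling this ingress at install time; the hostname and TLS secret name are placeholders. Note that the networking.k8s.io/v1beta1 Ingress API used above was removed in Kubernetes 1.22, so this template targets older clusters:

    helm upgrade --install my-release ./charts/dolphinscheduler \
      --set ingress.enabled=true \
      --set ingress.host=dolphinscheduler.example.com \
      --set ingress.path=/ \
      --set ingress.tls.enabled=true \
      --set ingress.tls.secretName=dolphinscheduler-tls \
      --set 'ingress.tls.hosts[0]=dolphinscheduler.example.com'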

35
charts/dolphinscheduler/templates/pvc-dolphinscheduler-alert.yaml

@@ -0,0 +1,35 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{{- if .Values.alert.persistentVolumeClaim.enabled }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-alert
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-alert
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
accessModes:
{{- range .Values.alert.persistentVolumeClaim.accessModes }}
- {{ . | quote }}
{{- end }}
storageClassName: {{ .Values.alert.persistentVolumeClaim.storageClassName | quote }}
resources:
requests:
storage: {{ .Values.alert.persistentVolumeClaim.storage | quote }}
{{- end }}

35
charts/dolphinscheduler/templates/pvc-dolphinscheduler-api.yaml

@ -0,0 +1,35 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{{- if .Values.api.persistentVolumeClaim.enabled }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-api
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
accessModes:
{{- range .Values.api.persistentVolumeClaim.accessModes }}
- {{ . | quote }}
{{- end }}
storageClassName: {{ .Values.api.persistentVolumeClaim.storageClassName | quote }}
resources:
requests:
storage: {{ .Values.api.persistentVolumeClaim.storage | quote }}
{{- end }}

35
charts/dolphinscheduler/templates/pvc-dolphinscheduler-frontend.yaml

@@ -0,0 +1,35 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{{- if .Values.frontend.persistentVolumeClaim.enabled }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-frontend
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
accessModes:
{{- range .Values.frontend.persistentVolumeClaim.accessModes }}
- {{ . | quote }}
{{- end }}
storageClassName: {{ .Values.frontend.persistentVolumeClaim.storageClassName | quote }}
resources:
requests:
storage: {{ .Values.frontend.persistentVolumeClaim.storage | quote }}
{{- end }}

29
charts/dolphinscheduler/templates/secret-external-postgresql.yaml

@@ -0,0 +1,29 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{{- if not .Values.postgresql.enabled }}
apiVersion: v1
kind: Secret
metadata:
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-postgresql
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
type: Opaque
data:
db-password: {{ .Values.externalDatabase.password | b64enc | quote }}
{{- end }}
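The password is only base64-encoded, not encrypted, so it can be read back from the cluster by anyone allowed to get secrets. A minimal check, assuming a release named my-release:

    kubectl get secret my-release-externaldb -o jsonpath='{.data.db-password}' | base64 -d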

247
charts/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml

@@ -0,0 +1,247 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-master
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: master
spec:
podManagementPolicy: {{ .Values.master.podManagementPolicy }}
replicas: {{ .Values.master.replicas }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: master
serviceName: {{ template "dolphinscheduler.fullname" . }}-master-headless
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: master
spec:
{{- if .Values.master.affinity }}
affinity: {{- toYaml .Values.master.affinity | nindent 8 }}
{{- end }}
{{- if .Values.master.nodeSelector }}
nodeSelector: {{- toYaml .Values.master.nodeSelector | nindent 8 }}
{{- end }}
{{- if .Values.master.tolerations }}
tolerations: {{- toYaml .Values.master.tolerations | nindent 8 }}
{{- end }}
initContainers:
- name: init-zookeeper
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
echo "${ZOOKEEPER_QUORUM}" | awk -F ',' 'BEGIN{ i=1 }{ while( i <= NF ){ print $i; i++ } }' | while read line; do
while ! nc -z ${line%:*} ${line#*:}; do
counter=$((counter+1))
if [ $counter == 5 ]; then
echo "Error: Couldn't connect to zookeeper."
exit 1
fi
echo "Trying to connect to zookeeper at ${line}. Attempt $counter."
sleep 60
done
done
env:
- name: ZOOKEEPER_QUORUM
{{- if .Values.zookeeper.enabled }}
value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"
{{- else }}
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
{{- end }}
- name: init-postgresql
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
counter=$((counter+1))
if [ $counter == 5 ]; then
echo "Error: Couldn't connect to postgresql."
exit 1
fi
echo "Trying to connect to postgresql at ${POSTGRESQL_HOST}:${POSTGRESQL_PORT}. Attempt $counter."
sleep 60
done
env:
- name: POSTGRESQL_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
containers:
- name: {{ include "dolphinscheduler.fullname" . }}-master
image: {{ include "dolphinscheduler.image.repository" . | quote }}
args:
- "master-server"
ports:
- containerPort: 8888
name: unused-tcp-port
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: TZ
value: {{ .Values.timezone }}
- name: MASTER_EXEC_THREADS
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-master
key: MASTER_EXEC_THREADS
- name: MASTER_EXEC_TASK_NUM
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-master
key: MASTER_EXEC_TASK_NUM
- name: MASTER_HEARTBEAT_INTERVAL
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-master
key: MASTER_HEARTBEAT_INTERVAL
- name: MASTER_TASK_COMMIT_RETRYTIMES
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-master
key: MASTER_TASK_COMMIT_RETRYTIMES
- name: MASTER_TASK_COMMIT_INTERVAL
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-master
key: MASTER_TASK_COMMIT_INTERVAL
- name: MASTER_MAX_CPULOAD_AVG
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-master
key: MASTER_MAX_CPULOAD_AVG
- name: MASTER_RESERVED_MEMORY
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-master
key: MASTER_RESERVED_MEMORY
- name: POSTGRESQL_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
- name: POSTGRESQL_USERNAME
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlUsername }}
{{- else }}
value: {{ .Values.externalDatabase.username | quote }}
{{- end }}
- name: POSTGRESQL_PASSWORD
valueFrom:
secretKeyRef:
{{- if .Values.postgresql.enabled }}
name: {{ template "dolphinscheduler.postgresql.fullname" . }}
key: postgresql-password
{{- else }}
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
key: db-password
{{- end }}
- name: TASK_QUEUE
{{- if .Values.zookeeper.enabled }}
value: {{ .Values.zookeeper.taskQueue }}
{{- else }}
value: {{ .Values.externalZookeeper.taskQueue }}
{{- end }}
- name: ZOOKEEPER_QUORUM
{{- if .Values.zookeeper.enabled }}
value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"
{{- else }}
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
{{- end }}
{{- if .Values.master.livenessProbe.enabled }}
livenessProbe:
exec:
command:
- sh
- /root/checkpoint.sh
- master-server
initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.master.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.master.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.master.readinessProbe.enabled }}
readinessProbe:
exec:
command:
- sh
- /root/checkpoint.sh
- master-server
initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.master.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.master.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }}
{{- end }}
volumeMounts:
- mountPath: "/opt/dolphinscheduler/logs"
name: {{ include "dolphinscheduler.fullname" . }}-master
volumes:
- name: {{ include "dolphinscheduler.fullname" . }}-master
{{- if .Values.master.persistentVolumeClaim.enabled }}
persistentVolumeClaim:
claimName: {{ include "dolphinscheduler.fullname" . }}-master
{{- else }}
emptyDir: {}
{{- end }}
{{- if .Values.master.persistentVolumeClaim.enabled }}
volumeClaimTemplates:
- metadata:
name: {{ include "dolphinscheduler.fullname" . }}-master
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
accessModes:
{{- range .Values.master.persistentVolumeClaim.accessModes }}
- {{ . | quote }}
{{- end }}
storageClassName: {{ .Values.master.persistentVolumeClaim.storageClassName | quote }}
resources:
requests:
storage: {{ .Values.master.persistentVolumeClaim.storage | quote }}
{{- end }}
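Because the masters run as a StatefulSet behind the headless service declared further below, each replica gets a stable per-pod DNS name. An illustrative lookup, assuming release my-release, namespace default, and the default replica count:

    # pod 0 of the master StatefulSet is individually addressable as:
    #   my-release-dolphinscheduler-master-0.my-release-dolphinscheduler-master-headless.default.svc.cluster.local
    kubectl get pods --namespace default -l app.kubernetes.io/component=master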

275
charts/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml

@@ -0,0 +1,275 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-worker
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: worker
spec:
podManagementPolicy: {{ .Values.worker.podManagementPolicy }}
replicas: {{ .Values.worker.replicas }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: worker
serviceName: {{ template "dolphinscheduler.fullname" . }}-worker-headless
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: worker
spec:
{{- if .Values.worker.affinity }}
affinity: {{- toYaml .Values.worker.affinity | nindent 8 }}
{{- end }}
{{- if .Values.worker.nodeSelector }}
nodeSelector: {{- toYaml .Values.worker.nodeSelector | nindent 8 }}
{{- end }}
{{- if .Values.worker.tolerations }}
tolerations: {{- toYaml .Values.worker.tolerations | nindent 8 }}
{{- end }}
initContainers:
- name: init-zookeeper
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
echo "${ZOOKEEPER_QUORUM}" | awk -F ',' 'BEGIN{ i=1 }{ while( i <= NF ){ print $i; i++ } }' | while read line; do
while ! nc -z ${line%:*} ${line#*:}; do
counter=$((counter+1))
if [ $counter == 5 ]; then
echo "Error: Couldn't connect to zookeeper."
exit 1
fi
echo "Trying to connect to zookeeper at ${line}. Attempt $counter."
sleep 60
done
done
env:
- name: ZOOKEEPER_QUORUM
{{- if .Values.zookeeper.enabled }}
value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"
{{- else }}
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
{{- end }}
- name: init-postgresql
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
counter=$((counter+1))
if [ $counter == 5 ]; then
echo "Error: Couldn't connect to postgresql."
exit 1
fi
echo "Trying to connect to postgresql at ${POSTGRESQL_HOST}:${POSTGRESQL_PORT}. Attempt $counter."
sleep 60
done
env:
- name: POSTGRESQL_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
containers:
- name: {{ include "dolphinscheduler.fullname" . }}-worker
image: {{ include "dolphinscheduler.image.repository" . | quote }}
args:
- "worker-server"
ports:
- containerPort: 50051
name: "logs-port"
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: TZ
value: {{ .Values.timezone }}
- name: WORKER_EXEC_THREADS
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-worker
key: WORKER_EXEC_THREADS
- name: WORKER_FETCH_TASK_NUM
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-worker
key: WORKER_FETCH_TASK_NUM
- name: WORKER_HEARTBEAT_INTERVAL
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-worker
key: WORKER_HEARTBEAT_INTERVAL
- name: WORKER_MAX_CPULOAD_AVG
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-worker
key: WORKER_MAX_CPULOAD_AVG
- name: WORKER_RESERVED_MEMORY
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-worker
key: WORKER_RESERVED_MEMORY
- name: POSTGRESQL_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
- name: POSTGRESQL_USERNAME
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlUsername }}
{{- else }}
value: {{ .Values.externalDatabase.username | quote }}
{{- end }}
- name: POSTGRESQL_PASSWORD
valueFrom:
secretKeyRef:
{{- if .Values.postgresql.enabled }}
name: {{ template "dolphinscheduler.postgresql.fullname" . }}
key: postgresql-password
{{- else }}
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
key: db-password
{{- end }}
- name: TASK_QUEUE
{{- if .Values.zookeeper.enabled }}
value: {{ .Values.zookeeper.taskQueue }}
{{- else }}
value: {{ .Values.externalZookeeper.taskQueue }}
{{- end }}
- name: ZOOKEEPER_QUORUM
{{- if .Values.zookeeper.enabled }}
value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"
{{- else }}
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
{{- end }}
{{- if .Values.worker.livenessProbe.enabled }}
livenessProbe:
exec:
command:
- sh
- /root/checkpoint.sh
- worker-server
initialDelaySeconds: {{ .Values.worker.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.worker.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.worker.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.worker.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.worker.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.worker.readinessProbe.enabled }}
readinessProbe:
exec:
command:
- sh
- /root/checkpoint.sh
- worker-server
initialDelaySeconds: {{ .Values.worker.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.worker.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.worker.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.worker.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.worker.readinessProbe.failureThreshold }}
{{- end }}
volumeMounts:
- mountPath: {{ include "dolphinscheduler.worker.base.dir" . | quote }}
name: {{ include "dolphinscheduler.fullname" . }}-worker-data
- mountPath: "/opt/dolphinscheduler/logs"
name: {{ include "dolphinscheduler.fullname" . }}-worker-logs
- mountPath: "/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh"
subPath: "dolphinscheduler_env.sh"
name: {{ include "dolphinscheduler.fullname" . }}-worker-configmap
volumes:
- name: {{ include "dolphinscheduler.fullname" . }}-worker-data
{{- if .Values.worker.persistentVolumeClaim.dataPersistentVolume.enabled }}
persistentVolumeClaim:
claimName: {{ include "dolphinscheduler.fullname" . }}-worker-data
{{- else }}
emptyDir: {}
{{- end }}
- name: {{ include "dolphinscheduler.fullname" . }}-worker-logs
{{- if .Values.worker.persistentVolumeClaim.logsPersistentVolume.enabled }}
persistentVolumeClaim:
claimName: {{ include "dolphinscheduler.fullname" . }}-worker-logs
{{- else }}
emptyDir: {}
{{- end }}
- name: {{ include "dolphinscheduler.fullname" . }}-worker-configmap
configMap:
defaultMode: 0777
name: {{ include "dolphinscheduler.fullname" . }}-worker
items:
- key: dolphinscheduler_env.sh
path: dolphinscheduler_env.sh
{{- if .Values.worker.persistentVolumeClaim.enabled }}
volumeClaimTemplates:
{{- if .Values.worker.persistentVolumeClaim.dataPersistentVolume.enabled }}
- metadata:
name: {{ include "dolphinscheduler.fullname" . }}-worker-data
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker-data
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
accessModes:
{{- range .Values.worker.persistentVolumeClaim.dataPersistentVolume.accessModes }}
- {{ . | quote }}
{{- end }}
storageClassName: {{ .Values.worker.persistentVolumeClaim.dataPersistentVolume.storageClassName | quote }}
resources:
requests:
storage: {{ .Values.worker.persistentVolumeClaim.dataPersistentVolume.storage | quote }}
{{- end }}
{{- if .Values.worker.persistentVolumeClaim.logsPersistentVolume.enabled }}
- metadata:
name: {{ include "dolphinscheduler.fullname" . }}-worker-logs
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker-logs
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
accessModes:
{{- range .Values.worker.persistentVolumeClaim.logsPersistentVolume.accessModes }}
- {{ . | quote }}
{{- end }}
storageClassName: {{ .Values.worker.persistentVolumeClaim.logsPersistentVolume.storageClassName | quote }}
resources:
requests:
storage: {{ .Values.worker.persistentVolumeClaim.logsPersistentVolume.storage | quote }}
{{- end }}
{{- end }}
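The worker mounts the dolphinscheduler_env.sh key via subPath so that only this one file is overlaid inside the existing conf/env directory. One caveat: Kubernetes does not propagate ConfigMap updates into subPath mounts, so changing DOLPHINSCHEDULER_ENV requires restarting the worker pods. A quick verification, assuming release my-release:

    kubectl exec my-release-dolphinscheduler-worker-0 -- \
      cat /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh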

35
charts/dolphinscheduler/templates/svc-dolphinscheduler-api.yaml

@@ -0,0 +1,35 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: v1
kind: Service
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-api
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
ports:
- port: 12345
targetPort: tcp-port
protocol: TCP
name: tcp-port
selector:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: api

35
charts/dolphinscheduler/templates/svc-dolphinscheduler-frontend.yaml

@@ -0,0 +1,35 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: v1
kind: Service
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-frontend
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
ports:
- port: 8888
targetPort: tcp-port
protocol: TCP
name: tcp-port
selector:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: frontend

36
charts/dolphinscheduler/templates/svc-dolphinscheduler-master-headless.yaml

@@ -0,0 +1,36 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: v1
kind: Service
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-master-headless
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master-headless
app.kubernetes.io/instance: {{ .Release.Name }}-master-headless
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
clusterIP: "None"
ports:
- port: 8888
targetPort: unused-tcp-port
protocol: TCP
name: unused-tcp-port
selector:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: master

36
charts/dolphinscheduler/templates/svc-dolphinscheduler-worker-headless.yaml

@@ -0,0 +1,36 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: v1
kind: Service
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-worker-headless
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker-headless
app.kubernetes.io/instance: {{ .Release.Name }}-worker-headless
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
clusterIP: "None"
ports:
- port: 50051
targetPort: logs-port
protocol: TCP
name: logs-port
selector:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: worker

355
charts/dolphinscheduler/values.yaml

@ -0,0 +1,355 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Default values for dolphinscheduler-chart.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
nameOverride: ""
fullnameOverride: ""
timezone: "Asia/Shanghai"
image:
registry: "docker.io"
repository: "dolphinscheduler"
tag: "1.2.1"
pullPolicy: "IfNotPresent"
imagePullSecrets: []
# If no external PostgreSQL is available, DolphinScheduler uses this built-in PostgreSQL by default.
postgresql:
enabled: true
postgresqlUsername: "root"
postgresqlPassword: "root"
postgresqlDatabase: "dolphinscheduler"
persistence:
enabled: false
size: "20Gi"
storageClass: "-"
# If an external PostgreSQL is available, set postgresql.enabled to false.
# When postgresql.enabled is false, DolphinScheduler connects to this external database.
externalDatabase:
host: "localhost"
port: "5432"
username: "root"
password: "root"
database: "dolphinscheduler"
# If no external ZooKeeper is available, DolphinScheduler uses this built-in ZooKeeper by default.
zookeeper:
enabled: true
taskQueue: "zookeeper"
persistence:
enabled: false
size: "20Gi"
storageClass: "-"
# If an external ZooKeeper is available, set zookeeper.enabled to false.
# When zookeeper.enabled is false, DolphinScheduler connects to this external ZooKeeper.
externalZookeeper:
taskQueue: "zookeeper"
zookeeperQuorum: "127.0.0.1:2181"
master:
podManagementPolicy: "Parallel"
replicas: "3"
# NodeSelector is a selector which must be true for the pod to fit on a node.
# Selector which must match a node's labels for the pod to be scheduled on that node.
# More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
nodeSelector: {}
# Tolerations allow the pods to schedule onto nodes with matching taints.
tolerations: []
# Affinity is a group of affinity scheduling rules.
# If specified, the pod's scheduling constraints.
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
configmap:
MASTER_EXEC_THREADS: "100"
MASTER_EXEC_TASK_NUM: "20"
MASTER_HEARTBEAT_INTERVAL: "10"
MASTER_TASK_COMMIT_RETRYTIMES: "5"
MASTER_TASK_COMMIT_INTERVAL: "1000"
MASTER_MAX_CPULOAD_AVG: "100"
MASTER_RESERVED_MEMORY: "0.1"
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
livenessProbe:
enabled: true
initialDelaySeconds: "30"
periodSeconds: "30"
timeoutSeconds: "5"
failureThreshold: "3"
successThreshold: "1"
## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
readinessProbe:
enabled: true
initialDelaySeconds: "30"
periodSeconds: "30"
timeoutSeconds: "5"
failureThreshold: "3"
successThreshold: "1"
## volumeClaimTemplates is a list of claims that pods are allowed to reference.
## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
## A claim in this list takes precedence over any volumes in the template, with the same name.
persistentVolumeClaim:
enabled: false
accessModes:
- "ReadWriteOnce"
storageClassName: "-"
storage: "20Gi"
worker:
podManagementPolicy: "Parallel"
replicas: "3"
# NodeSelector is a selector which must be true for the pod to fit on a node.
# Selector which must match a node's labels for the pod to be scheduled on that node.
# More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
nodeSelector: {}
# Tolerations allow the pods to schedule onto nodes with matching taints.
tolerations: []
# Affinity is a group of affinity scheduling rules.
# If specified, the pod's scheduling constraints.
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
livenessProbe:
enabled: true
initialDelaySeconds: "30"
periodSeconds: "30"
timeoutSeconds: "5"
failureThreshold: "3"
successThreshold: "1"
## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
readinessProbe:
enabled: true
initialDelaySeconds: "30"
periodSeconds: "30"
timeoutSeconds: "5"
failureThreshold: "3"
successThreshold: "1"
configmap:
WORKER_EXEC_THREADS: "100"
WORKER_HEARTBEAT_INTERVAL: "10"
WORKER_FETCH_TASK_NUM: "3"
WORKER_MAX_CPULOAD_AVG: "100"
WORKER_RESERVED_MEMORY: "0.1"
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
DOLPHINSCHEDULER_ENV:
- "export HADOOP_HOME=/opt/soft/hadoop"
- "export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop"
- "export SPARK_HOME1=/opt/soft/spark1"
- "export SPARK_HOME2=/opt/soft/spark2"
- "export PYTHON_HOME=/opt/soft/python"
- "export JAVA_HOME=/opt/soft/java"
- "export HIVE_HOME=/opt/soft/hive"
- "export FLINK_HOME=/opt/soft/flink"
- "export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$FLINK_HOME/bin:$PATH"
## volumeClaimTemplates is a list of claims that pods are allowed to reference.
## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
## A claim in this list takes precedence over any volumes in the template, with the same name.
persistentVolumeClaim:
enabled: false
## dolphinscheduler data volume
dataPersistentVolume:
enabled: false
accessModes:
- "ReadWriteOnce"
storageClassName: "-"
storage: "20Gi"
## dolphinscheduler logs volume
logsPersistentVolume:
enabled: false
accessModes:
- "ReadWriteOnce"
storageClassName: "-"
storage: "20Gi"
alert:
strategy:
type: "RollingUpdate"
rollingUpdate:
maxSurge: "25%"
maxUnavailable: "25%"
replicas: "1"
# NodeSelector is a selector which must be true for the pod to fit on a node.
# Selector which must match a node's labels for the pod to be scheduled on that node.
# More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
nodeSelector: {}
# Tolerations allow the pods to schedule onto nodes with matching taints.
tolerations: []
# Affinity is a group of affinity scheduling rules.
# If specified, the pod's scheduling constraints.
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
configmap:
XLS_FILE_PATH: "/tmp/xls"
MAIL_SERVER_HOST: ""
MAIL_SERVER_PORT: ""
MAIL_SENDER: ""
MAIL_USER: ""
MAIL_PASSWD: ""
MAIL_SMTP_STARTTLS_ENABLE: false
MAIL_SMTP_SSL_ENABLE: false
MAIL_SMTP_SSL_TRUST: ""
ENTERPRISE_WECHAT_ENABLE: false
ENTERPRISE_WECHAT_CORP_ID: ""
ENTERPRISE_WECHAT_SECRET: ""
ENTERPRISE_WECHAT_AGENT_ID: ""
ENTERPRISE_WECHAT_USERS: ""
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
livenessProbe:
enabled: true
initialDelaySeconds: "30"
periodSeconds: "30"
timeoutSeconds: "5"
failureThreshold: "3"
successThreshold: "1"
## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
readinessProbe:
enabled: true
initialDelaySeconds: "30"
periodSeconds: "30"
timeoutSeconds: "5"
failureThreshold: "3"
successThreshold: "1"
## volumeClaimTemplates is a list of claims that pods are allowed to reference.
## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
## A claim in this list takes precedence over any volumes in the template, with the same name.
persistentVolumeClaim:
enabled: false
accessModes:
- "ReadWriteOnce"
storageClassName: "-"
storage: "20Gi"
api:
strategy:
type: "RollingUpdate"
rollingUpdate:
maxSurge: "25%"
maxUnavailable: "25%"
replicas: "1"
# NodeSelector is a selector which must be true for the pod to fit on a node.
# Selector which must match a node's labels for the pod to be scheduled on that node.
# More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
nodeSelector: {}
# Tolerations allow the pods to schedule onto nodes with matching taints.
tolerations: []
# Affinity is a group of affinity scheduling rules.
# If specified, the pod's scheduling constraints.
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
livenessProbe:
enabled: true
initialDelaySeconds: "30"
periodSeconds: "30"
timeoutSeconds: "5"
failureThreshold: "3"
successThreshold: "1"
## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
readinessProbe:
enabled: true
initialDelaySeconds: "30"
periodSeconds: "30"
timeoutSeconds: "5"
failureThreshold: "3"
successThreshold: "1"
## volumeClaimTemplates is a list of claims that pods are allowed to reference.
## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
## A claim in this list takes precedence over any volumes in the template, with the same name.
persistentVolumeClaim:
enabled: false
accessModes:
- "ReadWriteOnce"
storageClassName: "-"
storage: "20Gi"
frontend:
strategy:
type: "RollingUpdate"
rollingUpdate:
maxSurge: "25%"
maxUnavailable: "25%"
replicas: "1"
# NodeSelector is a selector which must be true for the pod to fit on a node.
# Selector which must match a node's labels for the pod to be scheduled on that node.
# More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
nodeSelector: {}
# Tolerations allow the pods to schedule onto nodes with matching taints.
tolerations: []
# Affinity is a group of affinity scheduling rules.
# If specified, the pod's scheduling constraints.
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
livenessProbe:
enabled: true
initialDelaySeconds: "30"
periodSeconds: "30"
timeoutSeconds: "5"
failureThreshold: "3"
successThreshold: "1"
## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
readinessProbe:
enabled: true
initialDelaySeconds: "30"
periodSeconds: "30"
timeoutSeconds: "5"
failureThreshold: "3"
successThreshold: "1"
## volumeClaimTemplates is a list of claims that pods are allowed to reference.
## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
## A claim in this list takes precedence over any volumes in the template, with the same name.
persistentVolumeClaim:
enabled: false
accessModes:
- "ReadWriteOnce"
storageClassName: "-"
storage: "20Gi"
ingress:
enabled: false
host: "dolphinscheduler.org"
path: "/"
tls:
enabled: false
hosts:
- "dolphinscheduler.org"
secretName: "dolphinscheduler-tls"

33
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/DataSourceController.java

@ -16,18 +16,19 @@
*/
package org.apache.dolphinscheduler.api.controller;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiImplicitParam;
import io.swagger.annotations.ApiImplicitParams;
import io.swagger.annotations.ApiOperation;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.service.DataSourceService;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.DbConnectType;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.utils.CommonUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.dao.entity.User;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiImplicitParam;
import io.swagger.annotations.ApiImplicitParams;
import io.swagger.annotations.ApiOperation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
@ -76,6 +77,7 @@ public class DataSourceController extends BaseController {
@ApiImplicitParam(name = "database", value = "DATABASE_NAME",required = true, dataType ="String"),
@ApiImplicitParam(name = "userName", value = "USER_NAME",required = true, dataType ="String"),
@ApiImplicitParam(name = "password", value = "PASSWORD", dataType ="String"),
@ApiImplicitParam(name = "connectType", value = "CONNECT_TYPE", dataType = "DbConnectType"),
@ApiImplicitParam(name = "other", value = "DATA_SOURCE_OTHER", dataType ="String")
})
@PostMapping(value = "/create")
@ -90,11 +92,12 @@ public class DataSourceController extends BaseController {
@RequestParam(value = "principal") String principal,
@RequestParam(value = "userName") String userName,
@RequestParam(value = "password") String password,
@RequestParam(value = "connectType") DbConnectType connectType,
@RequestParam(value = "other") String other) {
logger.info("login user {} create datasource name: {}, note: {}, type: {}, host: {},port: {},database : {},principal: {},userName : {} other: {}",
loginUser.getUserName(), name, note, type, host,port,database,principal,userName,other);
logger.info("login user {} create datasource name: {}, note: {}, type: {}, host: {}, port: {}, database : {}, principal: {}, userName : {}, connectType: {}, other: {}",
loginUser.getUserName(), name, note, type, host, port, database, principal, userName, connectType, other);
try {
String parameter = dataSourceService.buildParameter(name, note, type, host, port, database,principal,userName, password, other);
String parameter = dataSourceService.buildParameter(name, note, type, host, port, database, principal, userName, password, connectType, other);
Map<String, Object> result = dataSourceService.createDataSource(loginUser, name, note, type, parameter);
return returnDataList(result);
@ -133,6 +136,7 @@ public class DataSourceController extends BaseController {
@ApiImplicitParam(name = "database", value = "DATABASE_NAME",required = true, dataType ="String"),
@ApiImplicitParam(name = "userName", value = "USER_NAME",required = true, dataType ="String"),
@ApiImplicitParam(name = "password", value = "PASSWORD", dataType ="String"),
@ApiImplicitParam(name = "connectType", value = "CONNECT_TYPE", dataType = "DbConnectType"),
@ApiImplicitParam(name = "other", value = "DATA_SOURCE_OTHER", dataType ="String")
})
@PostMapping(value = "/update")
@ -148,11 +152,12 @@ public class DataSourceController extends BaseController {
@RequestParam(value = "principal") String principal,
@RequestParam(value = "userName") String userName,
@RequestParam(value = "password") String password,
@RequestParam(value = "connectType") DbConnectType connectType,
@RequestParam(value = "other") String other) {
logger.info("login user {} updateProcessInstance datasource name: {}, note: {}, type: {}, other: {}",
loginUser.getUserName(), name, note, type, other);
logger.info("login user {} updateProcessInstance datasource name: {}, note: {}, type: {}, connectType: {}, other: {}",
loginUser.getUserName(), name, note, type, connectType, other);
try {
String parameter = dataSourceService.buildParameter(name, note, type, host, port, database,principal, userName, password, other);
String parameter = dataSourceService.buildParameter(name, note, type, host, port, database,principal, userName, password, connectType, other);
Map<String, Object> dataSource = dataSourceService.updateDataSource(id, loginUser, name, note, type, parameter);
return returnDataList(dataSource);
} catch (Exception e) {
@ -277,6 +282,7 @@ public class DataSourceController extends BaseController {
@ApiImplicitParam(name = "database", value = "DATABASE_NAME",required = true, dataType ="String"),
@ApiImplicitParam(name = "userName", value = "USER_NAME",required = true, dataType ="String"),
@ApiImplicitParam(name = "password", value = "PASSWORD", dataType ="String"),
@ApiImplicitParam(name = "connectType", value = "CONNECT_TYPE", dataType = "DbConnectType"),
@ApiImplicitParam(name = "other", value = "DATA_SOURCE_OTHER", dataType ="String")
})
@PostMapping(value = "/connect")
@ -291,11 +297,12 @@ public class DataSourceController extends BaseController {
@RequestParam(value = "principal") String principal,
@RequestParam(value = "userName") String userName,
@RequestParam(value = "password") String password,
@RequestParam(value = "connectType") DbConnectType connectType,
@RequestParam(value = "other") String other) {
logger.info("login user {}, connect datasource: {} failure, note: {}, type: {}, other: {}",
loginUser.getUserName(), name, note, type, other);
logger.info("login user {}, connect datasource: {} failure, note: {}, type: {}, connectType: {}, other: {}",
loginUser.getUserName(), name, note, type, connectType, other);
try {
String parameter = dataSourceService.buildParameter(name, note, type, host, port, database,principal,userName, password, other);
String parameter = dataSourceService.buildParameter(name, note, type, host, port, database, principal, userName, password, connectType, other);
Boolean isConnection = dataSourceService.checkConnection(type, parameter);
Result result = new Result();

152
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java

@ -60,6 +60,50 @@ public class ResourcesController extends BaseController{
@Autowired
private UdfFuncService udfFuncService;
/**
* create directory
*
* @param loginUser login user
* @param type resource type
* @param alias directory name
* @param description description
* @param pid parent id
* @param currentDir current directory
* @return create directory result code
*/
@ApiOperation(value = "createDirctory", notes= "CREATE_RESOURCE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType ="ResourceType"),
@ApiImplicitParam(name = "name", value = "RESOURCE_NAME", required = true, dataType ="String"),
@ApiImplicitParam(name = "description", value = "RESOURCE_DESC", dataType ="String"),
@ApiImplicitParam(name = "file", value = "RESOURCE_FILE", required = true, dataType = "MultipartFile")
})
@PostMapping(value = "/directory/create")
public Result createDirectory(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "type") ResourceType type,
@RequestParam(value ="name") String alias,
@RequestParam(value = "description", required = false) String description,
@RequestParam(value ="pid") int pid,
@RequestParam(value ="currentDir") String currentDir) {
try {
logger.info("login user {}, create resource, type: {}, resource alias: {}, desc: {}, file: {},{}",
loginUser.getUserName(),type, alias, description,pid,currentDir);
return resourceService.createDirectory(loginUser,alias, description,type ,pid,currentDir);
} catch (Exception e) {
logger.error(CREATE_RESOURCE_ERROR.getMsg(),e);
return error(CREATE_RESOURCE_ERROR.getCode(), CREATE_RESOURCE_ERROR.getMsg());
}
}
/**
* create resource
*
@ -80,13 +124,15 @@ public class ResourcesController extends BaseController{
@PostMapping(value = "/create")
public Result createResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "type") ResourceType type,
@RequestParam(value ="name")String alias,
@RequestParam(value ="name") String alias,
@RequestParam(value = "description", required = false) String description,
@RequestParam("file") MultipartFile file) {
@RequestParam("file") MultipartFile file,
@RequestParam(value ="pid") int pid,
@RequestParam(value ="currentDir") String currentDir) {
try {
logger.info("login user {}, create resource, type: {}, resource alias: {}, desc: {}, file: {},{}",
loginUser.getUserName(),type, alias, description, file.getName(), file.getOriginalFilename());
return resourceService.createResource(loginUser,alias, description,type ,file);
return resourceService.createResource(loginUser,alias, description,type ,file,pid,currentDir);
} catch (Exception e) {
logger.error(CREATE_RESOURCE_ERROR.getMsg(),e);
return error(CREATE_RESOURCE_ERROR.getCode(), CREATE_RESOURCE_ERROR.getMsg());
@ -120,7 +166,7 @@ public class ResourcesController extends BaseController{
try {
logger.info("login user {}, update resource, type: {}, resource alias: {}, desc: {}",
loginUser.getUserName(),type, alias, description);
return resourceService.updateResource(loginUser,resourceId,alias, description,type);
return resourceService.updateResource(loginUser,resourceId,alias,description,type);
} catch (Exception e) {
logger.error(UPDATE_RESOURCE_ERROR.getMsg(),e);
return error(Status.UPDATE_RESOURCE_ERROR.getCode(), Status.UPDATE_RESOURCE_ERROR.getMsg());
@ -166,6 +212,7 @@ public class ResourcesController extends BaseController{
@ApiOperation(value = "queryResourceListPaging", notes= "QUERY_RESOURCE_LIST_PAGING_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType ="ResourceType"),
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType ="int"),
@ApiImplicitParam(name = "searchVal", value = "SEARCH_VAL", dataType ="String"),
@ApiImplicitParam(name = "pageNo", value = "PAGE_NO", dataType = "Int", example = "1"),
@ApiImplicitParam(name = "pageSize", value = "PAGE_SIZE", dataType ="Int",example = "20")
@ -174,6 +221,7 @@ public class ResourcesController extends BaseController{
@ResponseStatus(HttpStatus.OK)
public Result queryResourceListPaging(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value ="type") ResourceType type,
@RequestParam(value ="id") int id,
@RequestParam("pageNo") Integer pageNo,
@RequestParam(value = "searchVal", required = false) String searchVal,
@RequestParam("pageSize") Integer pageSize
@ -187,7 +235,7 @@ public class ResourcesController extends BaseController{
}
searchVal = ParameterUtils.handleEscapes(searchVal);
result = resourceService.queryResourceListPaging(loginUser,type,searchVal,pageNo, pageSize);
result = resourceService.queryResourceListPaging(loginUser,id,type,searchVal,pageNo, pageSize);
return returnDataListPaging(result);
}catch (Exception e){
logger.error(QUERY_RESOURCES_LIST_PAGING.getMsg(),e);
@ -227,32 +275,89 @@ public class ResourcesController extends BaseController{
* verify resource by alias and type
*
* @param loginUser login user
* @param alias resource name
* @param type resource type
* @param fullName resource full name
* @param type resource type
* @return true if the resource name not exists, otherwise return false
*/
@ApiOperation(value = "verifyResourceName", notes= "VERIFY_RESOURCE_NAME_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType ="ResourceType"),
@ApiImplicitParam(name = "name", value = "RESOURCE_NAME", required = true, dataType ="String")
@ApiImplicitParam(name = "fullName", value = "RESOURCE_FULL_NAME", required = true, dataType ="String")
})
@GetMapping(value = "/verify-name")
@ResponseStatus(HttpStatus.OK)
public Result verifyResourceName(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value ="name") String alias,
@RequestParam(value ="fullName") String fullName,
@RequestParam(value ="type") ResourceType type
) {
try {
logger.info("login user {}, verfiy resource alias: {},resource type: {}",
loginUser.getUserName(), alias,type);
loginUser.getUserName(), fullName,type);
return resourceService.verifyResourceName(alias,type,loginUser);
return resourceService.verifyResourceName(fullName,type,loginUser);
} catch (Exception e) {
logger.error(VERIFY_RESOURCE_BY_NAME_AND_TYPE_ERROR.getMsg(), e);
return error(Status.VERIFY_RESOURCE_BY_NAME_AND_TYPE_ERROR.getCode(), Status.VERIFY_RESOURCE_BY_NAME_AND_TYPE_ERROR.getMsg());
}
}
/**
* query resources jar list
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
@ApiOperation(value = "queryResourceJarList", notes= "QUERY_RESOURCE_LIST_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType ="ResourceType")
})
@GetMapping(value="/list/jar")
@ResponseStatus(HttpStatus.OK)
public Result queryResourceJarList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value ="type") ResourceType type
){
try{
logger.info("query resource list, login user:{}, resource type:{}", loginUser.getUserName(), type.toString());
Map<String, Object> result = resourceService.queryResourceJarList(loginUser, type);
return returnDataList(result);
}catch (Exception e){
logger.error(QUERY_RESOURCES_LIST_ERROR.getMsg(),e);
return error(Status.QUERY_RESOURCES_LIST_ERROR.getCode(), Status.QUERY_RESOURCES_LIST_ERROR.getMsg());
}
}
/**
* query resource by full name and type
*
* @param loginUser login user
* @param fullName resource full name
* @param type resource type
* @return the resource if it exists, otherwise an error result
*/
@ApiOperation(value = "queryResource", notes= "QUERY_BY_RESOURCE_NAME")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType ="ResourceType"),
@ApiImplicitParam(name = "fullName", value = "RESOURCE_FULL_NAME", required = true, dataType ="String")
})
@GetMapping(value = "/queryResource")
@ResponseStatus(HttpStatus.OK)
public Result queryResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value ="fullName",required = false) String fullName,
@RequestParam(value ="id",required = false) Integer id,
@RequestParam(value ="type") ResourceType type
) {
try {
logger.info("login user {}, query resource by full name: {} or id: {},resource type: {}",
loginUser.getUserName(), fullName,id,type);
return resourceService.queryResource(fullName,id,type);
} catch (Exception e) {
logger.error(RESOURCE_NOT_EXIST.getMsg(), e);
return error(Status.RESOURCE_NOT_EXIST.getCode(), Status.RESOURCE_NOT_EXIST.getMsg());
}
}
/**
* view resource file online
*
@ -310,16 +415,18 @@ public class ResourcesController extends BaseController{
@RequestParam(value ="fileName")String fileName,
@RequestParam(value ="suffix")String fileSuffix,
@RequestParam(value = "description", required = false) String description,
@RequestParam(value = "content") String content
@RequestParam(value = "content") String content,
@RequestParam(value ="pid") int pid,
@RequestParam(value ="currentDir") String currentDir
) {
try{
logger.info("login user {}, online create resource! fileName : {}, type : {}, suffix : {},desc : {},content : {}",
loginUser.getUserName(),fileName,type,fileSuffix,description,content);
loginUser.getUserName(),fileName,type,fileSuffix,description,content,pid,currentDir);
if(StringUtils.isEmpty(content)){
logger.error("resource file contents are not allowed to be empty");
return error(Status.RESOURCE_FILE_IS_EMPTY.getCode(), RESOURCE_FILE_IS_EMPTY.getMsg());
}
return resourceService.onlineCreateResource(loginUser,type,fileName,fileSuffix,description,content);
return resourceService.onlineCreateResource(loginUser,type,fileName,fileSuffix,description,content,pid,currentDir);
}catch (Exception e){
logger.error(CREATE_RESOURCE_FILE_ON_LINE_ERROR.getMsg(),e);
return error(Status.CREATE_RESOURCE_FILE_ON_LINE_ERROR.getCode(), Status.CREATE_RESOURCE_FILE_ON_LINE_ERROR.getMsg());
@ -384,6 +491,9 @@ public class ResourcesController extends BaseController{
.ok()
.header(HttpHeaders.CONTENT_DISPOSITION, "attachment; filename=\"" + file.getFilename() + "\"")
.body(file);
}catch (RuntimeException e){
logger.error(e.getMessage(),e);
return ResponseEntity.status(HttpStatus.BAD_REQUEST).body(e.getMessage());
}catch (Exception e){
logger.error(DOWNLOAD_RESOURCE_FILE_ERROR.getMsg(),e);
return ResponseEntity.status(HttpStatus.BAD_REQUEST).body(Status.DOWNLOAD_RESOURCE_FILE_ERROR.getMsg());
@ -658,21 +768,21 @@ public class ResourcesController extends BaseController{
* @param userId user id
* @return unauthorized result code
*/
@ApiOperation(value = "unauthorizedFile", notes= "UNAUTHORIZED_FILE_NOTES")
@ApiOperation(value = "authorizeResourceTree", notes= "AUTHORIZE_RESOURCE_TREE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType ="Int", example = "100")
})
@GetMapping(value = "/unauth-file")
@GetMapping(value = "/authorize-resource-tree")
@ResponseStatus(HttpStatus.CREATED)
public Result unauthorizedFile(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
public Result authorizeResourceTree(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("userId") Integer userId) {
try{
logger.info("resource unauthorized file, user:{}, unauthorized user id:{}", loginUser.getUserName(), userId);
Map<String, Object> result = resourceService.unauthorizedFile(loginUser, userId);
logger.info("all resource file, user:{}, user id:{}", loginUser.getUserName(), userId);
Map<String, Object> result = resourceService.authorizeResourceTree(loginUser, userId);
return returnDataList(result);
}catch (Exception e){
logger.error(UNAUTHORIZED_FILE_RESOURCE_ERROR.getMsg(),e);
return error(Status.UNAUTHORIZED_FILE_RESOURCE_ERROR.getCode(), Status.UNAUTHORIZED_FILE_RESOURCE_ERROR.getMsg());
logger.error(AUTHORIZE_RESOURCE_TREE.getMsg(),e);
return error(Status.AUTHORIZE_RESOURCE_TREE.getCode(), Status.AUTHORIZE_RESOURCE_TREE.getMsg());
}
}

29
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/Directory.java

@ -0,0 +1,29 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.dto.resources;
/**
* directory
*/
public class Directory extends ResourceComponent{
@Override
public boolean isDirctory() {
return true;
}
}

8
dolphinscheduler-ui/src/sass/common/_mixin.scss → dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/FileLeaf.java

@ -1,3 +1,5 @@
package org.apache.dolphinscheduler.api.dto.resources;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
@ -14,3 +16,9 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* file leaf
*/
public class FileLeaf extends ResourceComponent{
}

193
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/ResourceComponent.java

@ -0,0 +1,193 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.dto.resources;
import com.alibaba.fastjson.annotation.JSONField;
import com.alibaba.fastjson.annotation.JSONType;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import java.util.ArrayList;
import java.util.List;
/**
* resource component
*/
@JSONType(orders={"id","pid","name","fullName","description","isDirctory","children","type"})
public abstract class ResourceComponent {
public ResourceComponent() {
}
public ResourceComponent(int id, int pid, String name, String fullName, String description, boolean isDirctory) {
this.id = id;
this.pid = pid;
this.name = name;
this.fullName = fullName;
this.description = description;
this.isDirctory = isDirctory;
int directoryFlag = isDirctory ? 1:0;
this.idValue = String.format("%s_%s",id,directoryFlag);
}
/**
* id
*/
@JSONField(ordinal = 1)
protected int id;
/**
* parent id
*/
@JSONField(ordinal = 2)
protected int pid;
/**
* name
*/
@JSONField(ordinal = 3)
protected String name;
/**
* current directory
*/
protected String currentDir;
/**
* full name
*/
@JSONField(ordinal = 4)
protected String fullName;
/**
* description
*/
@JSONField(ordinal = 5)
protected String description;
/**
* is directory
*/
@JSONField(ordinal = 6)
protected boolean isDirctory;
/**
* id value
*/
@JSONField(ordinal = 7)
protected String idValue;
/**
* resource type
*/
@JSONField(ordinal = 8)
protected ResourceType type;
/**
* children
*/
@JSONField(ordinal = 9)
protected List<ResourceComponent> children = new ArrayList<>();
/**
* add resource component
* @param resourceComponent resource component
*/
public void add(ResourceComponent resourceComponent){
children.add(resourceComponent);
}
public String getName(){
return this.name;
}
public String getDescription(){
return this.description;
}
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
public int getPid() {
return pid;
}
public void setPid(int pid) {
this.pid = pid;
}
public void setName(String name) {
this.name = name;
}
public String getFullName() {
return fullName;
}
public void setFullName(String fullName) {
this.fullName = fullName;
}
public void setDescription(String description) {
this.description = description;
}
public boolean isDirctory() {
return isDirctory;
}
public void setDirctory(boolean dirctory) {
isDirctory = dirctory;
}
public String getIdValue() {
return idValue;
}
public void setIdValue(int id,boolean isDirctory) {
int directoryFlag = isDirctory ? 1:0;
this.idValue = String.format("%s_%s",id,directoryFlag);
}
public ResourceType getType() {
return type;
}
public void setType(ResourceType type) {
this.type = type;
}
public List<ResourceComponent> getChildren() {
return children;
}
public void setChildren(List<ResourceComponent> children) {
this.children = children;
}
@Override
public String toString() {
return "ResourceComponent{" +
"id=" + id +
", pid=" + pid +
", name='" + name + '\'' +
", currentDir='" + currentDir + '\'' +
", fullName='" + fullName + '\'' +
", description='" + description + '\'' +
", isDirctory=" + isDirctory +
", idValue='" + idValue + '\'' +
", type=" + type +
", children=" + children +
'}';
}
}
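
A minimal sketch of the idValue convention above: "<id>_<flag>", where the flag is 1 for a directory and 0 for a file, presumably so a file and a directory that share a numeric id still map to distinct tree-node keys. The class name is illustrative, not part of the commit.

public class IdValueSketch {
    static String idValue(int id, boolean isDirectory) {
        // mirrors setIdValue(): String.format("%s_%s", id, directoryFlag)
        return String.format("%s_%s", id, isDirectory ? 1 : 0);
    }

    public static void main(String[] args) {
        System.out.println(idValue(5, true));  // 5_1
        System.out.println(idValue(5, false)); // 5_0
    }
}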

28
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/filter/IFilter.java

@ -0,0 +1,28 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.dto.resources.filter;
import org.apache.dolphinscheduler.dao.entity.Resource;
import java.util.List;
/**
* interface filter
*/
public interface IFilter {
List<Resource> filter();
}

100
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/filter/ResourceFilter.java

@ -0,0 +1,100 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.dto.resources.filter;
import org.apache.dolphinscheduler.dao.entity.Resource;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
/**
* resource filter
*/
public class ResourceFilter implements IFilter {
/**
* resource suffix
*/
private String suffix;
/**
* resource list
*/
private List<Resource> resourceList;
/**
* parent list
*/
//Set<Resource> parentList = new HashSet<>();
/**
* constructor
* @param suffix resource suffix
* @param resourceList resource list
*/
public ResourceFilter(String suffix, List<Resource> resourceList) {
this.suffix = suffix;
this.resourceList = resourceList;
}
/**
* file filter
* @return files whose alias ends with the suffix
*/
public Set<Resource> fileFilter(){
Set<Resource> resources = resourceList.stream().filter(t -> {
String alias = t.getAlias();
return alias.endsWith(suffix);
}).collect(Collectors.toSet());
return resources;
}
/**
* list all parent dir
* @return parent resource dir set
*/
Set<Resource> listAllParent(){
Set<Resource> parentList = new HashSet<>();
Set<Resource> filterFileList = fileFilter();
for(Resource file:filterFileList){
parentList.add(file);
setAllParent(file,parentList);
}
return parentList;
}
/**
* add all parent dirs of the given resource to parentList
* @param resource resource
* @param parentList parent resource dir set to fill
*/
private void setAllParent(Resource resource,Set<Resource> parentList){
for (Resource resourceTemp : resourceList) {
if (resourceTemp.getId() == resource.getPid()) {
parentList.add(resourceTemp);
setAllParent(resourceTemp,parentList);
}
}
}
@Override
public List<Resource> filter() {
return new ArrayList<>(listAllParent());
}
}
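
ResourceFilter keeps every resource whose alias ends with the given suffix, plus every ancestor directory reachable through pid links. A self-contained sketch of the same computation, using an iterative climb instead of the recursive setAllParent above; the Res record stands in for org.apache.dolphinscheduler.dao.entity.Resource and assumes only the id/pid/alias fields used here (and that pid links are acyclic, with -1 marking a root).

import java.util.*;
import java.util.stream.Collectors;

public class ResourceFilterSketch {
    record Res(int id, int pid, String alias) {}

    static List<Res> filter(String suffix, List<Res> all) {
        Map<Integer, Res> byId = all.stream().collect(Collectors.toMap(Res::id, r -> r));
        Set<Res> kept = new HashSet<>();
        for (Res r : all) {
            if (!r.alias().endsWith(suffix)) continue;
            kept.add(r);
            // climb pid links until the (pid == -1) root; byId.get(-1) is null
            for (Res p = byId.get(r.pid()); p != null; p = byId.get(p.pid())) {
                kept.add(p);
            }
        }
        return new ArrayList<>(kept);
    }

    public static void main(String[] args) {
        List<Res> all = List.of(
                new Res(1, -1, "lib"), new Res(2, 1, "udf.jar"), new Res(3, 1, "readme.txt"));
        // keeps udf.jar and its parent directory lib, drops readme.txt
        System.out.println(filter(".jar", all));
    }
}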

130
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/visitor/ResourceTreeVisitor.java

@ -0,0 +1,130 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.dto.resources.visitor;
import org.apache.dolphinscheduler.api.dto.resources.Directory;
import org.apache.dolphinscheduler.api.dto.resources.FileLeaf;
import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
import org.apache.dolphinscheduler.dao.entity.Resource;
import java.util.ArrayList;
import java.util.List;
/**
* resource tree visitor
*/
public class ResourceTreeVisitor implements Visitor{
/**
* resource list
*/
private List<Resource> resourceList;
public ResourceTreeVisitor() {
}
/**
* constructor
* @param resourceList resource list
*/
public ResourceTreeVisitor(List<Resource> resourceList) {
this.resourceList = resourceList;
}
/**
* visit
* @return resource component
*/
public ResourceComponent visit() {
ResourceComponent rootDirectory = new Directory();
for (Resource resource : resourceList) {
// judge whether this is a root node
if (rootNode(resource)){
ResourceComponent tempResourceComponent = getResourceComponent(resource);
rootDirectory.add(tempResourceComponent);
tempResourceComponent.setChildren(setChildren(tempResourceComponent.getId(),resourceList));
}
}
return rootDirectory;
}
/**
* set children
* @param id id
* @param list resource list
* @return resource component list
*/
public static List<ResourceComponent> setChildren(int id, List<Resource> list ){
List<ResourceComponent> childList = new ArrayList<>();
for (Resource resource : list) {
if (id == resource.getPid()){
ResourceComponent tempResourceComponent = getResourceComponent(resource);
childList.add(tempResourceComponent);
}
}
for (ResourceComponent resourceComponent : childList) {
resourceComponent.setChildren(setChildren(resourceComponent.getId(),list));
}
if (childList.size()==0){
return new ArrayList<>();
}
return childList;
}
/**
* Determine whether it is the root node
* @param resource resource
* @return true if it is the root node
*/
public boolean rootNode(Resource resource) {
boolean isRootNode = true;
if(resource.getPid() != -1 ){
for (Resource parent : resourceList) {
if (resource.getPid() == parent.getId()) {
isRootNode = false;
break;
}
}
}
return isRootNode;
}
/**
* get resource component by resource
* @param resource resource
* @return resource component
*/
private static ResourceComponent getResourceComponent(Resource resource) {
ResourceComponent tempResourceComponent;
if(resource.isDirectory()){
tempResourceComponent = new Directory();
}else{
tempResourceComponent = new FileLeaf();
}
tempResourceComponent.setName(resource.getAlias());
tempResourceComponent.setFullName(resource.getFullName().replaceFirst("/",""));
tempResourceComponent.setId(resource.getId());
tempResourceComponent.setPid(resource.getPid());
tempResourceComponent.setIdValue(resource.getId(),resource.isDirectory());
tempResourceComponent.setDescription(resource.getDescription());
tempResourceComponent.setType(resource.getType());
return tempResourceComponent;
}
}
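
The visitor's core pass turns the flat resource table into a tree: a row is a root when its pid is -1 or points outside the list (rootNode above), and children are attached recursively by pid (setChildren above). A compact sketch under those assumptions; Row and Node are illustrative stand-ins for the real Resource and ResourceComponent types.

import java.util.*;
import java.util.stream.Collectors;

public class TreeVisitorSketch {
    record Row(int id, int pid, String name) {}
    record Node(Row row, List<Node> children) {}

    static List<Node> build(List<Row> rows) {
        Set<Integer> ids = rows.stream().map(Row::id).collect(Collectors.toSet());
        return rows.stream()
                .filter(r -> r.pid() == -1 || !ids.contains(r.pid())) // root test, as in rootNode()
                .map(r -> attach(r, rows))
                .collect(Collectors.toList());
    }

    static Node attach(Row row, List<Row> rows) {
        // recursively collect rows whose pid points at this row, as in setChildren()
        List<Node> children = rows.stream()
                .filter(r -> r.pid() == row.id())
                .map(r -> attach(r, rows))
                .collect(Collectors.toList());
        return new Node(row, children);
    }

    public static void main(String[] args) {
        List<Row> rows = List.of(new Row(1, -1, "dir"), new Row(2, 1, "a.jar"));
        System.out.println(build(rows)); // one root "dir" with child "a.jar"
    }
}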

31
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/visitor/Visitor.java

@ -0,0 +1,31 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.dto.resources.visitor;
import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
/**
* Visitor
*/
public interface Visitor {
/**
* visit
* @return resource component
*/
ResourceComponent visit();
}

12
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java

@ -97,7 +97,7 @@ public enum Status {
VERIFY_UDF_FUNCTION_NAME_ERROR( 10070,"verify udf function name error", "UDF函数名称验证错误"),
DELETE_UDF_FUNCTION_ERROR( 10071,"delete udf function error", "删除UDF函数错误"),
AUTHORIZED_FILE_RESOURCE_ERROR( 10072,"authorized file resource error", "授权资源文件错误"),
UNAUTHORIZED_FILE_RESOURCE_ERROR( 10073,"unauthorized file resource error", "查询未授权资源错误"),
AUTHORIZE_RESOURCE_TREE( 10073,"authorize resource tree display error","授权资源目录树错误"),
UNAUTHORIZED_UDF_FUNCTION_ERROR( 10074,"unauthorized udf function error", "查询未授权UDF函数错误"),
AUTHORIZED_UDF_FUNCTION_ERROR(10075,"authorized udf function error", "授权UDF函数错误"),
CREATE_SCHEDULE_ERROR(10076,"create schedule error", "创建调度配置错误"),
@ -184,10 +184,12 @@ public enum Status {
RESOURCE_SIZE_EXCEED_LIMIT(20007, "upload resource file size exceeds limit", "上传资源文件大小超过限制"),
RESOURCE_SUFFIX_FORBID_CHANGE(20008, "resource suffix not allowed to be modified", "资源文件后缀不支持修改"),
UDF_RESOURCE_SUFFIX_NOT_JAR(20009, "UDF resource suffix name must be jar", "UDF资源文件后缀名只支持[jar]"),
HDFS_COPY_FAIL(20009, "hdfs copy {0} -> {1} fail", "hdfs复制失败:[{0}] -> [{1}]"),
RESOURCE_FILE_EXIST(20010, "resource file {0} already exists in hdfs,please delete it or change name!", "资源文件[{0}]在hdfs中已存在,请删除或修改资源名"),
RESOURCE_FILE_NOT_EXIST(20011, "resource file {0} not exists in hdfs!", "资源文件[{0}]在hdfs中不存在"),
HDFS_COPY_FAIL(20010, "hdfs copy {0} -> {1} fail", "hdfs复制失败:[{0}] -> [{1}]"),
RESOURCE_FILE_EXIST(20011, "resource file {0} already exists in hdfs,please delete it or change name!", "资源文件[{0}]在hdfs中已存在,请删除或修改资源名"),
RESOURCE_FILE_NOT_EXIST(20012, "resource file {0} not exists in hdfs!", "资源文件[{0}]在hdfs中不存在"),
UDF_RESOURCE_IS_BOUND(20013, "udf resource file is bound by UDF functions:{0}","udf函数绑定了资源文件[{0}]"),
RESOURCE_IS_USED(20014, "resource file is used by process definition","资源文件被上线的流程定义使用了"),
PARENT_RESOURCE_NOT_EXIST(20015, "parent resource not exist","父资源文件不存在"),
USER_NO_OPERATION_PERM(30001, "user has no operation privilege", "当前用户没有操作权限"),
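
An aside on these entries: the messages use java.text.MessageFormat placeholders ({0}, {1}). A minimal sketch of how a caller renders one; the template string is copied from the RESOURCE_FILE_EXIST entry above, and the class name is illustrative.

import java.text.MessageFormat;

public class StatusMessageSketch {
    public static void main(String[] args) {
        // template copied from the RESOURCE_FILE_EXIST entry above
        String template = "resource file {0} already exists in hdfs,please delete it or change name!";
        System.out.println(MessageFormat.format(template, "/udf/a.jar"));
        // -> resource file /udf/a.jar already exists in hdfs,please delete it or change name!
    }
}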

32
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/DataSourceService.java

@ -17,10 +17,15 @@
package org.apache.dolphinscheduler.api.service;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.fastjson.TypeReference;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.DbConnectType;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.utils.CommonUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
@ -30,10 +35,6 @@ import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.DataSourceMapper;
import org.apache.dolphinscheduler.dao.mapper.DataSourceUserMapper;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.fastjson.TypeReference;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger;
@ -473,12 +474,19 @@ public class DataSourceService extends BaseService{
* @return datasource parameter
*/
public String buildParameter(String name, String desc, DbType type, String host,
String port, String database,String principal,String userName,
String password, String other) {
String port, String database, String principal, String userName,
String password, DbConnectType connectType, String other) {
String address = buildAddress(type, host, port);
String address = buildAddress(type, host, port, connectType);
String jdbcUrl = address + "/" + database;
String jdbcUrl;
if (Constants.ORACLE.equals(type.name())
&& connectType == DbConnectType.ORACLE_SID) {
jdbcUrl = address + ":" + database;
} else {
jdbcUrl = address + "/" + database;
}
if (CommonUtils.getKerberosStartupState() &&
(type == DbType.HIVE || type == DbType.SPARK)){
jdbcUrl += ";principal=" + principal;
@ -531,7 +539,7 @@ public class DataSourceService extends BaseService{
}
private String buildAddress(DbType type, String host, String port) {
private String buildAddress(DbType type, String host, String port, DbConnectType connectType) {
StringBuilder sb = new StringBuilder();
if (Constants.MYSQL.equals(type.name())) {
sb.append(Constants.JDBC_MYSQL);
@ -552,7 +560,11 @@ public class DataSourceService extends BaseService{
sb.append(Constants.JDBC_CLICKHOUSE);
sb.append(host).append(":").append(port);
} else if (Constants.ORACLE.equals(type.name())) {
sb.append(Constants.JDBC_ORACLE);
if (connectType == DbConnectType.ORACLE_SID) {
sb.append(Constants.JDBC_ORACLE_SID);
} else {
sb.append(Constants.JDBC_ORACLE_SERVICE_NAME);
}
sb.append(host).append(":").append(port);
} else if (Constants.SQLSERVER.equals(type.name())) {
sb.append(Constants.JDBC_SQLSERVER);
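
The net effect of the new connectType branch: an Oracle SID connects as host:port:SID, while a service name connects as //host:port/service. A sketch of just that branching; the two prefix literals are assumptions standing in for Constants.JDBC_ORACLE_SID and Constants.JDBC_ORACLE_SERVICE_NAME, whose exact values live in dolphinscheduler-common.

public class OracleUrlSketch {
    enum DbConnectType { ORACLE_SID, ORACLE_SERVICE_NAME }

    static String buildJdbcUrl(DbConnectType connectType, String host, String port, String database) {
        String address = (connectType == DbConnectType.ORACLE_SID
                ? "jdbc:oracle:thin:@"      // assumed SID prefix: host:port:SID
                : "jdbc:oracle:thin:@//")   // assumed service-name prefix: //host:port/service
                + host + ":" + port;
        return connectType == DbConnectType.ORACLE_SID
                ? address + ":" + database
                : address + "/" + database;
    }

    public static void main(String[] args) {
        System.out.println(buildJdbcUrl(DbConnectType.ORACLE_SID, "db1", "1521", "ORCL"));
        // -> jdbc:oracle:thin:@db1:1521:ORCL
        System.out.println(buildJdbcUrl(DbConnectType.ORACLE_SERVICE_NAME, "db1", "1521", "orclpdb"));
        // -> jdbc:oracle:thin:@//db1:1521/orclpdb
    }
}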

34
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java

@ -38,11 +38,9 @@ import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.process.ProcessDag;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.entity.*;
import org.apache.dolphinscheduler.dao.mapper.*;
import org.apache.dolphinscheduler.dao.utils.DagHelper;
@ -162,6 +160,31 @@ public class ProcessDefinitionService extends BaseDAGService {
return result;
}
/**
* get resource ids
* @param processData process data
* @return resource ids
*/
private String getResourceIds(ProcessData processData) {
List<TaskNode> tasks = processData.getTasks();
Set<Integer> resourceIds = new HashSet<>();
for(TaskNode taskNode : tasks){
String taskParameter = taskNode.getParams();
AbstractParameters params = TaskParametersUtils.getParameters(taskNode.getType(),taskParameter);
Set<Integer> tempSet = params.getResourceFilesList().stream().map(t->t.getId()).collect(Collectors.toSet());
resourceIds.addAll(tempSet);
}
StringBuilder sb = new StringBuilder();
for(int i : resourceIds) {
if (sb.length() > 0) {
sb.append(",");
}
sb.append(i);
}
return sb.toString();
}
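
The StringBuilder loop above is a hand-rolled comma join; an equivalent, more compact form (a sketch, not the committed code) would be:

import java.util.Set;
import java.util.stream.Collectors;

class ResourceIdJoinSketch {
    static String join(Set<Integer> resourceIds) {
        // same output as the loop above: ids separated by commas, no trailing comma
        return resourceIds.stream().map(String::valueOf).collect(Collectors.joining(","));
    }
}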
/**
* query process definition list
@ -946,7 +969,9 @@ public class ProcessDefinitionService extends BaseDAGService {
return result;
}
String processDefinitionJson = processDefinition.getProcessDefinitionJson();
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
//process data check
@ -1163,6 +1188,7 @@ public class ProcessDefinitionService extends BaseDAGService {
private DAG<String, TaskNode, TaskNodeRelation> genDagGraph(ProcessDefinition processDefinition) throws Exception {
String processDefinitionJson = processDefinition.getProcessDefinitionJson();
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
//check process data

8
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessInstanceService.java

@ -204,14 +204,8 @@ public class ProcessInstanceService extends BaseDAGService {
}
}
Set<String> exclusionSet = new HashSet<>();
exclusionSet.add(Constants.CLASS);
exclusionSet.add("locations");
exclusionSet.add("connects");
exclusionSet.add("processInstanceJson");
pageInfo.setTotalCount((int) processInstanceList.getTotal());
pageInfo.setLists(CollectionUtils.getListByExclusion(processInstances, exclusionSet));
pageInfo.setLists(processInstances);
result.put(Constants.DATA_LIST, pageInfo);
putMsg(result, Status.SUCCESS);
return result;

559
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java

@ -16,9 +16,15 @@
*/
package org.apache.dolphinscheduler.api.service;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.serializer.SerializerFeature;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.commons.collections.BeanMap;
import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
import org.apache.dolphinscheduler.api.dto.resources.filter.ResourceFilter;
import org.apache.dolphinscheduler.api.dto.resources.visitor.ResourceTreeVisitor;
import org.apache.dolphinscheduler.api.dto.resources.visitor.Visitor;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
@ -39,6 +45,7 @@ import org.springframework.web.multipart.MultipartFile;
import java.text.MessageFormat;
import java.util.*;
import java.util.stream.Collectors;
import static org.apache.dolphinscheduler.common.Constants.*;
@ -65,6 +72,82 @@ public class ResourcesService extends BaseService {
@Autowired
private ResourceUserMapper resourceUserMapper;
@Autowired
private ProcessDefinitionMapper processDefinitionMapper;
/**
* create directory
*
* @param loginUser login user
* @param name alias
* @param description description
* @param type type
* @param pid parent id
* @param currentDir current directory
* @return create directory result
*/
@Transactional(rollbackFor = Exception.class)
public Result createDirectory(User loginUser,
String name,
String description,
ResourceType type,
int pid,
String currentDir) {
Result result = new Result();
// if hdfs not startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name):String.format("%s/%s",currentDir,name);
if (pid != -1) {
Resource parentResource = resourcesMapper.selectById(pid);
if (parentResource == null) {
putMsg(result, Status.PARENT_RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, parentResource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
}
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource directory {} has exist, can't recreate", fullName);
putMsg(result, Status.RESOURCE_EXIST);
return result;
}
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,true,description,name,loginUser.getId(),type,0,now,now);
try {
resourcesMapper.insert(resource);
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<String, Object>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!"class".equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
} catch (Exception e) {
logger.error("resource already exists, can't recreate ", e);
throw new RuntimeException("resource already exists, can't recreate");
}
//create directory in hdfs
createDirectory(loginUser, fullName, type, result);
return result;
}
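Both createDirectory and createResource repeat the same ternary to join currentDir and name; only the root "/" must skip the extra separator. A hypothetical helper (not part of this commit) capturing that rule:
// Hypothetical helper, not in the commit: join a parent directory and a name
// without doubling the slash at the root.
class ResourcePathSketch {
    static String fullName(String currentDir, String name) {
        return "/".equals(currentDir)
                ? String.format("%s%s", currentDir, name)    // "/" + "a.jar"  -> "/a.jar"
                : String.format("%s/%s", currentDir, name);  // "/b" + "a.jar" -> "/b/a.jar"
    }
}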
/**
* create resource
*
@ -73,6 +156,8 @@ public class ResourcesService extends BaseService {
* @param desc description
* @param file file
* @param type type
* @param pid parent id
* @param currentDir current directory
* @return create result code
*/
@Transactional(rollbackFor = Exception.class)
@ -80,7 +165,9 @@ public class ResourcesService extends BaseService {
String name,
String desc,
ResourceType type,
MultipartFile file) {
MultipartFile file,
int pid,
String currentDir) {
Result result = new Result();
// if hdfs not startup
@ -123,7 +210,8 @@ public class ResourcesService extends BaseService {
}
// check resource name exists
if (checkResourceExists(name, 0, type.ordinal())) {
String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name):String.format("%s/%s",currentDir,name);
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource {} has exist, can't recreate", name);
putMsg(result, Status.RESOURCE_EXIST);
return result;
@ -131,7 +219,9 @@ public class ResourcesService extends BaseService {
Date now = new Date();
Resource resource = new Resource(name,file.getOriginalFilename(),desc,loginUser.getId(),type,file.getSize(),now,now);
Resource resource = new Resource(pid,name,fullName,false,desc,file.getOriginalFilename(),loginUser.getId(),type,file.getSize(),now,now);
try {
resourcesMapper.insert(resource);
@ -151,7 +241,7 @@ public class ResourcesService extends BaseService {
}
// fail upload
if (!upload(loginUser, name, file, type)) {
if (!upload(loginUser, fullName, file, type)) {
logger.error("upload resource: {} file: {} failed.", name, file.getOriginalFilename());
putMsg(result, Status.HDFS_OPERATION_ERROR);
throw new RuntimeException(String.format("upload resource: %s file: %s failed.", name, file.getOriginalFilename()));
@ -162,27 +252,29 @@ public class ResourcesService extends BaseService {
/**
* check resource is exists
*
* @param alias alias
* @param fullName fullName
* @param userId user id
* @param type type
* @return true if resource exists
*/
private boolean checkResourceExists(String alias, int userId, int type ){
List<Resource> resources = resourcesMapper.queryResourceList(alias, userId, type);
return CollectionUtils.isNotEmpty(resources);
}
private boolean checkResourceExists(String fullName, int userId, int type ){
List<Resource> resources = resourcesMapper.queryResourceList(fullName, userId, type);
if (resources != null && resources.size() > 0) {
return true;
}
return false;
}
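The rewritten check trades the old CollectionUtils one-liner for an explicit null-and-size test; the two are equivalent, so the body could stay as before with only the parameter renamed. A sketch assuming the same mapper call and the project's CollectionUtils import:
// Sketch: behaviorally identical to the method above, using the project's
// CollectionUtils as the pre-change version did.
private boolean checkResourceExists(String fullName, int userId, int type) {
    return CollectionUtils.isNotEmpty(resourcesMapper.queryResourceList(fullName, userId, type));
}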
/**
* update resource
*
* @param loginUser login user
* @param name alias
* @param resourceId resource id
* @param type resource type
* @param desc description
* @return update result code
* @param loginUser login user
* @param resourceId resource id
* @param name name
* @param desc description
* @param type resource type
* @return update result code
*/
@Transactional(rollbackFor = Exception.class)
public Result updateResource(User loginUser,
@ -216,7 +308,10 @@ public class ResourcesService extends BaseService {
}
// check whether the resource already exists
if (!resource.getAlias().equals(name) && checkResourceExists(name, 0, type.ordinal())) {
String originFullName = resource.getFullName();
String fullName = String.format("%s%s",originFullName.substring(0,originFullName.lastIndexOf("/")+1),name);
if (!resource.getAlias().equals(name) && checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource {} already exists, can't recreate", name);
putMsg(result, Status.RESOURCE_EXIST);
return result;
@ -227,25 +322,41 @@ public class ResourcesService extends BaseService {
if (StringUtils.isEmpty(tenantCode)){
return result;
}
//get the file suffix
String nameWithSuffix = name;
String originResourceName = resource.getAlias();
String suffix = originResourceName.substring(originResourceName.lastIndexOf('.'));
if (!resource.isDirectory()) {
//get the file suffix
// if the name has no suffix then add it, else use the original name
String nameWithSuffix = name;
if(!name.endsWith(suffix)){
nameWithSuffix = nameWithSuffix + suffix;
String suffix = originResourceName.substring(originResourceName.lastIndexOf("."));
// if the name has no suffix then add it, else use the original name
if(!name.endsWith(suffix)){
nameWithSuffix = nameWithSuffix + suffix;
}
}
// updateResource data
List<Integer> childrenResource = listAllChildren(resource);
String oldFullName = resource.getFullName();
Date now = new Date();
resource.setAlias(nameWithSuffix);
resource.setFullName(fullName);
resource.setDescription(desc);
resource.setUpdateTime(now);
try {
resourcesMapper.updateById(resource);
if (resource.isDirectory() && CollectionUtils.isNotEmpty(childrenResource)) {
List<Resource> childResourceList = new ArrayList<>();
List<Resource> resourceList = resourcesMapper.listResourceByIds(childrenResource.toArray(new Integer[childrenResource.size()]));
childResourceList = resourceList.stream().map(t -> {
t.setFullName(t.getFullName().replaceFirst(oldFullName, fullName));
t.setUpdateTime(now);
return t;
}).collect(Collectors.toList());
resourcesMapper.batchUpdateResource(childResourceList);
}
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
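One caveat in the child-renaming loop above: String.replaceFirst interprets its first argument as a regular expression, so an oldFullName containing regex metacharacters ("." in file names, "+", "(") may not match literally. A defensive sketch, not the commit's code:
// Sketch: rewrite a child's fullName prefix with the old prefix quoted so it
// matches literally instead of as a regex.
import java.util.regex.Matcher;
import java.util.regex.Pattern;

class PrefixRenameSketch {
    static String replacePrefix(String childFullName, String oldFullName, String newFullName) {
        return childFullName.replaceFirst(Pattern.quote(oldFullName),
                Matcher.quoteReplacement(newFullName));
    }
}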
@ -267,15 +378,9 @@ public class ResourcesService extends BaseService {
// get file hdfs path
// delete hdfs file by type
String originHdfsFileName = "";
String destHdfsFileName = "";
if (resource.getType().equals(ResourceType.FILE)) {
originHdfsFileName = HadoopUtils.getHdfsFilename(tenantCode, originResourceName);
destHdfsFileName = HadoopUtils.getHdfsFilename(tenantCode, name);
} else if (resource.getType().equals(ResourceType.UDF)) {
originHdfsFileName = HadoopUtils.getHdfsUdfFilename(tenantCode, originResourceName);
destHdfsFileName = HadoopUtils.getHdfsUdfFilename(tenantCode, name);
}
String originHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,originFullName);
String destHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,fullName);
try {
if (HadoopUtils.getInstance().exists(originHdfsFileName)) {
logger.info("hdfs copy {} -> {}", originHdfsFileName, destHdfsFileName);
@ -303,7 +408,7 @@ public class ResourcesService extends BaseService {
* @param pageSize page size
* @return resource list page
*/
public Map<String, Object> queryResourceListPaging(User loginUser, ResourceType type, String searchVal, Integer pageNo, Integer pageSize) {
public Map<String, Object> queryResourceListPaging(User loginUser, int directoryId, ResourceType type, String searchVal, Integer pageNo, Integer pageSize) {
HashMap<String, Object> result = new HashMap<>(5);
Page<Resource> page = new Page(pageNo, pageSize);
@ -312,7 +417,7 @@ public class ResourcesService extends BaseService {
userId= 0;
}
IPage<Resource> resourceIPage = resourcesMapper.queryResourcePaging(page,
userId, type.ordinal(), searchVal);
userId, directoryId, type.ordinal(), searchVal);
PageInfo pageInfo = new PageInfo<Resource>(pageNo, pageSize);
pageInfo.setTotalCount((int)resourceIPage.getTotal());
pageInfo.setLists(resourceIPage.getRecords());
@ -321,17 +426,46 @@ public class ResourcesService extends BaseService {
return result;
}
/**
* create directory in hdfs
* @param loginUser login user
* @param fullName full name
* @param type resource type
* @param result Result
*/
private void createDirectory(User loginUser, String fullName, ResourceType type, Result result) {
// query tenant
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
String directoryName = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
String resourceRootPath = HadoopUtils.getHdfsDir(type,tenantCode);
try {
if (!HadoopUtils.getInstance().exists(resourceRootPath)) {
createTenantDirIfNotExists(tenantCode);
}
if (!HadoopUtils.getInstance().mkdir(directoryName)) {
logger.error("create resource directory {} of hdfs failed",directoryName);
putMsg(result,Status.HDFS_OPERATION_ERROR);
throw new RuntimeException(String.format("create resource directory: %s failed.", directoryName));
}
} catch (Exception e) {
logger.error("create resource directory {} of hdfs failed",directoryName);
putMsg(result,Status.HDFS_OPERATION_ERROR);
throw new RuntimeException(String.format("create resource directory: %s failed.", directoryName));
}
}
/**
* upload file to hdfs
*
* @param loginUser
* @param name
* @param file
* @param loginUser login user
* @param fullName full name
* @param file file
*/
private boolean upload(User loginUser, String name, MultipartFile file, ResourceType type) {
private boolean upload(User loginUser, String fullName, MultipartFile file, ResourceType type) {
// save to local
String fileSuffix = FileUtils.suffix(file.getOriginalFilename());
String nameSuffix = FileUtils.suffix(name);
String nameSuffix = FileUtils.suffix(fullName);
// determine file suffix
if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
@ -344,15 +478,8 @@ public class ResourcesService extends BaseService {
// save file to hdfs, and delete original file
String hdfsFilename = "";
String resourcePath = "";
if (type.equals(ResourceType.FILE)) {
hdfsFilename = HadoopUtils.getHdfsFilename(tenantCode, name);
resourcePath = HadoopUtils.getHdfsResDir(tenantCode);
} else if (type.equals(ResourceType.UDF)) {
hdfsFilename = HadoopUtils.getHdfsUdfFilename(tenantCode, name);
resourcePath = HadoopUtils.getHdfsUdfDir(tenantCode);
}
String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
String resourcePath = HadoopUtils.getHdfsDir(type,tenantCode);
try {
// if tenant dir not exists
if (!HadoopUtils.getInstance().exists(resourcePath)) {
@ -377,13 +504,59 @@ public class ResourcesService extends BaseService {
public Map<String, Object> queryResourceList(User loginUser, ResourceType type) {
Map<String, Object> result = new HashMap<>(5);
List<Resource> resourceList;
Set<Resource> allResourceList = getAllResources(loginUser, type);
Visitor resourceTreeVisitor = new ResourceTreeVisitor(new ArrayList<>(allResourceList));
//JSONArray jsonArray = JSON.parseArray(JSON.toJSONString(resourceTreeVisitor.visit().getChildren(), SerializerFeature.SortField));
result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* get all resources
* @param loginUser login user
* @return all resource set
*/
private Set<Resource> getAllResources(User loginUser, ResourceType type) {
int userId = loginUser.getId();
boolean listChildren = true;
if(isAdmin(loginUser)){
userId = 0;
listChildren = false;
}
List<Resource> resourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal());
Set<Resource> allResourceList = new HashSet<>(resourceList);
if (listChildren) {
Set<Integer> authorizedIds = new HashSet<>();
List<Resource> authorizedDirectory = resourceList.stream().filter(t->t.getUserId() != loginUser.getId() && t.isDirectory()).collect(Collectors.toList());
if (CollectionUtils.isNotEmpty(authorizedDirectory)) {
for(Resource resource : authorizedDirectory){
authorizedIds.addAll(listAllChildren(resource));
}
List<Resource> childrenResources = resourcesMapper.listResourceByIds(authorizedIds.toArray(new Integer[authorizedIds.size()]));
allResourceList.addAll(childrenResources);
}
}
resourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal());
result.put(Constants.DATA_LIST, resourceList);
return allResourceList;
}
/**
* query resource list
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
public Map<String, Object> queryResourceJarList(User loginUser, ResourceType type) {
Map<String, Object> result = new HashMap<>(5);
Set<Resource> allResourceList = getAllResources(loginUser, type);
List<Resource> resources = new ResourceFilter(".jar",new ArrayList<>(allResourceList)).filter();
Visitor resourceTreeVisitor = new ResourceTreeVisitor(resources);
result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
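ResourceFilter's implementation is not shown in this hunk; judging from its use here, it keeps resources whose full name ends with the given suffix. A minimal sketch of that idea (the real class presumably also keeps parent directories so the tree stays connected):
// Sketch of a suffix filter over a flat resource list; the actual
// ResourceFilter class in this commit may differ.
import org.apache.dolphinscheduler.dao.entity.Resource;
import java.util.List;
import java.util.stream.Collectors;

class SuffixFilterSketch {
    static List<Resource> filter(String suffix, List<Resource> all) {
        return all.stream()
                .filter(r -> !r.isDirectory() && r.getFullName().endsWith(suffix))
                .collect(Collectors.toList());
    }
}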
@ -419,23 +592,51 @@ public class ResourcesService extends BaseService {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
// if resource type is UDF, need to check whether it is bound by UDF functions
if (resource.getType() == (ResourceType.UDF)) {
List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(new int[]{resourceId});
if (CollectionUtils.isNotEmpty(udfFuncs)) {
logger.error("can't be deleted,because it is bound by UDF functions:{}",udfFuncs.toString());
putMsg(result,Status.UDF_RESOURCE_IS_BOUND,udfFuncs.get(0).getFuncName());
return result;
}
}
Tenant tenant = tenantMapper.queryById(loginUser.getTenantId());
if (tenant == null){
putMsg(result, Status.TENANT_NOT_EXIST);
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)){
return result;
}
// get all resource ids of process definitions that have been released
Map<Integer, Set<Integer>> resourceProcessMap = getResourceProcessMap();
Set<Integer> resourceIdSet = resourceProcessMap.keySet();
// get all children of the resource
List<Integer> allChildren = listAllChildren(resource);
if (resourceIdSet.contains(resource.getPid())) {
logger.error("can't be deleted,because it is used of process definition");
putMsg(result, Status.RESOURCE_IS_USED);
return result;
}
resourceIdSet.retainAll(allChildren);
if (CollectionUtils.isNotEmpty(resourceIdSet)) {
logger.error("can't be deleted,because it is used of process definition");
for (Integer resId : resourceIdSet) {
logger.error("resource id:{} is used of process definition {}",resId,resourceProcessMap.get(resId));
}
putMsg(result, Status.RESOURCE_IS_USED);
return result;
}
String hdfsFilename = "";
// delete hdfs file by type
String tenantCode = tenant.getTenantCode();
hdfsFilename = getHdfsFileName(resource, tenantCode, hdfsFilename);
// get hdfs file by type
String hdfsFilename = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getFullName());
//delete data in database
resourcesMapper.deleteById(resourceId);
resourcesMapper.deleteIds(allChildren.toArray(new Integer[allChildren.size()]));
resourceUserMapper.deleteResourceUser(0, resourceId);
//delete file on hdfs
HadoopUtils.getInstance().delete(hdfsFilename, false);
HadoopUtils.getInstance().delete(hdfsFilename, true);
putMsg(result, Status.SUCCESS);
return result;
@ -444,15 +645,15 @@ public class ResourcesService extends BaseService {
/**
* verify resource by name and type
* @param loginUser login user
* @param name resource alias
* @param type resource type
* @param fullName resource full name
* @param type resource type
* @return true if the resource name not exists, otherwise return false
*/
public Result verifyResourceName(String name, ResourceType type,User loginUser) {
public Result verifyResourceName(String fullName, ResourceType type,User loginUser) {
Result result = new Result();
putMsg(result, Status.SUCCESS);
if (checkResourceExists(name, 0, type.ordinal())) {
logger.error("resource type:{} name:{} has exist, can't create again.", type, name);
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource type:{} name:{} has exist, can't create again.", type, fullName);
putMsg(result, Status.RESOURCE_EXIST);
} else {
// query tenant
@ -461,9 +662,9 @@ public class ResourcesService extends BaseService {
String tenantCode = tenant.getTenantCode();
try {
String hdfsFilename = getHdfsFileName(type,tenantCode,name);
String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
if(HadoopUtils.getInstance().exists(hdfsFilename)){
logger.error("resource type:{} name:{} has exist in hdfs {}, can't create again.", type, name,hdfsFilename);
logger.error("resource type:{} name:{} has exist in hdfs {}, can't create again.", type, fullName,hdfsFilename);
putMsg(result, Status.RESOURCE_FILE_EXIST,hdfsFilename);
}
@ -480,6 +681,48 @@ public class ResourcesService extends BaseService {
return result;
}
/**
* query resource by full name or id and type
* @param fullName resource full name
* @param id resource id
* @param type resource type
* @return the resource (or its parent, when queried by id) if it exists
*/
public Result queryResource(String fullName,Integer id,ResourceType type) {
Result result = new Result();
if (StringUtils.isBlank(fullName) && id == null) {
logger.error("You must input one of fullName and pid");
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR);
return result;
}
if (StringUtils.isNotBlank(fullName)) {
List<Resource> resourceList = resourcesMapper.queryResource(fullName,type.ordinal());
if (CollectionUtils.isEmpty(resourceList)) {
logger.error("resource file not exist, resource full name {} ", fullName);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
putMsg(result, Status.SUCCESS);
result.setData(resourceList.get(0));
} else {
Resource resource = resourcesMapper.selectById(id);
if (resource == null) {
logger.error("resource file not exist, resource id {}", id);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
Resource parentResource = resourcesMapper.selectById(resource.getPid());
if (parentResource == null) {
logger.error("parent resource file not exist, resource id {}", id);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
putMsg(result, Status.SUCCESS);
result.setData(parentResource);
}
return result;
}
/**
* view resource file online
*
@ -501,7 +744,7 @@ public class ResourcesService extends BaseService {
// get resource by id
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("resouce file not exist, resource id {}", resourceId);
logger.error("resource file not exist, resource id {}", resourceId);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
@ -511,7 +754,7 @@ public class ResourcesService extends BaseService {
if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
if (!strList.contains(nameSuffix)) {
logger.error("resouce suffix {} not support view, resource id {}", nameSuffix, resourceId);
logger.error("resource suffix {} not support view, resource id {}", nameSuffix, resourceId);
putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
return result;
}
@ -523,7 +766,7 @@ public class ResourcesService extends BaseService {
}
// hdfs path
String hdfsFileName = HadoopUtils.getHdfsFilename(tenantCode, resource.getAlias());
String hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resource.getFullName());
logger.info("resource hdfs path is {} ", hdfsFileName);
try {
if(HadoopUtils.getInstance().exists(hdfsFileName)){
@ -559,7 +802,7 @@ public class ResourcesService extends BaseService {
* @return create result code
*/
@Transactional(rollbackFor = Exception.class)
public Result onlineCreateResource(User loginUser, ResourceType type, String fileName, String fileSuffix, String desc, String content) {
public Result onlineCreateResource(User loginUser, ResourceType type, String fileName, String fileSuffix, String desc, String content,int pid,String currentDirectory) {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
@ -581,15 +824,16 @@ public class ResourcesService extends BaseService {
}
String name = fileName.trim() + "." + nameSuffix;
String fullName = currentDirectory.equals("/") ? String.format("%s%s",currentDirectory,name):String.format("%s/%s",currentDirectory,name);
result = verifyResourceName(name,type,loginUser);
result = verifyResourceName(fullName,type,loginUser);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
// save data
Date now = new Date();
Resource resource = new Resource(name,name,desc,loginUser.getId(),type,content.getBytes().length,now,now);
Resource resource = new Resource(pid,name,fullName,false,desc,name,loginUser.getId(),type,content.getBytes().length,now,now);
resourcesMapper.insert(resource);
@ -605,7 +849,7 @@ public class ResourcesService extends BaseService {
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
result = uploadContentToHdfs(name, tenantCode, content);
result = uploadContentToHdfs(fullName, tenantCode, content);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
throw new RuntimeException(result.getMsg());
}
@ -657,7 +901,7 @@ public class ResourcesService extends BaseService {
resourcesMapper.updateById(resource);
result = uploadContentToHdfs(resource.getAlias(), tenantCode, content);
result = uploadContentToHdfs(resource.getFullName(), tenantCode, content);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
throw new RuntimeException(result.getMsg());
}
@ -665,10 +909,10 @@ public class ResourcesService extends BaseService {
}
/**
* @param resourceName
* @param tenantCode
* @param content
* @return
* @param resourceName resource name
* @param tenantCode tenant code
* @param content content
* @return result
*/
private Result uploadContentToHdfs(String resourceName, String tenantCode, String content) {
Result result = new Result();
@ -684,8 +928,8 @@ public class ResourcesService extends BaseService {
return result;
}
// get file hdfs path
hdfsFileName = HadoopUtils.getHdfsFilename(tenantCode, resourceName);
// get resource file hdfs path
hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resourceName);
String resourcePath = HadoopUtils.getHdfsResDir(tenantCode);
logger.info("resource hdfs path is {} ", hdfsFileName);
@ -729,11 +973,14 @@ public class ResourcesService extends BaseService {
logger.error("download file not exist, resource id {}", resourceId);
return null;
}
if (resource.isDirectory()) {
logger.error("resource id {} is directory,can't download it", resourceId);
throw new RuntimeException("cant't download directory");
}
User user = userMapper.queryDetailsById(resource.getUserId());
String tenantCode = tenantMapper.queryById(user.getTenantId()).getTenantCode();
String hdfsFileName = "";
hdfsFileName = getHdfsFileName(resource, tenantCode, hdfsFileName);
String hdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getAlias());
String localFileName = FileUtils.getDownloadFilename(resource.getAlias());
logger.info("resource hdfs path is {} ", hdfsFileName);
@ -743,6 +990,33 @@ public class ResourcesService extends BaseService {
}
/**
* authorize resource tree
*
* @param loginUser login user
* @param userId user id
* @return resource tree that can be authorized to the user
*/
public Map<String, Object> authorizeResourceTree(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (checkAdmin(loginUser, result)) {
return result;
}
List<Resource> resourceList = resourcesMapper.queryResourceExceptUserId(userId);
List<ResourceComponent> list ;
if (CollectionUtils.isNotEmpty(resourceList)) {
Visitor visitor = new ResourceTreeVisitor(resourceList);
list = visitor.visit().getChildren();
}else {
list = new ArrayList<>(0);
}
result.put(Constants.DATA_LIST, list);
putMsg(result,Status.SUCCESS);
return result;
}
/**
* unauthorized file
*
@ -757,8 +1031,8 @@ public class ResourcesService extends BaseService {
return result;
}
List<Resource> resourceList = resourcesMapper.queryResourceExceptUserId(userId);
List<Object> list ;
if (CollectionUtils.isNotEmpty(resourceList)) {
List<Resource> list ;
if (resourceList != null && resourceList.size() > 0) {
Set<Resource> resourceSet = new HashSet<>(resourceList);
List<Resource> authedResourceList = resourcesMapper.queryAuthorizedResourceList(userId);
@ -767,15 +1041,12 @@ public class ResourcesService extends BaseService {
}else {
list = new ArrayList<>(0);
}
result.put(Constants.DATA_LIST, list);
Visitor visitor = new ResourceTreeVisitor(list);
result.put(Constants.DATA_LIST, visitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* unauthorized udf function
*
@ -841,46 +1112,15 @@ public class ResourcesService extends BaseService {
return result;
}
List<Resource> authedResources = resourcesMapper.queryAuthorizedResourceList(userId);
result.put(Constants.DATA_LIST, authedResources);
Visitor visitor = new ResourceTreeVisitor(authedResources);
logger.info(JSON.toJSONString(visitor.visit(), SerializerFeature.SortField));
String jsonTreeStr = JSON.toJSONString(visitor.visit().getChildren(), SerializerFeature.SortField);
logger.info(jsonTreeStr);
result.put(Constants.DATA_LIST, visitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* get hdfs file name
*
* @param resource resource
* @param tenantCode tenant code
* @param hdfsFileName hdfs file name
* @return hdfs file name
*/
private String getHdfsFileName(Resource resource, String tenantCode, String hdfsFileName) {
if (resource.getType().equals(ResourceType.FILE)) {
hdfsFileName = HadoopUtils.getHdfsFilename(tenantCode, resource.getAlias());
} else if (resource.getType().equals(ResourceType.UDF)) {
hdfsFileName = HadoopUtils.getHdfsUdfFilename(tenantCode, resource.getAlias());
}
return hdfsFileName;
}
/**
* get hdfs file name
*
* @param resourceType resource type
* @param tenantCode tenant code
* @param hdfsFileName hdfs file name
* @return hdfs file name
*/
private String getHdfsFileName(ResourceType resourceType, String tenantCode, String hdfsFileName) {
if (resourceType.equals(ResourceType.FILE)) {
hdfsFileName = HadoopUtils.getHdfsFilename(tenantCode, hdfsFileName);
} else if (resourceType.equals(ResourceType.UDF)) {
hdfsFileName = HadoopUtils.getHdfsUdfFilename(tenantCode, hdfsFileName);
}
return hdfsFileName;
}
/**
* get authorized resource list
*
@ -920,4 +1160,69 @@ public class ResourcesService extends BaseService {
return tenant.getTenantCode();
}
/**
* list all children id
* @param resource resource
* @return all children id
*/
List<Integer> listAllChildren(Resource resource){
List<Integer> childList = new ArrayList<>();
if (resource.getId() != -1) {
childList.add(resource.getId());
}
if(resource.isDirectory()){
listAllChildren(resource.getId(),childList);
}
return childList;
}
/**
* list all children id
* @param resourceId resource id
* @param childList child list
*/
void listAllChildren(int resourceId,List<Integer> childList){
List<Integer> children = resourcesMapper.listChildren(resourceId);
for(int childId:children){
childList.add(childId);
listAllChildren(childId,childList);
}
}
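The recursive helper issues one listChildren query per node and grows the call stack with directory depth; an iterative traversal over the same mapper gives identical output without the recursion. A sketch:
// Sketch: breadth-first equivalent of the recursive listAllChildren(int, List).
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.List;

void listAllChildrenIteratively(int resourceId, List<Integer> childList) {
    Deque<Integer> pending = new ArrayDeque<>();
    pending.add(resourceId);
    while (!pending.isEmpty()) {
        int current = pending.poll();
        for (int childId : resourcesMapper.listChildren(current)) {
            childList.add(childId);
            pending.add(childId);
        }
    }
}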
/**
* get resource process map: key is resource id, value is the set of process definition ids
* @return resource process definition map
*/
private Map<Integer,Set<Integer>> getResourceProcessMap(){
Map<Integer, String> map = new HashMap<>();
Map<Integer, Set<Integer>> result = new HashMap<>();
List<Map<String, Object>> list = processDefinitionMapper.listResources();
if (CollectionUtils.isNotEmpty(list)) {
for (Map<String, Object> tempMap : list) {
map.put((Integer) tempMap.get("id"), (String)tempMap.get("resource_ids"));
}
}
for (Map.Entry<Integer, String> entry : map.entrySet()) {
Integer mapKey = entry.getKey();
String[] arr = entry.getValue().split(",");
Set<Integer> mapValues = Arrays.stream(arr).map(Integer::parseInt).collect(Collectors.toSet());
for (Integer value : mapValues) {
if (result.containsKey(value)) {
Set<Integer> set = result.get(value);
set.add(mapKey);
result.put(value, set);
} else {
Set<Integer> set = new HashSet<>();
set.add(mapKey);
result.put(value, set);
}
}
}
return result;
}
}
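The containsKey/get/put branch in getResourceProcessMap is the classic multimap-insert pattern; Map.computeIfAbsent collapses it. A sketch over the same map and result variables used above:
// Sketch: same inversion (process id -> resource ids becomes resource id ->
// process ids) using computeIfAbsent instead of the containsKey branch.
for (Map.Entry<Integer, String> entry : map.entrySet()) {
    Integer processId = entry.getKey();
    for (String idStr : entry.getValue().split(",")) {
        result.computeIfAbsent(Integer.parseInt(idStr), k -> new HashSet<>()).add(processId);
    }
}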

4
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/UdfFuncService.java

@ -118,7 +118,7 @@ public class UdfFuncService extends BaseService{
}
udf.setDescription(desc);
udf.setResourceId(resourceId);
udf.setResourceName(resource.getAlias());
udf.setResourceName(resource.getFullName());
udf.setType(type);
udf.setCreateTime(now);
@ -226,7 +226,7 @@ public class UdfFuncService extends BaseService{
}
udf.setDescription(desc);
udf.setResourceId(resourceId);
udf.setResourceName(resource.getAlias());
udf.setResourceName(resource.getFullName());
udf.setType(type);
udf.setUpdateTime(now);

4
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/DataSourceControllerTest.java

@ -39,6 +39,7 @@ import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.
* data source controller test
*/
public class DataSourceControllerTest extends AbstractControllerTest{
private static Logger logger = LoggerFactory.getLogger(DataSourceControllerTest.class);
@Ignore
@ -95,6 +96,7 @@ public class DataSourceControllerTest extends AbstractControllerTest{
@Ignore
@Test
public void testQueryDataSource() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
@ -169,6 +171,7 @@ public class DataSourceControllerTest extends AbstractControllerTest{
}
@Ignore
@Test
public void testConnectionTest() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
@ -248,6 +251,7 @@ public class DataSourceControllerTest extends AbstractControllerTest{
@Ignore
@Test
public void testDelete() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();

58
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/dto/resources/filter/ResourceFilterTest.java

@ -0,0 +1,58 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.dto.resources.filter;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
/**
* resource filter test
*/
public class ResourceFilterTest {
private static Logger logger = LoggerFactory.getLogger(ResourceFilterTest.class);
@Test
public void filterTest(){
List<Resource> allList = new ArrayList<>();
Resource resource1 = new Resource(3,-1,"b","/b",true);
Resource resource2 = new Resource(4,2,"a1.txt","/a/a1.txt",false);
Resource resource3 = new Resource(5,3,"b1.txt","/b/b1.txt",false);
Resource resource4 = new Resource(6,3,"b2.jar","/b/b2.jar",false);
Resource resource5 = new Resource(7,-1,"b2","/b2",true);
Resource resource6 = new Resource(8,-1,"b2","/b/b2",true);
Resource resource7 = new Resource(9,8,"c2.jar","/b/b2/c2.jar",false);
allList.add(resource1);
allList.add(resource2);
allList.add(resource3);
allList.add(resource4);
allList.add(resource5);
allList.add(resource6);
allList.add(resource7);
ResourceFilter resourceFilter = new ResourceFilter(".jar",allList);
List<Resource> resourceList = resourceFilter.filter();
Assert.assertNotNull(resourceList);
resourceList.stream().forEach(t-> logger.info(t.toString()));
}
}

82
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/dto/resources/visitor/ResourceTreeVisitorTest.java

@ -0,0 +1,82 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.dto.resources.visitor;
import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.junit.Assert;
import org.junit.Test;
import java.util.ArrayList;
import java.util.List;
/**
* resource tree visitor test
*/
public class ResourceTreeVisitorTest {
@Test
public void visit() throws Exception {
List<Resource> resourceList = new ArrayList<>();
Resource resource1 = new Resource(3,-1,"b","/b",true);
Resource resource2 = new Resource(4,2,"a1.txt","/a/a1.txt",false);
Resource resource3 = new Resource(5,3,"b1.txt","/b/b1.txt",false);
Resource resource4 = new Resource(6,3,"b2.jar","/b/b2.jar",false);
Resource resource5 = new Resource(7,-1,"b2","/b2",true);
Resource resource6 = new Resource(8,-1,"b2","/b/b2",true);
Resource resource7 = new Resource(9,8,"c2.jar","/b/b2/c2.jar",false);
resourceList.add(resource1);
resourceList.add(resource2);
resourceList.add(resource3);
resourceList.add(resource4);
resourceList.add(resource5);
resourceList.add(resource6);
resourceList.add(resource7);
ResourceTreeVisitor resourceTreeVisitor = new ResourceTreeVisitor(resourceList);
ResourceComponent resourceComponent = resourceTreeVisitor.visit();
Assert.assertNotNull(resourceComponent.getChildren());
}
@Test
public void rootNode() throws Exception {
List<Resource> resourceList = new ArrayList<>();
Resource resource1 = new Resource(3,-1,"b","/b",true);
Resource resource2 = new Resource(4,2,"a1.txt","/a/a1.txt",false);
Resource resource3 = new Resource(5,3,"b1.txt","/b/b1.txt",false);
Resource resource4 = new Resource(6,3,"b2.jar","/b/b2.jar",false);
Resource resource5 = new Resource(7,-1,"b2","/b2",true);
Resource resource6 = new Resource(8,-1,"b2","/b/b2",true);
Resource resource7 = new Resource(9,8,"c2.jar","/b/b2/c2.jar",false);
resourceList.add(resource1);
resourceList.add(resource2);
resourceList.add(resource3);
resourceList.add(resource4);
resourceList.add(resource5);
resourceList.add(resource6);
resourceList.add(resource7);
ResourceTreeVisitor resourceTreeVisitor = new ResourceTreeVisitor(resourceList);
Assert.assertTrue(resourceTreeVisitor.rootNode(resource1));
Assert.assertTrue(resourceTreeVisitor.rootNode(resource2));
Assert.assertFalse(resourceTreeVisitor.rootNode(resource3));
}
}
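The rootNode assertions pin down the tree-building rule: a resource is a root when its pid is -1, or when its parent id is absent from the visited list (resource2 has pid 2, and no resource with id 2 is in the list). A sketch of a root test consistent with these assertions; the actual ResourceTreeVisitor logic may differ:
// Sketch of a root check matching the assertions above; not necessarily the
// implementation inside ResourceTreeVisitor.
import org.apache.dolphinscheduler.dao.entity.Resource;
import java.util.List;

static boolean isRoot(Resource resource, List<Resource> all) {
    if (resource.getPid() == -1) {
        return true;
    }
    return all.stream().noneMatch(other -> other.getId() == resource.getPid());
}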

2
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/enums/StatusTest.java

@ -28,7 +28,7 @@ public class StatusTest {
@Test
public void testGetCode() {
assertEquals(Status.SUCCESS.getCode(), 0);
assertEquals(0, Status.SUCCESS.getCode());
assertNotEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR.getCode(), 0);
}

112
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ResourcesServiceTest.java

@ -24,10 +24,7 @@ import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.FileUtils;
import org.apache.dolphinscheduler.common.utils.HadoopUtils;
import org.apache.dolphinscheduler.common.utils.PropertyUtils;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.UdfFunc;
@ -40,6 +37,7 @@ import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.omg.CORBA.Any;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PowerMockIgnore;
import org.powermock.core.classloader.annotations.PrepareForTest;
@ -73,6 +71,8 @@ public class ResourcesServiceTest {
private UserMapper userMapper;
@Mock
private UdfFuncMapper udfFunctionMapper;
@Mock
private ProcessDefinitionMapper processDefinitionMapper;
@Before
public void setUp() {
@ -96,14 +96,14 @@ public class ResourcesServiceTest {
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
User user = new User();
//HDFS_NOT_STARTUP
Result result = resourcesService.createResource(user,"ResourcesServiceTest","ResourcesServiceTest",ResourceType.FILE,null);
Result result = resourcesService.createResource(user,"ResourcesServiceTest","ResourcesServiceTest",ResourceType.FILE,null,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(),result.getMsg());
//RESOURCE_FILE_IS_EMPTY
MockMultipartFile mockMultipartFile = new MockMultipartFile("test.pdf",new String().getBytes());
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
result = resourcesService.createResource(user,"ResourcesServiceTest","ResourcesServiceTest",ResourceType.FILE,mockMultipartFile);
result = resourcesService.createResource(user,"ResourcesServiceTest","ResourcesServiceTest",ResourceType.FILE,mockMultipartFile,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_FILE_IS_EMPTY.getMsg(),result.getMsg());
@ -111,31 +111,42 @@ public class ResourcesServiceTest {
mockMultipartFile = new MockMultipartFile("test.pdf","test.pdf","pdf",new String("test").getBytes());
PowerMockito.when(FileUtils.suffix("test.pdf")).thenReturn("pdf");
PowerMockito.when(FileUtils.suffix("ResourcesServiceTest.jar")).thenReturn("jar");
result = resourcesService.createResource(user,"ResourcesServiceTest.jar","ResourcesServiceTest",ResourceType.FILE,mockMultipartFile);
result = resourcesService.createResource(user,"ResourcesServiceTest.jar","ResourcesServiceTest",ResourceType.FILE,mockMultipartFile,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_SUFFIX_FORBID_CHANGE.getMsg(),result.getMsg());
//UDF_RESOURCE_SUFFIX_NOT_JAR
mockMultipartFile = new MockMultipartFile("ResourcesServiceTest.pdf","ResourcesServiceTest.pdf","pdf",new String("test").getBytes());
PowerMockito.when(FileUtils.suffix("ResourcesServiceTest.pdf")).thenReturn("pdf");
result = resourcesService.createResource(user,"ResourcesServiceTest.pdf","ResourcesServiceTest",ResourceType.UDF,mockMultipartFile);
result = resourcesService.createResource(user,"ResourcesServiceTest.pdf","ResourcesServiceTest",ResourceType.UDF,mockMultipartFile,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.UDF_RESOURCE_SUFFIX_NOT_JAR.getMsg(),result.getMsg());
//UDF_RESOURCE_SUFFIX_NOT_JAR
Mockito.when(tenantMapper.queryById(0)).thenReturn(getTenant());
Mockito.when(resourcesMapper.queryResourceList("ResourcesServiceTest.jar", 0, 1)).thenReturn(getResourceList());
mockMultipartFile = new MockMultipartFile("ResourcesServiceTest.jar","ResourcesServiceTest.jar","pdf",new String("test").getBytes());
result = resourcesService.createResource(user,"ResourcesServiceTest.jar","ResourcesServiceTest",ResourceType.UDF,mockMultipartFile);
}
@Test
public void testCreateDirectory(){
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
User user = new User();
//HDFS_NOT_STARTUP
Result result = resourcesService.createDirectory(user,"directoryTest","directory test",ResourceType.FILE,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_EXIST.getMsg(),result.getMsg());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(),result.getMsg());
//SUCCESS
Mockito.when(resourcesMapper.queryResourceList("ResourcesServiceTest.jar", 0, 1)).thenReturn(new ArrayList<>());
result = resourcesService.createResource(user,"ResourcesServiceTest.jar","ResourcesServiceTest",ResourceType.UDF,mockMultipartFile);
//PARENT_RESOURCE_NOT_EXIST
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
Mockito.when(resourcesMapper.selectById(Mockito.anyInt())).thenReturn(null);
result = resourcesService.createDirectory(user,"directoryTest","directory test",ResourceType.FILE,1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(),result.getMsg());
Assert.assertEquals(Status.PARENT_RESOURCE_NOT_EXIST.getMsg(),result.getMsg());
//RESOURCE_EXIST
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
Mockito.when(resourcesMapper.queryResourceList("/directoryTest", 0, 0)).thenReturn(getResourceList());
result = resourcesService.createDirectory(user,"directoryTest","directory test",ResourceType.FILE,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_EXIST.getMsg(),result.getMsg());
}
@ -163,41 +174,46 @@ public class ResourcesServiceTest {
//SUCCESS
user.setId(1);
result = resourcesService.updateResource(user,1,"ResourcesServiceTest.jar","ResourcesServiceTest.jar",ResourceType.FILE);
Mockito.when(userMapper.queryDetailsById(1)).thenReturn(getUser());
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
result = resourcesService.updateResource(user,1,"ResourcesServiceTest.jar","ResourcesServiceTest",ResourceType.FILE);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(),result.getMsg());
//RESOURCE_EXIST
Mockito.when(resourcesMapper.queryResourceList("ResourcesServiceTest1.jar", 0, 0)).thenReturn(getResourceList());
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest1.jar",ResourceType.FILE);
Mockito.when(resourcesMapper.queryResourceList("/ResourcesServiceTest1.jar", 0, 0)).thenReturn(getResourceList());
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest",ResourceType.FILE);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_EXIST.getMsg(),result.getMsg());
//USER_NOT_EXIST
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest1.jar",ResourceType.UDF);
Mockito.when(userMapper.queryDetailsById(Mockito.anyInt())).thenReturn(null);
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest",ResourceType.UDF);
logger.info(result.toString());
Assert.assertTrue(Status.USER_NOT_EXIST.getCode() == result.getCode());
//TENANT_NOT_EXIST
Mockito.when(userMapper.queryDetailsById(1)).thenReturn(getUser());
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest1.jar",ResourceType.UDF);
Mockito.when(tenantMapper.queryById(Mockito.anyInt())).thenReturn(null);
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest",ResourceType.UDF);
logger.info(result.toString());
Assert.assertEquals(Status.TENANT_NOT_EXIST.getMsg(),result.getMsg());
//RESOURCE_NOT_EXIST
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
PowerMockito.when(HadoopUtils.getHdfsFilename(Mockito.any(), Mockito.any())).thenReturn("test1");
PowerMockito.when(HadoopUtils.getHdfsResourceFileName(Mockito.any(), Mockito.any())).thenReturn("test1");
try {
Mockito.when(hadoopUtils.exists("test")).thenReturn(true);
} catch (IOException e) {
e.printStackTrace();
}
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest1.jar",ResourceType.UDF);
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest",ResourceType.UDF);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(),result.getMsg());
//SUCCESS
PowerMockito.when(HadoopUtils.getHdfsFilename(Mockito.any(), Mockito.any())).thenReturn("test");
PowerMockito.when(HadoopUtils.getHdfsResourceFileName(Mockito.any(), Mockito.any())).thenReturn("test");
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest1.jar",ResourceType.UDF);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(),result.getMsg());
@ -212,8 +228,8 @@ public class ResourcesServiceTest {
resourcePage.setTotal(1);
resourcePage.setRecords(getResourceList());
Mockito.when(resourcesMapper.queryResourcePaging(Mockito.any(Page.class),
Mockito.eq(0), Mockito.eq(0), Mockito.eq("test"))).thenReturn(resourcePage);
Map<String, Object> result = resourcesService.queryResourceListPaging(loginUser,ResourceType.FILE,"test",1,10);
Mockito.eq(0),Mockito.eq(-1), Mockito.eq(0), Mockito.eq("test"))).thenReturn(resourcePage);
Map<String, Object> result = resourcesService.queryResourceListPaging(loginUser,-1,ResourceType.FILE,"test",1,10);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
PageInfo pageInfo = (PageInfo) result.get(Constants.DATA_LIST);
@ -263,6 +279,7 @@ public class ResourcesServiceTest {
//TENANT_NOT_EXIST
loginUser.setUserType(UserType.ADMIN_USER);
loginUser.setTenantId(2);
Mockito.when(userMapper.queryDetailsById(Mockito.anyInt())).thenReturn(loginUser);
result = resourcesService.delete(loginUser,1);
logger.info(result.toString());
Assert.assertEquals(Status.TENANT_NOT_EXIST.getMsg(), result.getMsg());
@ -285,14 +302,20 @@ public class ResourcesServiceTest {
User user = new User();
user.setId(1);
Mockito.when(resourcesMapper.queryResourceList("test", 0, 0)).thenReturn(getResourceList());
Result result = resourcesService.verifyResourceName("test",ResourceType.FILE,user);
Mockito.when(resourcesMapper.queryResourceList("/ResourcesServiceTest.jar", 0, 0)).thenReturn(getResourceList());
Result result = resourcesService.verifyResourceName("/ResourcesServiceTest.jar",ResourceType.FILE,user);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_EXIST.getMsg(), result.getMsg());
//TENANT_NOT_EXIST
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
result = resourcesService.verifyResourceName("test1",ResourceType.FILE,user);
String unExistFullName = "/test.jar";
try {
Mockito.when(hadoopUtils.exists(unExistFullName)).thenReturn(false);
} catch (IOException e) {
logger.error("hadoop error",e);
}
result = resourcesService.verifyResourceName("/test.jar",ResourceType.FILE,user);
logger.info(result.toString());
Assert.assertEquals(Status.TENANT_NOT_EXIST.getMsg(), result.getMsg());
@ -304,10 +327,10 @@ public class ResourcesServiceTest {
} catch (IOException e) {
logger.error("hadoop error",e);
}
PowerMockito.when(HadoopUtils.getHdfsFilename("123", "test1")).thenReturn("test");
result = resourcesService.verifyResourceName("test1",ResourceType.FILE,user);
PowerMockito.when(HadoopUtils.getHdfsResourceFileName("123", "test1")).thenReturn("test");
result = resourcesService.verifyResourceName("/ResourcesServiceTest.jar",ResourceType.FILE,user);
logger.info(result.toString());
Assert.assertTrue(Status.RESOURCE_FILE_EXIST.getCode()==result.getCode());
Assert.assertTrue(Status.RESOURCE_EXIST.getCode()==result.getCode());
//SUCCESS
result = resourcesService.verifyResourceName("test2",ResourceType.FILE,user);
@ -389,14 +412,14 @@ public class ResourcesServiceTest {
PowerMockito.when(HadoopUtils.getHdfsUdfDir("udfDir")).thenReturn("udfDir");
User user = getUser();
//HDFS_NOT_STARTUP
Result result = resourcesService.onlineCreateResource(user,ResourceType.FILE,"test","jar","desc","content");
Result result = resourcesService.onlineCreateResource(user,ResourceType.FILE,"test","jar","desc","content",-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(),result.getMsg());
//RESOURCE_SUFFIX_NOT_SUPPORT_VIEW
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("class");
result = resourcesService.onlineCreateResource(user,ResourceType.FILE,"test","jar","desc","content");
result = resourcesService.onlineCreateResource(user,ResourceType.FILE,"test","jar","desc","content",-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW.getMsg(),result.getMsg());
@ -404,7 +427,7 @@ public class ResourcesServiceTest {
try {
PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("jar");
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
result = resourcesService.onlineCreateResource(user, ResourceType.FILE, "test", "jar", "desc", "content");
result = resourcesService.onlineCreateResource(user, ResourceType.FILE, "test", "jar", "desc", "content",-1,"/");
}catch (RuntimeException ex){
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(), ex.getMessage());
@ -413,7 +436,7 @@ public class ResourcesServiceTest {
//SUCCESS
Mockito.when(FileUtils.getUploadFilename(Mockito.anyString(), Mockito.anyString())).thenReturn("test");
PowerMockito.when(FileUtils.writeContent2File(Mockito.anyString(), Mockito.anyString())).thenReturn(true);
result = resourcesService.onlineCreateResource(user,ResourceType.FILE,"test","jar","desc","content");
result = resourcesService.onlineCreateResource(user,ResourceType.FILE,"test","jar","desc","content",-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(),result.getMsg());
@ -584,13 +607,26 @@ public class ResourcesServiceTest {
private Resource getResource(){
Resource resource = new Resource();
resource.setPid(-1);
resource.setUserId(1);
resource.setDescription("ResourcesServiceTest.jar");
resource.setAlias("ResourcesServiceTest.jar");
resource.setFullName("/ResourcesServiceTest.jar");
resource.setType(ResourceType.FILE);
return resource;
}
private Resource getUdfResource(){
Resource resource = new Resource();
resource.setUserId(1);
resource.setDescription("udfTest");
resource.setAlias("udfTest.jar");
resource.setFullName("/udfTest.jar");
resource.setType(ResourceType.UDF);
return resource;
}
private UdfFunc getUdfFunc(){
UdfFunc udfFunc = new UdfFunc();

7
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/utils/CheckUtilsTest.java

@ -43,6 +43,7 @@ import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Map;
import static org.junit.Assert.*;
@ -173,7 +174,11 @@ public class CheckUtilsTest {
// MapreduceParameters
MapreduceParameters mapreduceParameters = new MapreduceParameters();
assertFalse(CheckUtils.checkTaskNodeParameters(JSONUtils.toJsonString(mapreduceParameters), TaskType.MR.toString()));
mapreduceParameters.setMainJar(new ResourceInfo());
ResourceInfo resourceInfoMapreduce = new ResourceInfo();
resourceInfoMapreduce.setId(1);
resourceInfoMapreduce.setRes("");
mapreduceParameters.setMainJar(resourceInfoMapreduce);
mapreduceParameters.setProgramType(ProgramType.JAVA);
assertTrue(CheckUtils.checkTaskNodeParameters(JSONUtils.toJsonString(mapreduceParameters), TaskType.MR.toString()));

3
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java

@ -972,7 +972,8 @@ public final class Constants {
public static final String JDBC_POSTGRESQL = "jdbc:postgresql://";
public static final String JDBC_HIVE_2 = "jdbc:hive2://";
public static final String JDBC_CLICKHOUSE = "jdbc:clickhouse://";
public static final String JDBC_ORACLE = "jdbc:oracle:thin:@//";
public static final String JDBC_ORACLE_SID = "jdbc:oracle:thin:@";
public static final String JDBC_ORACLE_SERVICE_NAME = "jdbc:oracle:thin:@//";
public static final String JDBC_SQLSERVER = "jdbc:sqlserver://";
public static final String JDBC_DB2 = "jdbc:db2://";

12
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/AuthorizationType.java

@ -23,13 +23,17 @@ import com.baomidou.mybatisplus.annotation.EnumValue;
*/
public enum AuthorizationType {
/**
* 0 RESOURCE_FILE;
* 0 RESOURCE_FILE_ID;
* 1 RESOURCE_FILE_NAME;
* 2 UDF_FILE;
* 3 DATASOURCE;
* 4 UDF;
*/
RESOURCE_FILE(0, "resource file"),
DATASOURCE(1, "data source"),
UDF(2, "udf function");
RESOURCE_FILE_ID(0, "resource file id"),
RESOURCE_FILE_NAME(1, "resource file name"),
UDF_FILE(2, "udf file"),
DATASOURCE(3, "data source"),
UDF(4, "udf function");
AuthorizationType(int code, String descp){
this.code = code;

44
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/DbConnectType.java

@ -0,0 +1,44 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.enums;
import com.baomidou.mybatisplus.annotation.EnumValue;
public enum DbConnectType {
ORACLE_SERVICE_NAME(0, "Oracle Service Name"),
ORACLE_SID(1, "Oracle SID");
DbConnectType(int code, String descp) {
this.code = code;
this.descp = descp;
}
@EnumValue
private final int code;
private final String descp;
public int getCode() {
return code;
}
public String getDescp() {
return descp;
}
}
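This enum pairs with the Oracle constants split in Constants.java above: an SID connects as jdbc:oracle:thin:@host:port:SID, a service name as jdbc:oracle:thin:@//host:port/service. A hedged sketch of the prefix selection; the real wiring lives in the datasource classes, which are not in this hunk:
// Sketch: choose the JDBC prefix from the connect type; constant names are the
// ones added in Constants.java in this commit.
static String oracleJdbcPrefix(DbConnectType type) {
    switch (type) {
        case ORACLE_SID:
            return Constants.JDBC_ORACLE_SID;          // jdbc:oracle:thin:@host:port:SID
        case ORACLE_SERVICE_NAME:
            return Constants.JDBC_ORACLE_SERVICE_NAME; // jdbc:oracle:thin:@//host:port/service
        default:
            throw new IllegalArgumentException("unsupported oracle connect type: " + type);
    }
}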

10
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/process/ResourceInfo.java

@ -23,6 +23,16 @@ public class ResourceInfo {
/**
* res the name of the resource that was uploaded
*/
private int id;
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
private String res;
public String getRes() {

3
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/AbstractParameters.java

@ -17,6 +17,7 @@
package org.apache.dolphinscheduler.common.task;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import java.util.LinkedHashMap;
import java.util.List;
@ -31,7 +32,7 @@ public abstract class AbstractParameters implements IParameters {
public abstract boolean checkParameters();
@Override
public abstract List<String> getResourceFilesList();
public abstract List<ResourceInfo> getResourceFilesList();
/**
* local parameters

4
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/IParameters.java

@ -16,6 +16,8 @@
*/
package org.apache.dolphinscheduler.common.task;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import java.util.List;
/**
@ -34,5 +36,5 @@ public interface IParameters {
*
* @return resource files list
*/
List<String> getResourceFilesList();
List<ResourceInfo> getResourceFilesList();
}

3
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/conditions/ConditionsParameters.java

@ -18,6 +18,7 @@ package org.apache.dolphinscheduler.common.task.conditions;
import org.apache.dolphinscheduler.common.enums.DependentRelation;
import org.apache.dolphinscheduler.common.model.DependentTaskModel;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import java.util.List;
@ -41,7 +42,7 @@ public class ConditionsParameters extends AbstractParameters {
}
@Override
public List<String> getResourceFilesList() {
public List<ResourceInfo> getResourceFilesList() {
return null;
}

3
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/datax/DataxParameters.java

@ -20,6 +20,7 @@ import java.util.ArrayList;
import java.util.List;
import org.apache.commons.lang.StringUtils;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
/**
@ -198,7 +199,7 @@ public class DataxParameters extends AbstractParameters {
}
@Override
public List<String> getResourceFilesList() {
public List<ResourceInfo> getResourceFilesList() {
return new ArrayList<>();
}

3
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/dependent/DependentParameters.java

@ -18,6 +18,7 @@ package org.apache.dolphinscheduler.common.task.dependent;
import org.apache.dolphinscheduler.common.enums.DependentRelation;
import org.apache.dolphinscheduler.common.model.DependentTaskModel;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import java.util.ArrayList;
@ -36,7 +37,7 @@ public class DependentParameters extends AbstractParameters {
}
@Override
public List<String> getResourceFilesList() {
public List<ResourceInfo> getResourceFilesList() {
return new ArrayList<>();
}

29
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/flink/FlinkParameters.java

@ -19,10 +19,10 @@ package org.apache.dolphinscheduler.common.task.flink;
import org.apache.dolphinscheduler.common.enums.ProgramType;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import java.util.Collections;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
/**
* flink parameters
@ -50,35 +50,35 @@ public class FlinkParameters extends AbstractParameters {
private String mainArgs;
/**
* slot个数
* slot count
*/
private int slot;
/**
*Yarn application的名字
*Yarn application name
*/
private String appName;
/**
* taskManager 数量
* taskManager count
*/
private int taskManager;
/**
* jobManagerMemory 内存大小
* job manager memory
*/
private String jobManagerMemory ;
/**
* taskManagerMemory内存大小
* task manager memory
*/
private String taskManagerMemory;
/**
* resource list
*/
private List<ResourceInfo> resourceList;
private List<ResourceInfo> resourceList = new ArrayList<>();
/**
* The YARN queue to submit to
@ -207,16 +207,11 @@ public class FlinkParameters extends AbstractParameters {
@Override
public List<String> getResourceFilesList() {
if(resourceList != null ) {
List<String> resourceFiles = resourceList.stream()
.map(ResourceInfo::getRes).collect(Collectors.toList());
if(mainJar != null) {
resourceFiles.add(mainJar.getRes());
}
return resourceFiles;
public List<ResourceInfo> getResourceFilesList() {
if (mainJar != null && !resourceList.contains(mainJar)) {
resourceList.add(mainJar);
}
return Collections.emptyList();
return resourceList;
}

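The rewritten method folds mainJar into the resource list exactly once. One caveat: resourceList.contains(mainJar) relies on equals, and ResourceInfo as shown above does not override it, so deduplication works only while the very same mainJar instance is reused. If value-based deduplication were wanted, something like the following could be added inside ResourceInfo; this is a hedged suggestion, not part of the commit:

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (!(o instanceof ResourceInfo)) {
            return false;
        }
        ResourceInfo that = (ResourceInfo) o;
        // id and res are the two fields this commit keeps on ResourceInfo
        return id == that.id && java.util.Objects.equals(res, that.res);
    }

    @Override
    public int hashCode() {
        return java.util.Objects.hash(id, res);
    }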
3
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/http/HttpParameters.java

@ -19,6 +19,7 @@ package org.apache.dolphinscheduler.common.task.http;
import org.apache.dolphinscheduler.common.enums.HttpCheckCondition;
import org.apache.dolphinscheduler.common.enums.HttpMethod;
import org.apache.dolphinscheduler.common.process.HttpProperty;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.commons.lang.StringUtils;
@ -62,7 +63,7 @@ public class HttpParameters extends AbstractParameters {
}
@Override
public List<String> getResourceFilesList() {
public List<ResourceInfo> getResourceFilesList() {
return new ArrayList<>();
}

20
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/mr/MapreduceParameters.java

@ -19,10 +19,10 @@ package org.apache.dolphinscheduler.common.task.mr;
import org.apache.dolphinscheduler.common.enums.ProgramType;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import java.util.Collections;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
public class MapreduceParameters extends AbstractParameters {
@ -54,7 +54,7 @@ public class MapreduceParameters extends AbstractParameters {
/**
* resource list
*/
private List<ResourceInfo> resourceList;
private List<ResourceInfo> resourceList = new ArrayList<>();
/**
* program type
@ -125,16 +125,12 @@ public class MapreduceParameters extends AbstractParameters {
}
@Override
public List<String> getResourceFilesList() {
if(resourceList != null ) {
List<String> resourceFiles = resourceList.stream()
.map(ResourceInfo::getRes).collect(Collectors.toList());
if(mainJar != null) {
resourceFiles.add(mainJar.getRes());
}
return resourceFiles;
public List<ResourceInfo> getResourceFilesList() {
if (mainJar != null && !resourceList.contains(mainJar)) {
resourceList.add(mainJar);
}
return Collections.emptyList();
return resourceList;
}
@Override

3
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/procedure/ProcedureParameters.java

@ -16,6 +16,7 @@
*/
package org.apache.dolphinscheduler.common.task.procedure;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.commons.lang.StringUtils;
@ -74,7 +75,7 @@ public class ProcedureParameters extends AbstractParameters {
}
@Override
public List<String> getResourceFilesList() {
public List<ResourceInfo> getResourceFilesList() {
return new ArrayList<>();
}

10
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/python/PythonParameters.java

@ -21,7 +21,6 @@ import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import java.util.List;
import java.util.stream.Collectors;
public class PythonParameters extends AbstractParameters {
/**
@ -56,12 +55,7 @@ public class PythonParameters extends AbstractParameters {
}
@Override
public List<String> getResourceFilesList() {
if (resourceList != null) {
return resourceList.stream()
.map(p -> p.getRes()).collect(Collectors.toList());
}
return null;
public List<ResourceInfo> getResourceFilesList() {
return this.resourceList;
}
}

9
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/shell/ShellParameters.java

@ -59,12 +59,7 @@ public class ShellParameters extends AbstractParameters {
}
@Override
public List<String> getResourceFilesList() {
if (resourceList != null) {
return resourceList.stream()
.map(p -> p.getRes()).collect(Collectors.toList());
}
return null;
public List<ResourceInfo> getResourceFilesList() {
return resourceList;
}
}

16
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/spark/SparkParameters.java

@ -19,9 +19,10 @@ package org.apache.dolphinscheduler.common.task.spark;
import org.apache.dolphinscheduler.common.enums.ProgramType;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
/**
* spark parameters
@ -77,7 +78,7 @@ public class SparkParameters extends AbstractParameters {
/**
* resource list
*/
private List<ResourceInfo> resourceList;
private List<ResourceInfo> resourceList = new ArrayList<>();
/**
* The YARN queue to submit to
@ -218,15 +219,12 @@ public class SparkParameters extends AbstractParameters {
return mainJar != null && programType != null && sparkVersion != null;
}
@Override
public List<String> getResourceFilesList() {
if(resourceList !=null ) {
this.resourceList.add(mainJar);
return resourceList.stream()
.map(ResourceInfo::getRes).collect(Collectors.toList());
public List<ResourceInfo> getResourceFilesList() {
if (mainJar != null && !resourceList.contains(mainJar)) {
resourceList.add(mainJar);
}
return null;
return resourceList;
}

3
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/sql/SqlParameters.java

@ -16,6 +16,7 @@
*/
package org.apache.dolphinscheduler.common.task.sql;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.commons.lang.StringUtils;
@ -189,7 +190,7 @@ public class SqlParameters extends AbstractParameters {
}
@Override
public List<String> getResourceFilesList() {
public List<ResourceInfo> getResourceFilesList() {
return new ArrayList<>();
}

3
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/sqoop/SqoopParameters.java

@ -16,6 +16,7 @@
*/
package org.apache.dolphinscheduler.common.task.sqoop;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.utils.StringUtils;
@ -111,7 +112,7 @@ public class SqoopParameters extends AbstractParameters {
}
@Override
public List<String> getResourceFilesList() {
public List<ResourceInfo> getResourceFilesList() {
return new ArrayList<>();
}
}

3
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/subprocess/SubProcessParameters.java

@ -15,6 +15,7 @@
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.task.subprocess;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import java.util.ArrayList;
@ -42,7 +43,7 @@ public class SubProcessParameters extends AbstractParameters {
}
@Override
public List<String> getResourceFilesList() {
public List<ResourceInfo> getResourceFilesList() {
return new ArrayList<>();
}
}

2
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/thread/Stopper.java

@ -23,7 +23,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
*/
public class Stopper {
private static volatile AtomicBoolean signal = new AtomicBoolean(false);
private static AtomicBoolean signal = new AtomicBoolean(false);
public static final boolean isStopped(){
return signal.get();

49
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java

@ -26,6 +26,7 @@ import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONException;
import com.alibaba.fastjson.JSONObject;
import org.apache.commons.io.IOUtils;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.FileSystem;
@ -415,6 +416,22 @@ public class HadoopUtils implements Closeable {
}
}
/**
* hdfs resource dir
*
* @param resourceType resource type
* @param tenantCode tenant code
* @return hdfs resource dir
*/
public static String getHdfsDir(ResourceType resourceType, String tenantCode) {
String hdfsDir = "";
if (resourceType.equals(ResourceType.FILE)) {
hdfsDir = getHdfsResDir(tenantCode);
} else if (resourceType.equals(ResourceType.UDF)) {
hdfsDir = getHdfsUdfDir(tenantCode);
}
return hdfsDir;
}
/**
* hdfs resource dir
*
@ -450,22 +467,42 @@ public class HadoopUtils implements Closeable {
* get absolute path and name for file on hdfs
*
* @param tenantCode tenant code
* @param filename file name
* @param fileName file name
* @return get absolute path and name for file on hdfs
*/
/**
* get hdfs file name
*
* @param resourceType resource type
* @param tenantCode tenant code
* @param fileName file name
* @return hdfs file name
*/
public static String getHdfsFileName(ResourceType resourceType, String tenantCode, String fileName) {
return String.format("%s/%s", getHdfsDir(resourceType,tenantCode), fileName);
}
/**
* get absolute path and name for resource file on hdfs
*
* @param tenantCode tenant code
* @param fileName file name
* @return get absolute path and name for file on hdfs
*/
public static String getHdfsFilename(String tenantCode, String filename) {
return String.format("%s/%s", getHdfsResDir(tenantCode), filename);
public static String getHdfsResourceFileName(String tenantCode, String fileName) {
return String.format("%s/%s", getHdfsResDir(tenantCode), fileName);
}
/**
* get absolute path and name for udf file on hdfs
*
* @param tenantCode tenant code
* @param filename file name
* @param fileName file name
* @return get absolute path and name for udf file on hdfs
*/
public static String getHdfsUdfFilename(String tenantCode, String filename) {
return String.format("%s/%s", getHdfsUdfDir(tenantCode), filename);
public static String getHdfsUdfFileName(String tenantCode, String fileName) {
return String.format("%s/%s", getHdfsUdfDir(tenantCode), fileName);
}
/**

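Usage of the renamed helpers; the tenant and file names are illustrative, and the resulting paths follow whatever per-tenant layout getHdfsResDir and getHdfsUdfDir produce:

import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.utils.HadoopUtils;

public class HdfsPathExample {
    public static void main(String[] args) {
        // dispatches to getHdfsResDir(...) for FILE and getHdfsUdfDir(...) for UDF
        String filePath = HadoopUtils.getHdfsFileName(ResourceType.FILE, "tenant1", "etl/clean.sh");
        String udfPath = HadoopUtils.getHdfsFileName(ResourceType.UDF, "tenant1", "udfs/my-udf.jar");
        System.out.println(filePath);
        System.out.println(udfPath);
    }
}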
14
dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/task/FlinkParametersTest.java

@ -18,6 +18,7 @@ package org.apache.dolphinscheduler.common.task;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.flink.FlinkParameters;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.junit.Assert;
import org.junit.Test;
@ -28,8 +29,7 @@ public class FlinkParametersTest {
@Test
public void getResourceFilesList() {
FlinkParameters flinkParameters = new FlinkParameters();
Assert.assertNotNull(flinkParameters.getResourceFilesList());
Assert.assertTrue(flinkParameters.getResourceFilesList().isEmpty());
Assert.assertTrue(CollectionUtils.isEmpty(flinkParameters.getResourceFilesList()));
ResourceInfo mainResource = new ResourceInfo();
mainResource.setRes("testFlinkMain-1.0.0-SNAPSHOT.jar");
@ -41,15 +41,17 @@ public class FlinkParametersTest {
resourceInfos.add(resourceInfo1);
flinkParameters.setResourceList(resourceInfos);
Assert.assertNotNull(flinkParameters.getResourceFilesList());
Assert.assertEquals(2, flinkParameters.getResourceFilesList().size());
List<ResourceInfo> resourceFilesList = flinkParameters.getResourceFilesList();
Assert.assertNotNull(resourceFilesList);
Assert.assertEquals(2, resourceFilesList.size());
ResourceInfo resourceInfo2 = new ResourceInfo();
resourceInfo2.setRes("testFlinkParameters2.jar");
resourceInfos.add(resourceInfo2);
flinkParameters.setResourceList(resourceInfos);
Assert.assertNotNull(flinkParameters.getResourceFilesList());
Assert.assertEquals(3, flinkParameters.getResourceFilesList().size());
resourceFilesList = flinkParameters.getResourceFilesList();
Assert.assertNotNull(resourceFilesList);
Assert.assertEquals(3, resourceFilesList.size());
}
}

6
dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/StringTest.java

@ -24,12 +24,6 @@ import java.util.List;
public class StringTest {
@Test
public void test1(){
System.out.println(String.format("%s_%010d_%010d", String.valueOf(1), Long.valueOf(3), Integer.valueOf(4)));
}
@Test
public void stringCompareTest(){

15
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/OracleDataSource.java

@ -17,13 +17,28 @@
package org.apache.dolphinscheduler.dao.datasource;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.DbConnectType;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* data source of Oracle
*/
public class OracleDataSource extends BaseDataSource {
private static final Logger logger = LoggerFactory.getLogger(OracleDataSource.class);
private DbConnectType type;
public DbConnectType getType() {
return type;
}
public void setType(DbConnectType type) {
this.type = type;
}
/**
* @return driver class
*/

15
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/ProcessDefinition.java

@ -163,6 +163,11 @@ public class ProcessDefinition {
*/
private String modifyBy;
/**
* resource ids
*/
private String resourceIds;
public String getName() {
return name;
@ -334,6 +339,14 @@ public class ProcessDefinition {
this.scheduleReleaseState = scheduleReleaseState;
}
public String getResourceIds() {
return resourceIds;
}
public void setResourceIds(String resourceIds) {
this.resourceIds = resourceIds;
}
public int getTimeout() {
return timeout;
}
@ -393,6 +406,8 @@ public class ProcessDefinition {
", timeout=" + timeout +
", tenantId=" + tenantId +
", modifyBy='" + modifyBy + '\'' +
", resourceIds='" + resourceIds + '\'' +
'}';
}
}

68
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/Resource.java

@ -32,11 +32,26 @@ public class Resource {
@TableId(value="id", type=IdType.AUTO)
private int id;
/**
* parent id
*/
private int pid;
/**
* resource alias
*/
private String alias;
/**
* full name
*/
private String fullName;
/**
* is directory
*/
private boolean isDirectory=false;
/**
* description
*/
@ -89,7 +104,15 @@ public class Resource {
this.updateTime = updateTime;
}
public Resource(String alias, String fileName, String description, int userId, ResourceType type, long size, Date createTime, Date updateTime) {
public Resource(int id, int pid, String alias, String fullName, boolean isDirectory) {
this.id = id;
this.pid = pid;
this.alias = alias;
this.fullName = fullName;
this.isDirectory = isDirectory;
}
/*public Resource(String alias, String fileName, String description, int userId, ResourceType type, long size, Date createTime, Date updateTime) {
this.alias = alias;
this.fileName = fileName;
this.description = description;
@ -98,6 +121,20 @@ public class Resource {
this.size = size;
this.createTime = createTime;
this.updateTime = updateTime;
}*/
public Resource(int pid, String alias, String fullName, boolean isDirectory, String description, String fileName, int userId, ResourceType type, long size, Date createTime, Date updateTime) {
this.pid = pid;
this.alias = alias;
this.fullName = fullName;
this.isDirectory = isDirectory;
this.description = description;
this.fileName = fileName;
this.userId = userId;
this.type = type;
this.size = size;
this.createTime = createTime;
this.updateTime = updateTime;
}
public int getId() {
@ -116,6 +153,30 @@ public class Resource {
this.alias = alias;
}
public int getPid() {
return pid;
}
public void setPid(int pid) {
this.pid = pid;
}
public String getFullName() {
return fullName;
}
public void setFullName(String fullName) {
this.fullName = fullName;
}
public boolean isDirectory() {
return isDirectory;
}
public void setDirectory(boolean directory) {
isDirectory = directory;
}
public String getFileName() {
return fileName;
}
@ -177,9 +238,12 @@ public class Resource {
public String toString() {
return "Resource{" +
"id=" + id +
", pid=" + pid +
", alias='" + alias + '\'' +
", fileName='" + fileName + '\'' +
", fullName='" + fullName + '\'' +
", isDirectory=" + isDirectory +
", description='" + description + '\'' +
", fileName='" + fileName + '\'' +
", userId=" + userId +
", type=" + type +
", size=" + size +

11
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/ProcessDefinitionMapper.java

@ -20,9 +20,11 @@ import org.apache.dolphinscheduler.dao.entity.DefinitionGroupByUser;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import org.apache.ibatis.annotations.MapKey;
import org.apache.ibatis.annotations.Param;
import java.util.List;
import java.util.Map;
/**
* process definition mapper interface
@ -83,7 +85,7 @@ public interface ProcessDefinitionMapper extends BaseMapper<ProcessDefinition> {
List<ProcessDefinition> queryDefinitionListByTenant(@Param("tenantId") int tenantId);
/**
* count process definition group by user
* @param userId userId
* @param projectIds projectIds
* @param isAdmin isAdmin
@ -93,4 +95,11 @@ public interface ProcessDefinitionMapper extends BaseMapper<ProcessDefinition> {
@Param("userId") Integer userId,
@Param("projectIds") Integer[] projectIds,
@Param("isAdmin") boolean isAdmin);
/**
* list all resource ids
* @return resource ids list
*/
@MapKey("id")
List<Map<String, Object>> listResources();
}

53
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/ResourceMapper.java

@ -30,12 +30,12 @@ public interface ResourceMapper extends BaseMapper<Resource> {
/**
* query resource list
* @param alias alias
* @param fullName full name
* @param userId userId
* @param type type
* @return resource list
*/
List<Resource> queryResourceList(@Param("alias") String alias,
List<Resource> queryResourceList(@Param("fullName") String fullName,
@Param("userId") int userId,
@Param("type") int type);
@ -59,6 +59,7 @@ public interface ResourceMapper extends BaseMapper<Resource> {
*/
IPage<Resource> queryResourcePaging(IPage<Resource> page,
@Param("userId") int userId,
@Param("id") int id,
@Param("type") int type,
@Param("searchVal") String searchVal);
@ -76,13 +77,13 @@ public interface ResourceMapper extends BaseMapper<Resource> {
*/
List<Resource> queryResourceExceptUserId(@Param("userId") int userId);
/**
* query tenant code by name
* @param resName resource name
* @param resType resource type
* @return tenant code
*/
String queryTenantCodeByResourceName(@Param("resName") String resName);
String queryTenantCodeByResourceName(@Param("resName") String resName,@Param("resType") int resType);
/**
* list authorized resource
@ -91,4 +92,48 @@ public interface ResourceMapper extends BaseMapper<Resource> {
* @return resource list
*/
<T> List<Resource> listAuthorizedResource(@Param("userId") int userId,@Param("resNames")T[] resNames);
/**
* list authorized resource
* @param userId userId
* @param resIds resource ids
* @return resource list
*/
<T> List<Resource> listAuthorizedResourceById(@Param("userId") int userId,@Param("resIds")T[] resIds);
/**
* delete resource by id array
* @param resIds resource id array
* @return delete num
*/
int deleteIds(@Param("resIds")Integer[] resIds);
/**
* list children
* @param directoryId directory id
* @return resource id array
*/
List<Integer> listChildren(@Param("directoryId") int directoryId);
/**
* query resource by full name
* @param fullName full name
* @param type resource type
* @return resource
*/
List<Resource> queryResource(@Param("fullName") String fullName,@Param("type") int type);
/**
* list resource by id array
* @param resIds resource id array
* @return resource list
*/
List<Resource> listResourceByIds(@Param("resIds")Integer[] resIds);
/**
* update resource
* @param resourceList resource list
* @return update num
*/
int batchUpdateResource(@Param("resourceList") List<Resource> resourceList);
}

15
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/UdfFuncMapper.java

@ -86,4 +86,19 @@ public interface UdfFuncMapper extends BaseMapper<UdfFunc> {
*/
<T> List<UdfFunc> listAuthorizedUdfFunc (@Param("userId") int userId,@Param("udfIds")T[] udfIds);
/**
* list UDF by resource id
* @param resourceIds resource id array
* @return UDF function list
*/
List<UdfFunc> listUdfByResourceId(@Param("resourceIds") int[] resourceIds);
/**
* list authorized UDF by resource id
* @param resourceIds resource id array
* @return UDF function list
*/
List<UdfFunc> listAuthorizedUdfByResourceId(@Param("userId") int userId,@Param("resourceIds") int[] resourceIds);
}

11
dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProcessDefinitionMapper.xml

@ -29,7 +29,9 @@
and pd.name = #{processDefinitionName}
</select>
<select id="queryDefineListPaging" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition">
SELECT td.*,sc.schedule_release_state,tu.user_name
SELECT td.id, td.name, td.version, td.release_state, td.project_id, td.user_id, td.description, td.global_params,
td.flag, td.receivers, td.receivers_cc, td.timeout, td.tenant_id, td.modify_by, td.update_time, td.create_time,
sc.schedule_release_state, tu.user_name
FROM t_ds_process_definition td
left join (select process_definition_id,release_state as schedule_release_state from t_ds_schedules group by process_definition_id,release_state) sc on sc.process_definition_id = td.id
left join t_ds_user tu on td.user_id = tu.id
@ -87,4 +89,11 @@
pd.user_id = u.id AND pd.project_id = p.id
AND pd.id = #{processDefineId}
</select>
<select id="listResources" resultType="java.util.HashMap">
SELECT id,resource_ids
FROM t_ds_process_definition
WHERE release_state = 1 and resource_ids is not null and resource_ids != ''
</select>
</mapper>

7
dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProcessInstanceMapper.xml

@ -66,7 +66,12 @@
</select>
<select id="queryProcessInstanceListPaging" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
select instance.*
select instance.id, instance.name, instance.process_definition_id, instance.state, instance.recovery, instance.start_time,
instance.end_time, instance.run_times, instance.host, instance.command_type, instance.command_param, instance.task_depend_type,
instance.max_try_times, instance.failure_strategy, instance.warning_type, instance.warning_group_id, instance.schedule_time,
instance.command_start_time, instance.global_params, instance.flag, instance.is_sub_process, instance.executor_id,
instance.history_cmd, instance.dependence_schedule_times, instance.process_instance_priority, instance.worker_group_id,
instance.timeout, instance.tenant_id, instance.update_time
from t_ds_process_instance instance
join t_ds_process_definition define ON instance.process_definition_id = define.id
where 1=1

79
dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ResourceMapper.xml

@ -22,8 +22,8 @@
select *
from t_ds_resources
where 1= 1
<if test="alias != null and alias != ''">
and alias = #{alias}
<if test="fullName != null and fullName != ''">
and full_name = #{fullName}
</if>
<if test="type != -1">
and type = #{type}
@ -47,8 +47,8 @@
<select id="queryResourcePaging" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
select *
from t_ds_resources
where type=#{type}
<if test="userId != 0">
where type=#{type} and pid=#{id}
<if test="userId != 0 and id == -1">
and id in (select resources_id from t_ds_relation_resources_user where user_id=#{userId}
union select id as resources_id from t_ds_resources where user_id=#{userId})
</if>
@ -70,7 +70,74 @@
<select id="queryTenantCodeByResourceName" resultType="java.lang.String">
select tenant_code
from t_ds_tenant t, t_ds_user u, t_ds_resources res
where t.id = u.tenant_id and u.id = res.user_id and res.type=0
and res.alias= #{resName}
where t.id = u.tenant_id and u.id = res.user_id and res.type=#{resType}
and res.full_name= #{resName}
</select>
<select id="listAuthorizedResource" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
select *
from t_ds_resources
where type=0
and id in (select resources_id from t_ds_relation_resources_user where user_id=#{userId}
union select id as resources_id from t_ds_resources where user_id=#{userId})
<if test="resNames != null and resNames != ''">
and full_name in
<foreach collection="resNames" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
</if>
</select>
<select id="listAuthorizedResourceById" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
select *
from t_ds_resources
where id in (select resources_id from t_ds_relation_resources_user where user_id=#{userId}
union select id as resources_id from t_ds_resources where user_id=#{userId})
<if test="resIds != null and resIds != ''">
and id in
<foreach collection="resIds" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
</if>
</select>
<delete id="deleteIds" parameterType="java.lang.Integer">
delete from t_ds_resources where id in
<foreach collection="resIds" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
</delete>
<select id="listChildren" resultType="java.lang.Integer">
select id
from t_ds_resources
where pid = #{directoryId}
</select>
<select id="queryResource" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
select *
from t_ds_resources
where type = #{type}
and full_name = #{fullName}
</select>
<update id="batchUpdateResource" parameterType="java.util.List">
<foreach collection="resourceList" item="resource" index="index" open="" close="" separator =";">
update t_ds_resources
<set>
full_name=#{resource.fullName},
update_time=#{resource.updateTime}
</set>
<where>
id=#{resource.id}
</where>
</foreach>
</update>
<select id="listResourceByIds" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
select *
from t_ds_resources
where id in
<foreach collection="resIds" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
</select>
</mapper>

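One operational note on batchUpdateResource: the foreach joins the generated UPDATE statements with ';', so a single mapper call sends several statements in one round trip. If the backing database is MySQL this only works when multi-statement execution is enabled on the JDBC URL, e.g. by appending allowMultiQueries=true to the datasource URL; PostgreSQL accepts multiple statements by default. The exact property name and location depend on the datasource configuration in use.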
24
dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/UdfFuncMapper.xml

@ -87,4 +87,28 @@
</foreach>
</if>
</select>
<select id="listUdfByResourceId" resultType="org.apache.dolphinscheduler.dao.entity.UdfFunc">
select *
from t_ds_udfs
where 1=1
<if test="resourceIds != null and resourceIds != ''">
and resource_id in
<foreach collection="resourceIds" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
</if>
</select>
<select id="listAuthorizedUdfByResourceId" resultType="org.apache.dolphinscheduler.dao.entity.UdfFunc">
select *
from t_ds_udfs
where
id in (select udf_id from t_ds_relation_udfs_user where user_id=#{userId}
union select id as udf_id from t_ds_udfs where user_id=#{userId})
<if test="resourceIds != null and resourceIds != ''">
and resource_id in
<foreach collection="resourceIds" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
</if>
</select>
</mapper>

57
dolphinscheduler-dao/src/test/java/org/apache/dolphinscheduler/dao/mapper/ResourceMapperTest.java

@ -34,6 +34,7 @@ import org.springframework.test.annotation.Rollback;
import org.springframework.test.context.junit4.SpringRunner;
import org.springframework.transaction.annotation.Transactional;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
@ -68,7 +69,10 @@ public class ResourceMapperTest {
private Resource insertOne(){
//insertOne
Resource resource = new Resource();
resource.setAlias("ut resource");
resource.setAlias("ut-resource");
resource.setFullName("/ut-resource");
resource.setPid(-1);
resource.setDirectory(false);
resource.setType(ResourceType.FILE);
resource.setUserId(111);
resourceMapper.insert(resource);
@ -80,16 +84,32 @@ public class ResourceMapperTest {
* @param user user
* @return Resource
*/
private Resource createResource(User user){
private Resource createResource(User user,boolean isDirectory,ResourceType resourceType,int pid,String alias,String fullName){
//insertOne
Resource resource = new Resource();
resource.setAlias(String.format("ut resource %s",user.getUserName()));
resource.setType(ResourceType.FILE);
resource.setDirectory(isDirectory);
resource.setType(resourceType);
resource.setAlias(alias);
resource.setFullName(fullName);
resource.setUserId(user.getId());
resourceMapper.insert(resource);
return resource;
}
/**
* create resource by user
* @param user user
* @return Resource
*/
private Resource createResource(User user){
//insertOne
String alias = String.format("ut-resource-%s",user.getUserName());
String fullName = String.format("/%s",alias);
Resource resource = createResource(user, false, ResourceType.FILE, -1, alias, fullName);
return resource;
}
/**
* create user
* @return User
@ -200,13 +220,15 @@ public class ResourceMapperTest {
IPage<Resource> resourceIPage = resourceMapper.queryResourcePaging(
page,
resource.getUserId(),
0,
-1,
resource.getType().ordinal(),
""
);
IPage<Resource> resourceIPage1 = resourceMapper.queryResourcePaging(
page,
1110,
-1,
resource.getType().ordinal(),
""
);
@ -289,7 +311,7 @@ public class ResourceMapperTest {
resourceMapper.updateById(resource);
String resource1 = resourceMapper.queryTenantCodeByResourceName(
resource.getAlias()
resource.getFullName(),ResourceType.FILE.ordinal()
);
@ -305,22 +327,37 @@ public class ResourceMapperTest {
User generalUser2 = createGeneralUser("user2");
// create one resource
Resource resource = createResource(generalUser2);
Resource unauthorizedResource = createResource(generalUser2);
Resource unauthorizedResource = createResource(generalUser1);
// need download resources
String[] resNames = new String[]{resource.getAlias(), unauthorizedResource.getAlias()};
String[] resNames = new String[]{resource.getFullName(), unauthorizedResource.getFullName()};
List<Resource> resources = resourceMapper.listAuthorizedResource(generalUser2.getId(), resNames);
Assert.assertEquals(generalUser2.getId(),resource.getUserId());
Assert.assertFalse(resources.stream().map(t -> t.getAlias()).collect(toList()).containsAll(Arrays.asList(resNames)));
Assert.assertFalse(resources.stream().map(t -> t.getFullName()).collect(toList()).containsAll(Arrays.asList(resNames)));
// authorize object unauthorizedResource to generalUser
createResourcesUser(unauthorizedResource,generalUser2);
List<Resource> authorizedResources = resourceMapper.listAuthorizedResource(generalUser2.getId(), resNames);
Assert.assertTrue(authorizedResources.stream().map(t -> t.getAlias()).collect(toList()).containsAll(Arrays.asList(resNames)));
Assert.assertTrue(authorizedResources.stream().map(t -> t.getFullName()).collect(toList()).containsAll(Arrays.asList(resNames)));
}
@Test
public void deleteIdsTest(){
// create a general user
User generalUser1 = createGeneralUser("user1");
Resource resource = createResource(generalUser1);
Resource resource1 = createResource(generalUser1);
List<Integer> resourceList = new ArrayList<>();
resourceList.add(resource.getId());
resourceList.add(resource1.getId());
int result = resourceMapper.deleteIds(resourceList.toArray(new Integer[resourceList.size()]));
Assert.assertEquals(2, result);
}
}

6
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/SubProcessTaskExecThread.java

@ -95,7 +95,7 @@ public class SubProcessTaskExecThread extends MasterBaseTaskExecThread {
* set task instance state
* @return
*/
private Boolean setTaskInstanceState(){
private boolean setTaskInstanceState(){
subProcessInstance = processService.findSubProcessInstance(processInstance.getId(), taskInstance.getId());
if(subProcessInstance == null || taskInstance.getState().typeIsFinished()){
return false;
@ -131,8 +131,8 @@ public class SubProcessTaskExecThread extends MasterBaseTaskExecThread {
if (taskInstance.getState().typeIsFinished()) {
logger.info("sub work flow task {} already complete. task state:{}, parent work flow instance state:{}",
this.taskInstance.getName(),
this.taskInstance.getState().toString(),
this.processInstance.getState().toString());
this.taskInstance.getState(),
this.processInstance.getState());
return;
}
while (Stopper.isRunning()) {

50
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/runner/TaskScheduleThread.java

@ -23,15 +23,18 @@ import com.alibaba.fastjson.JSON;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.AuthorizationType;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.enums.TaskType;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.TaskTimeoutParameter;
import org.apache.dolphinscheduler.common.utils.CommonUtils;
import org.apache.dolphinscheduler.common.utils.HadoopUtils;
import org.apache.dolphinscheduler.common.utils.TaskParametersUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.common.utils.LoggerUtils;
import org.apache.dolphinscheduler.common.log.TaskLogDiscriminator;
@ -96,7 +99,7 @@ public class TaskScheduleThread implements Runnable {
TaskNode taskNode = JSON.parseObject(taskInstance.getTaskJson(), TaskNode.class);
// get resource files
List<String> resourceFiles = createProjectResFiles(taskNode);
List<ResourceInfo> resourceFiles = createProjectResFiles(taskNode);
// copy hdfs/minio file to local
downloadResource(
taskInstance.getExecutePath(),
@ -165,6 +168,7 @@ public class TaskScheduleThread implements Runnable {
new Date(),
taskInstance.getId());
}
/**
* get global paras map
* @return
@ -289,14 +293,16 @@ public class TaskScheduleThread implements Runnable {
/**
* create project resource files
*/
private List<String> createProjectResFiles(TaskNode taskNode) throws Exception{
private List<ResourceInfo> createProjectResFiles(TaskNode taskNode) throws Exception{
Set<String> projectFiles = new HashSet<>();
Set<ResourceInfo> projectFiles = new HashSet<>();
AbstractParameters baseParam = TaskParametersUtils.getParameters(taskNode.getType(), taskNode.getParams());
if (baseParam != null) {
List<String> projectResourceFiles = baseParam.getResourceFilesList();
projectFiles.addAll(projectResourceFiles);
List<ResourceInfo> projectResourceFiles = baseParam.getResourceFilesList();
if (projectResourceFiles != null) {
projectFiles.addAll(projectResourceFiles);
}
}
return new ArrayList<>(projectFiles);
@ -309,18 +315,25 @@ public class TaskScheduleThread implements Runnable {
* @param projectRes
* @param logger
*/
private void downloadResource(String execLocalPath, List<String> projectRes, Logger logger) throws Exception {
private void downloadResource(String execLocalPath, List<ResourceInfo> projectRes, Logger logger) throws Exception {
checkDownloadPermission(projectRes);
for (String res : projectRes) {
File resFile = new File(execLocalPath, res);
String resourceName;
for (ResourceInfo res : projectRes) {
if (res.getId() != 0) {
Resource resource = processService.getResourceById(res.getId());
resourceName = resource.getFullName();
}else{
resourceName = res.getRes();
}
File resFile = new File(execLocalPath, resourceName);
if (!resFile.exists()) {
try {
// query the tenant code of the resource according to the name of the resource
String tenantCode = processService.queryTenantCodeByResName(res);
String resHdfsPath = HadoopUtils.getHdfsFilename(tenantCode, res);
String tenantCode = processService.queryTenantCodeByResName(resourceName, ResourceType.FILE);
String resHdfsPath = HadoopUtils.getHdfsResourceFileName(tenantCode, resourceName);
logger.info("get resource file from hdfs :{}", resHdfsPath);
HadoopUtils.getInstance().copyHdfsToLocal(resHdfsPath, execLocalPath + File.separator + res, false, true);
HadoopUtils.getInstance().copyHdfsToLocal(resHdfsPath, execLocalPath + File.separator + resourceName, false, true);
}catch (Exception e){
logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage());
@ -336,10 +349,17 @@ public class TaskScheduleThread implements Runnable {
* @param projectRes resource name list
* @throws Exception exception
*/
private void checkDownloadPermission(List<String> projectRes) throws Exception {
private void checkDownloadPermission(List<ResourceInfo> projectRes) throws Exception {
int userId = taskInstance.getProcessInstance().getExecutorId();
String[] resNames = projectRes.toArray(new String[projectRes.size()]);
PermissionCheck<String> permissionCheck = new PermissionCheck<>(AuthorizationType.RESOURCE_FILE, processService,resNames,userId,logger);
permissionCheck.checkPermission();
if (projectRes.stream().allMatch(t->t.getId() == 0)) {
String[] resNames = projectRes.stream().map(t -> t.getRes()).collect(Collectors.toList()).toArray(new String[projectRes.size()]);
PermissionCheck<String> permissionCheck = new PermissionCheck(AuthorizationType.RESOURCE_FILE_NAME,processService,resNames,userId,logger);
permissionCheck.checkPermission();
}else{
Integer[] resIds = projectRes.stream().map(t -> t.getId()).collect(Collectors.toList()).toArray(new Integer[projectRes.size()]);
PermissionCheck<Integer> permissionCheck = new PermissionCheck(AuthorizationType.RESOURCE_FILE_ID,processService,resIds,userId,logger);
permissionCheck.checkPermission();
}
}
}

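Note that checkDownloadPermission assumes a homogeneous list: only when every ResourceInfo has id == 0 does the check fall back to full names, so in a mixed list any legacy name-only entry would be checked as resource id 0. Presumably the branch is meant to separate definitions saved before this change (name-only) from ones saved after it (id-bearing).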
5
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractYarnTask.java

@ -94,4 +94,9 @@ public abstract class AbstractYarnTask extends AbstractTask {
* @throws Exception exception
*/
protected abstract String buildCommand() throws Exception;
/**
* set main jar name
*/
protected abstract void setMainJarName();
}

25
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/flink/FlinkTask.java

@ -17,12 +17,14 @@
package org.apache.dolphinscheduler.server.worker.task.flink;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.flink.FlinkParameters;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.server.utils.FlinkArgsUtils;
import org.apache.dolphinscheduler.server.utils.ParamUtils;
import org.apache.dolphinscheduler.server.worker.task.AbstractYarnTask;
@ -63,6 +65,7 @@ public class FlinkTask extends AbstractYarnTask {
if (!flinkParameters.checkParameters()) {
throw new RuntimeException("flink task params is not valid");
}
setMainJarName();
flinkParameters.setQueue(taskProps.getQueue());
if (StringUtils.isNotEmpty(flinkParameters.getMainArgs())) {
@ -111,6 +114,28 @@ public class FlinkTask extends AbstractYarnTask {
return command;
}
@Override
protected void setMainJarName() {
// main jar
ResourceInfo mainJar = flinkParameters.getMainJar();
if (mainJar != null) {
int resourceId = mainJar.getId();
String resourceName;
if (resourceId == 0) {
resourceName = mainJar.getRes();
} else {
Resource resource = processService.getResourceById(flinkParameters.getMainJar().getId());
if (resource == null) {
logger.error("resource id: {} not exist", resourceId);
throw new RuntimeException(String.format("resource id: %d not exist", resourceId));
}
resourceName = resource.getFullName().replaceFirst("/", "");
}
mainJar.setRes(resourceName);
flinkParameters.setMainJar(mainJar);
}
}
@Override
public AbstractParameters getParameters() {
return flinkParameters;

26
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/mr/MapReduceTask.java

@ -19,11 +19,13 @@ package org.apache.dolphinscheduler.server.worker.task.mr;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ProgramType;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.mr.MapreduceParameters;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.server.utils.ParamUtils;
import org.apache.dolphinscheduler.server.worker.task.AbstractYarnTask;
import org.apache.dolphinscheduler.server.worker.task.TaskProps;
@ -64,7 +66,7 @@ public class MapReduceTask extends AbstractYarnTask {
if (!mapreduceParameters.checkParameters()) {
throw new RuntimeException("mapreduce task params is not valid");
}
setMainJarName();
mapreduceParameters.setQueue(taskProps.getQueue());
// replace placeholder
@ -99,6 +101,28 @@ public class MapReduceTask extends AbstractYarnTask {
return command;
}
@Override
protected void setMainJarName() {
// main jar
ResourceInfo mainJar = mapreduceParameters.getMainJar();
if (mainJar != null) {
int resourceId = mainJar.getId();
String resourceName;
if (resourceId == 0) {
resourceName = mainJar.getRes();
} else {
Resource resource = processService.getResourceById(mapreduceParameters.getMainJar().getId());
if (resource == null) {
logger.error("resource id: {} not exist", resourceId);
throw new RuntimeException(String.format("resource id: %d not exist", resourceId));
}
resourceName = resource.getFullName().replaceFirst("/", "");
}
mainJar.setRes(resourceName);
mapreduceParameters.setMainJar(mainJar);
}
}
@Override
public AbstractParameters getParameters() {
return mapreduceParameters;

17
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/shell/ShellTask.java

@ -143,13 +143,16 @@ public class ShellTask extends AbstractTask {
taskProps.getCmdTypeIfComplement(),
taskProps.getScheduleTime());
// replace variable TIME with $[YYYYmmddd...] in shell file when history run job and batch complement job
if(paramsMap != null && taskProps.getScheduleTime()!=null) {
String dateTime = DateUtils.format(taskProps.getScheduleTime(), Constants.PARAMETER_FORMAT_TIME);
Property p = new Property();
p.setValue(dateTime);
p.setProp(Constants.PARAMETER_SHECDULE_TIME);
paramsMap.put(Constants.PARAMETER_SHECDULE_TIME, p);
// new
// replace variable TIME with $[YYYYmmddd...] in shell file when history run job and batch complement job
if (paramsMap != null) {
if (taskProps.getScheduleTime() != null) {
String dateTime = DateUtils.format(taskProps.getScheduleTime(), Constants.PARAMETER_FORMAT_TIME);
Property p = new Property();
p.setValue(dateTime);
p.setProp(Constants.PARAMETER_SHECDULE_TIME);
paramsMap.put(Constants.PARAMETER_SHECDULE_TIME, p);
}
script = ParameterUtils.convertParameterPlaceholders2(script, ParamUtils.convert(paramsMap));
}

26
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/spark/SparkTask.java

@ -18,11 +18,13 @@ package org.apache.dolphinscheduler.server.worker.task.spark;
import org.apache.dolphinscheduler.common.enums.SparkVersion;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.spark.SparkParameters;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.server.utils.ParamUtils;
import org.apache.dolphinscheduler.server.utils.SparkArgsUtils;
import org.apache.dolphinscheduler.server.worker.task.AbstractYarnTask;
@ -67,8 +69,8 @@ public class SparkTask extends AbstractYarnTask {
if (!sparkParameters.checkParameters()) {
throw new RuntimeException("spark task params is not valid");
}
setMainJarName();
sparkParameters.setQueue(taskProps.getQueue());
if (StringUtils.isNotEmpty(sparkParameters.getMainArgs())) {
String args = sparkParameters.getMainArgs();
@ -115,6 +117,28 @@ public class SparkTask extends AbstractYarnTask {
return command;
}
@Override
protected void setMainJarName() {
// main jar
ResourceInfo mainJar = sparkParameters.getMainJar();
if (mainJar != null) {
int resourceId = mainJar.getId();
String resourceName;
if (resourceId == 0) {
resourceName = mainJar.getRes();
} else {
Resource resource = processService.getResourceById(sparkParameters.getMainJar().getId());
if (resource == null) {
logger.error("resource id: {} not exist", resourceId);
throw new RuntimeException(String.format("resource id: %d not exist", resourceId));
}
resourceName = resource.getFullName().replaceFirst("/", "");
}
mainJar.setRes(resourceName);
sparkParameters.setMainJar(mainJar);
}
}
@Override
public AbstractParameters getParameters() {
return sparkParameters;

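FlinkTask, MapReduceTask and SparkTask now carry a verbatim copy of this resolution logic. A hedged sketch of how it could be hoisted into AbstractYarnTask as a shared helper, assuming processService is reachable there as it is in the concrete tasks; this refactor is not part of the commit:

    // Hypothetical shared helper for AbstractYarnTask: resolve the main jar name
    // from either the legacy name reference (id == 0) or the resource id.
    protected void resolveMainJar(ResourceInfo mainJar) {
        if (mainJar == null) {
            return;
        }
        int resourceId = mainJar.getId();
        String resourceName;
        if (resourceId == 0) {
            // legacy reference: use the name stored in the task definition as-is
            resourceName = mainJar.getRes();
        } else {
            Resource resource = processService.getResourceById(resourceId);
            if (resource == null) {
                throw new RuntimeException(String.format("resource id: %d not exist", resourceId));
            }
            // full_name starts with '/', which is stripped for the local execute dir
            resourceName = resource.getFullName().replaceFirst("/", "");
        }
        mainJar.setRes(resourceName);
    }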
31
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java

@ -25,6 +25,7 @@ import org.apache.commons.lang.StringUtils;
import org.apache.dolphinscheduler.alert.utils.MailUtils;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.AuthorizationType;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.enums.ShowType;
import org.apache.dolphinscheduler.common.enums.TaskTimeoutStrategy;
import org.apache.dolphinscheduler.common.enums.UdfType;
@ -247,11 +248,12 @@ public class SqlTask extends AbstractTask {
List<String> createFuncs){
Connection connection = null;
try {
// if upload resource is HDFS and kerberos startup
CommonUtils.loadKerberosConf();
// if hive , load connection params if exists
if (HIVE == dataSource.getType()) {
if (DbType.HIVE == dataSource.getType() || DbType.SPARK == dataSource.getType()) {
// if upload resource is HDFS and kerberos startup
CommonUtils.loadKerberosConf();
Properties paramProp = new Properties();
paramProp.setProperty(USER, baseDataSource.getUser());
paramProp.setProperty(PASSWORD, baseDataSource.getPassword());
@ -378,7 +380,7 @@ public class SqlTask extends AbstractTask {
List<User> users = alertDao.queryUserByAlertGroupId(instance.getWarningGroupId());
// receiving group list
List<String> receviersList = new ArrayList<String>();
List<String> receviersList = new ArrayList<>();
for(User user:users){
receviersList.add(user.getEmail().trim());
}
@ -392,7 +394,7 @@ public class SqlTask extends AbstractTask {
}
// copy list
List<String> receviersCcList = new ArrayList<String>();
List<String> receviersCcList = new ArrayList<>();
// Custom Copier
String receiversCc = sqlParameters.getReceiversCc();
if (StringUtils.isNotEmpty(receiversCc)){
@ -406,7 +408,7 @@ public class SqlTask extends AbstractTask {
if(EnumUtils.isValidEnum(ShowType.class,showTypeName)){
Map<String, Object> mailResult = MailUtils.sendMails(receviersList,
receviersCcList, title, content, ShowType.valueOf(showTypeName));
if(!(Boolean) mailResult.get(STATUS)){
if(!(boolean) mailResult.get(STATUS)){
throw new RuntimeException("send mail failed!");
}
}else{
@ -463,22 +465,7 @@ public class SqlTask extends AbstractTask {
ProcessInstance processInstance = processService.findProcessInstanceByTaskId(taskProps.getTaskInstId());
int userId = processInstance.getExecutorId();
PermissionCheck<Integer> permissionCheckUdf = new PermissionCheck<Integer>(AuthorizationType.UDF, processService,udfFunIds,userId,logger);
PermissionCheck<Integer> permissionCheckUdf = new PermissionCheck<>(AuthorizationType.UDF, processService,udfFunIds,userId,logger);
permissionCheckUdf.checkPermission();
}
/**
* check data source permission
* @param dataSourceId data source id
* @return if has download permission return true else false
*/
private void checkDataSourcePermission(int dataSourceId) throws Exception{
// process instance
ProcessInstance processInstance = processService.findProcessInstanceByTaskId(taskProps.getTaskInstId());
int userId = processInstance.getExecutorId();
PermissionCheck<Integer> permissionCheckDataSource = new PermissionCheck<Integer>(AuthorizationType.DATASOURCE, processService,new Integer[]{dataSourceId},userId,logger);
permissionCheckDataSource.checkPermission();
}
}

4
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sqoop/SqoopTask.java

@ -71,6 +71,10 @@ public class SqoopTask extends AbstractYarnTask {
return null;
}
@Override
protected void setMainJarName() {
}
@Override
public AbstractParameters getParameters() {
return sqoopParameters;

60
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/task/shell/ShellTaskTest.java

@ -25,13 +25,10 @@ import org.apache.dolphinscheduler.server.worker.task.ShellCommandExecutor;
import org.apache.dolphinscheduler.server.worker.task.TaskProps;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.junit.After;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
import org.junit.*;
import org.junit.runner.RunWith;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PowerMockIgnore;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import org.slf4j.Logger;
@ -45,6 +42,7 @@ import java.util.Date;
*/
@RunWith(PowerMockRunner.class)
@PrepareForTest(OSUtils.class)
@PowerMockIgnore({"javax.management.*"})
public class ShellTaskTest {
private static final Logger logger = LoggerFactory.getLogger(ShellTaskTest.class);
@ -136,6 +134,28 @@ public class ShellTaskTest {
}
}
@Test
public void testInitException() {
TaskProps props = new TaskProps();
props.setTaskDir("/tmp");
props.setTaskAppId(String.valueOf(System.currentTimeMillis()));
props.setTaskInstId(1);
props.setTenantCode("1");
props.setEnvFile(".dolphinscheduler_env.sh");
props.setTaskStartTime(new Date());
props.setTaskTimeout(0);
props.setTaskParams("{\"rawScript\": \"\"}");
ShellTask shellTask = new ShellTask(props, logger);
try {
shellTask.init();
} catch (Exception e) {
logger.info(e.getMessage(), e);
if (e.getMessage().contains("shell task params is not valid")) {
Assert.assertTrue(true);
}
}
}
/**
* Method: init for Windows
*/
@ -157,7 +177,20 @@ public class ShellTaskTest {
public void testHandleForUnix() throws Exception {
try {
PowerMockito.when(OSUtils.isWindows()).thenReturn(false);
shellTask.handle();
TaskProps props = new TaskProps();
props.setTaskDir("/tmp");
props.setTaskAppId(String.valueOf(System.currentTimeMillis()));
props.setTaskInstId(1);
props.setTenantCode("1");
props.setEnvFile(".dolphinscheduler_env.sh");
props.setTaskStartTime(new Date());
props.setTaskTimeout(0);
props.setScheduleTime(new Date());
props.setCmdTypeIfComplement(CommandType.START_PROCESS);
props.setTaskParams("{\"rawScript\": \" echo ${test}\", \"localParams\": [{\"prop\":\"test\", \"direct\":\"IN\", \"type\":\"VARCHAR\", \"value\":\"123\"}]}");
ShellTask shellTask1 = new ShellTask(props, logger);
shellTask1.init();
shellTask1.handle();
Assert.assertTrue(true);
} catch (Error | Exception e) {
if (!e.getMessage().contains("process error . exitCode is : -1")
@ -174,7 +207,20 @@ public class ShellTaskTest {
public void testHandleForWindows() throws Exception {
try {
Assume.assumeTrue(OSUtils.isWindows());
shellTask.handle();
TaskProps props = new TaskProps();
props.setTaskDir("/tmp");
props.setTaskAppId(String.valueOf(System.currentTimeMillis()));
props.setTaskInstId(1);
props.setTenantCode("1");
props.setEnvFile(".dolphinscheduler_env.sh");
props.setTaskStartTime(new Date());
props.setTaskTimeout(0);
props.setScheduleTime(new Date());
props.setCmdTypeIfComplement(CommandType.START_PROCESS);
props.setTaskParams("{\"rawScript\": \" echo ${test}\", \"localParams\": [{\"prop\":\"test\", \"direct\":\"IN\", \"type\":\"VARCHAR\", \"value\":\"123\"}]}");
ShellTask shellTask1 = new ShellTask(props, logger);
shellTask1.init();
shellTask1.handle();
Assert.assertTrue(true);
} catch (Error | Exception e) {
if (!e.getMessage().contains("process error . exitCode is : -1")) {

2
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/task/spark/SparkTaskTest.java

@ -135,7 +135,7 @@ public class SparkTaskTest {
logger.info("spark task command : {}", sparkArgs);
Assert.assertEquals(sparkArgs.split(" ")[0], SPARK2_COMMAND );
Assert.assertEquals(SPARK2_COMMAND, sparkArgs.split(" ")[0]);
}
}

31
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/permission/PermissionCheck.java

@ -18,6 +18,7 @@ package org.apache.dolphinscheduler.service.permission;
import org.apache.dolphinscheduler.common.enums.AuthorizationType;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.service.process.ProcessService;
@ -45,6 +46,11 @@ public class PermissionCheck<T> {
*/
private T[] needChecks;
/**
* resource info
*/
private List<ResourceInfo> resourceList;
/**
* user id
*/
@ -90,6 +96,22 @@ public class PermissionCheck<T> {
this.logger = logger;
}
/**
* permission check
* @param authorizationType authorization type
* @param processService process service
* @param resourceList resource list
* @param userId user id
* @param logger logger
*/
public PermissionCheck(AuthorizationType authorizationType, ProcessService processService, List<ResourceInfo> resourceList, int userId, Logger logger) {
this.authorizationType = authorizationType;
this.processService = processService;
this.resourceList = resourceList;
this.userId = userId;
this.logger = logger;
}
public AuthorizationType getAuthorizationType() {
return authorizationType;
}
@@ -122,6 +144,14 @@
this.userId = userId;
}
public List<ResourceInfo> getResourceList() {
return resourceList;
}
public void setResourceList(List<ResourceInfo> resourceList) {
this.resourceList = resourceList;
}
/**
* has permission
* @return true if has permission
@@ -141,6 +171,7 @@
*/
public void checkPermission() throws Exception{
if(this.needChecks.length > 0){
// get the user type to determine whether the user is an admin
User user = processService.getUserById(userId);
if (user.getUserType() != UserType.ADMIN_USER){

26
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java

@@ -1556,10 +1556,11 @@ public class ProcessService {
/**
* find tenant code by resource name
* @param resName resource name
* @param resourceType resource type
* @return tenant code
*/
public String queryTenantCodeByResName(String resName){
return resourceMapper.queryTenantCodeByResourceName(resName);
public String queryTenantCodeByResName(String resName, ResourceType resourceType){
return resourceMapper.queryTenantCodeByResourceName(resName, resourceType.ordinal());
}
/**
@@ -1791,10 +1792,18 @@ public class ProcessService {
Set<T> originResSet = new HashSet<T>(Arrays.asList(needChecks));
switch (authorizationType){
case RESOURCE_FILE:
Set<String> authorizedResources = resourceMapper.listAuthorizedResource(userId, needChecks).stream().map(t -> t.getAlias()).collect(toSet());
case RESOURCE_FILE_ID:
Set<Integer> authorizedResourceFiles = resourceMapper.listAuthorizedResourceById(userId, needChecks).stream().map(t -> t.getId()).collect(toSet());
originResSet.removeAll(authorizedResourceFiles);
break;
case RESOURCE_FILE_NAME:
Set<String> authorizedResources = resourceMapper.listAuthorizedResource(userId, needChecks).stream().map(t -> t.getFullName()).collect(toSet());
originResSet.removeAll(authorizedResources);
break;
case UDF_FILE:
Set<Integer> authorizedUdfFiles = resourceMapper.listAuthorizedResourceById(userId, needChecks).stream().map(t -> t.getId()).collect(toSet());
originResSet.removeAll(authorizedUdfFiles);
break;
case DATASOURCE:
Set<Integer> authorizedDatasources = dataSourceMapper.listAuthorizedDataSource(userId, needChecks).stream().map(t -> t.getId()).collect(toSet());
originResSet.removeAll(authorizedDatasources);
@@ -1820,5 +1829,14 @@
return userMapper.queryDetailsById(userId);
}
/**
* get resource by resource id
* @param resourceId resource id
* @return Resource
*/
public Resource getResourceById(int resourceId){
return resourceMapper.selectById(resourceId);
}
}

1
dolphinscheduler-ui/package.json

@@ -11,6 +11,7 @@
"build:release": "npm run clean && cross-env NODE_ENV=production PUBLIC_PATH=/dolphinscheduler/ui webpack --config ./build/webpack.config.release.js"
},
"dependencies": {
"@riophae/vue-treeselect": "^0.4.0",
"ans-ui": "1.1.7",
"axios": "^0.16.2",
"bootstrap": "3.3.7",

90
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/flink.vue

@@ -48,19 +48,9 @@
<m-list-box>
<div slot="text">{{$t('Main jar package')}}</div>
<div slot="content">
<x-select
style="width: 100%;"
:placeholder="$t('Please enter main jar package')"
v-model="mainJar"
filterable
:disabled="isDetails">
<x-option
v-for="city in mainJarList"
:key="city.code"
:value="city.code"
:label="city.code">
</x-option>
</x-select>
<treeselect v-model="mainJar" :options="mainJarLists" :disable-branch-nodes="true" :normalizer="normalizer" :placeholder="$t('Please enter main jar package')">
<div slot="value-label" slot-scope="{ node }">{{ node.raw.fullName }}</div>
</treeselect>
</div>
</m-list-box>
<m-list-box>
@@ -151,12 +141,9 @@
<m-list-box>
<div slot="text">{{$t('Resources')}}</div>
<div slot="content">
<m-resources
ref="refResources"
@on-resourcesData="_onResourcesData"
@on-cache-resourcesData="_onCacheResourcesData"
:resource-list="resourceList">
</m-resources>
<treeselect v-model="resourceList" :multiple="true" :options="mainJarList" :normalizer="normalizer" :placeholder="$t('Please select resources')">
<div slot="value-label" slot-scope="{ node }">{{ node.raw.fullName }}</div>
</treeselect>
</div>
</m-list-box>
<m-list-box>
@@ -178,6 +165,8 @@
import mLocalParams from './_source/localParams'
import mListBox from './_source/listBox'
import mResources from './_source/resources'
import Treeselect from '@riophae/vue-treeselect'
import '@riophae/vue-treeselect/dist/vue-treeselect.css'
import disabledState from '@/module/mixin/disabledState'
export default {
@@ -189,6 +178,7 @@
// Master jar package
mainJar: null,
// Master jar package(List)
mainJarLists: [],
mainJarList: [],
// Deployment method
deployMode: 'cluster',
@@ -215,7 +205,12 @@
// Program type
programType: 'SCALA',
// Program type(List)
programTypeList: [{ code: 'JAVA' }, { code: 'SCALA' }, { code: 'PYTHON' }]
programTypeList: [{ code: 'JAVA' }, { code: 'SCALA' }, { code: 'PYTHON' }],
normalizer(node) {
return {
label: node.name
}
}
}
},
props: {
@@ -291,10 +286,6 @@
return false
}
if (!this.$refs.refResources._verifResources()) {
return false
}
// localParams Subcomponent verification
if (!this.$refs.refLocalParams._verifProp()) {
return false
@@ -304,10 +295,12 @@
this.$emit('on-params', {
mainClass: this.mainClass,
mainJar: {
res: this.mainJar
id: this.mainJar
},
deployMode: this.deployMode,
resourceList: this.resourceList,
resourceList: _.map(this.resourceList, v => {
return {id: v}
}),
localParams: this.localParams,
slot: this.slot,
taskManager: this.taskManager,
@@ -320,24 +313,12 @@
})
return true
},
/**
* get resources list
*/
_getResourcesList () {
return new Promise((resolve, reject) => {
let isJar = (alias) => {
return alias.substring(alias.lastIndexOf('.') + 1, alias.length) !== 'jar'
}
this.mainJarList = _.map(_.cloneDeep(this.store.state.dag.resourcesListS), v => {
return {
id: v.id,
code: v.alias,
disabled: isJar(v.alias)
}
})
resolve()
diGuiTree(item) { // Recursively traverse the tree structure
item.forEach(item => {
item.children === '' || item.children === undefined || item.children === null || item.children.length === 0 ?
delete item.children : this.diGuiTree(item.children);
})
}
},
},
watch: {
// Listening type
@@ -356,10 +337,12 @@
return {
mainClass: this.mainClass,
mainJar: {
res: this.mainJar
id: this.mainJar
},
deployMode: this.deployMode,
resourceList: this.cacheResourceList,
resourceList: _.map(this.resourceList, v => {
return {id: v}
}),
localParams: this.localParams,
slot: this.slot,
taskManager: this.taskManager,
@@ -373,13 +356,17 @@
}
},
created () {
this._getResourcesList().then(() => {
let item = this.store.state.dag.resourcesListS
let items = this.store.state.dag.resourcesListJar
this.diGuiTree(item)
this.diGuiTree(items)
this.mainJarList = item
this.mainJarLists = items
let o = this.backfillItem
// Non-null objects represent backfill
if (!_.isEmpty(o)) {
this.mainClass = o.params.mainClass || ''
this.mainJar = o.params.mainJar && o.params.mainJar.res ? o.params.mainJar.res : ''
this.mainJar = o.params.mainJar && o.params.mainJar.id ? o.params.mainJar.id : ''
this.deployMode = o.params.deployMode || ''
this.slot = o.params.slot || 1
this.taskManager = o.params.taskManager || '2'
@@ -393,7 +380,9 @@
// backfill resourceList
let resourceList = o.params.resourceList || []
if (resourceList.length) {
this.resourceList = resourceList
this.resourceList = _.map(resourceList, v => {
return v.id
})
this.cacheResourceList = resourceList
}
@@ -403,12 +392,11 @@
this.localParams = localParams
}
}
})
},
mounted () {
},
components: { mLocalParams, mListBox, mResources }
components: { mLocalParams, mListBox, mResources, Treeselect }
}
</script>
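
A note on the diGuiTree helper that repeats across these task forms: vue-treeselect treats any node that still carries a children key as an expandable branch, even when the array is empty, so the helper strips empty children arrays before the tree is handed to the widget. A minimal standalone sketch of the same pruning pass, using hypothetical sample data rather than the store's real resource tree:

// Prune empty children arrays so vue-treeselect renders those nodes as leaves.
function diGuiTree (nodes) {
  nodes.forEach(node => {
    if (!node.children || node.children.length === 0) {
      delete node.children
    } else {
      diGuiTree(node.children)
    }
  })
}

// Hypothetical tree shaped like store.state.dag.resourcesListS
const tree = [
  { id: 1, name: 'udfs', children: [] },
  { id: 2, name: 'jars', children: [{ id: 3, name: 'demo.jar', children: [] }] }
]
diGuiTree(tree)
// tree[0] no longer has a children key; demo.jar is now a plain leaf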

93
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/mr.vue

@@ -44,19 +44,9 @@
<m-list-box>
<div slot="text">{{$t('Main jar package')}}</div>
<div slot="content">
<x-select
style="width: 100%;"
:placeholder="$t('Please enter main jar package')"
v-model="mainJar"
filterable
:disabled="isDetails">
<x-option
v-for="city in mainJarList"
:key="city.code"
:value="city.code"
:label="city.code">
</x-option>
</x-select>
<treeselect v-model="mainJar" :options="mainJarLists" :disable-branch-nodes="true" :normalizer="normalizer" :placeholder="$t('Please enter main jar package')">
<div slot="value-label" slot-scope="{ node }">{{ node.raw.fullName }}</div>
</treeselect>
</div>
</m-list-box>
<m-list-box>
@@ -88,12 +78,9 @@
<m-list-box>
<div slot="text">{{$t('Resources')}}</div>
<div slot="content">
<m-resources
ref="refResources"
@on-resourcesData="_onResourcesData"
@on-cache-resourcesData="_onCacheResourcesData"
:resource-list="resourceList">
</m-resources>
<treeselect v-model="resourceList" :multiple="true" :options="mainJarList" :normalizer="normalizer" :placeholder="$t('Please select resources')">
<div slot="value-label" slot-scope="{ node }">{{ node.raw.fullName }}</div>
</treeselect>
</div>
</m-list-box>
<m-list-box>
@@ -115,6 +102,8 @@
import mListBox from './_source/listBox'
import mResources from './_source/resources'
import mLocalParams from './_source/localParams'
import Treeselect from '@riophae/vue-treeselect'
import '@riophae/vue-treeselect/dist/vue-treeselect.css'
import disabledState from '@/module/mixin/disabledState'
export default {
name: 'mr',
@@ -125,6 +114,7 @@
// Master jar package
mainJar: null,
// Main jar package (List)
mainJarLists: [],
mainJarList: [],
// Resource(list)
resourceList: [],
@@ -139,7 +129,12 @@
// Program type
programType: 'JAVA',
// Program type(List)
programTypeList: [{ code: 'JAVA' }, { code: 'PYTHON' }]
programTypeList: [{ code: 'JAVA' }, { code: 'PYTHON' }],
normalizer(node) {
return {
label: node.name
}
}
}
},
props: {
@@ -165,6 +160,12 @@
_onCacheResourcesData (a) {
this.cacheResourceList = a
},
diGuiTree(item) { // Recursively traverse the tree structure
item.forEach(item => {
item.children === '' || item.children === undefined || item.children === null || item.children.length === 0 ?
delete item.children : this.diGuiTree(item.children);
})
},
/**
* verification
*/
@@ -179,22 +180,19 @@
return false
}
if (!this.$refs.refResources._verifResources()) {
return false
}
// localParams Subcomponent verification
if (!this.$refs.refLocalParams._verifProp()) {
return false
}
// storage
this.$emit('on-params', {
mainClass: this.mainClass,
mainJar: {
res: this.mainJar
id: this.mainJar
},
resourceList: this.resourceList,
resourceList: _.map(this.resourceList, v => {
return {id: v}
}),
localParams: this.localParams,
mainArgs: this.mainArgs,
others: this.others,
@@ -202,24 +200,7 @@
})
return true
},
/**
* Get resource data
*/
_getResourcesList () {
return new Promise((resolve, reject) => {
let isJar = (alias) => {
return alias.substring(alias.lastIndexOf('.') + 1, alias.length) !== 'jar'
}
this.mainJarList = _.map(_.cloneDeep(this.store.state.dag.resourcesListS), v => {
return {
id: v.id,
code: v.alias,
disabled: isJar(v.alias)
}
})
resolve()
})
}
},
watch: {
/**
@@ -240,9 +221,11 @@
return {
mainClass: this.mainClass,
mainJar: {
res: this.mainJar
id: this.mainJar
},
resourceList: this.cacheResourceList,
resourceList: _.map(this.resourceList, v => {
return {id: v}
}),
localParams: this.localParams,
mainArgs: this.mainArgs,
others: this.others,
@@ -251,13 +234,18 @@
}
},
created () {
this._getResourcesList().then(() => {
let item = this.store.state.dag.resourcesListS
let items = this.store.state.dag.resourcesListJar
this.diGuiTree(item)
this.diGuiTree(items)
this.mainJarList = item
this.mainJarLists = items
let o = this.backfillItem
// Non-null objects represent backfill
if (!_.isEmpty(o)) {
this.mainClass = o.params.mainClass || ''
this.mainJar = o.params.mainJar.res || ''
this.mainJar = o.params.mainJar.id || ''
this.mainArgs = o.params.mainArgs || ''
this.others = o.params.others
this.programType = o.params.programType || 'JAVA'
@@ -265,7 +253,9 @@
// backfill resourceList
let resourceList = o.params.resourceList || []
if (resourceList.length) {
this.resourceList = resourceList
this.resourceList = _.map(resourceList, v => {
return v.id
})
this.cacheResourceList = resourceList
}
@@ -275,12 +265,11 @@
this.localParams = localParams
}
}
})
},
mounted () {
},
components: { mLocalParams, mListBox, mResources }
components: { mLocalParams, mListBox, mResources, Treeselect }
}
</script>
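
These forms also change what resourceList holds: treeselect binds an array of bare resource ids, while the saved task params expect objects, so the code maps ids to {id} objects on emit and back to ids on backfill. A small sketch of that round trip with plain functions (sample ids are made up):

// On save: the treeselect model is [4, 7]; the params want [{id: 4}, {id: 7}]
const toParams = ids => ids.map(id => ({ id }))

// On backfill: params hold [{id: 4}, {id: 7}]; the model wants [4, 7]
const toModel = list => list.map(item => item.id)

const saved = toParams([4, 7])   // [{ id: 4 }, { id: 7 }]
const model = toModel(saved)     // [4, 7]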

50
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/python.vue

@@ -108,10 +108,6 @@
return false
}
if (!this.$refs.refResources._verifResources()) {
return false
}
// localParams Subcomponent verification
if (!this.$refs.refLocalParams._verifProp()) {
return false
@@ -119,7 +115,9 @@
// storage
this.$emit('on-params', {
resourceList: this.resourceList,
resourceList: _.map(this.resourceList, v => {
return {id: v}
}),
localParams: this.localParams,
rawScript: editor.getValue()
})
@@ -129,8 +127,6 @@
* Processing code highlighting
*/
_handlerEditor () {
this._destroyEditor()
// editor
editor = codemirror('code-python-mirror', {
mode: 'python',
@@ -145,45 +141,28 @@
}
}
this.changes = () => {
this._cacheParams()
}
// Monitor keyboard
editor.on('keypress', this.keypress)
editor.on('changes', this.changes)
editor.setValue(this.rawScript)
return editor
},
_cacheParams () {
this.$emit('on-cache-params', {
resourceList: this.cacheResourceList,
localParams: this.localParams,
rawScript: editor ? editor.getValue() : ''
});
},
_destroyEditor () {
if (editor) {
editor.toTextArea() // Uninstall
editor.off($('.code-python-mirror'), 'keypress', this.keypress)
editor.off($('.code-python-mirror'), 'changes', this.changes)
}
}
},
watch: {
// Watch the cacheParams
cacheParams (val) {
this._cacheParams()
this.$emit('on-cache-params', val);
}
},
computed: {
cacheParams () {
return {
resourceList: this.cacheResourceList,
localParams: this.localParams
resourceList: _.map(this.resourceList, v => {
return {id: v}
}),
localParams: this.localParams,
rawScript: editor ? editor.getValue() : ''
}
}
},
@@ -197,7 +176,9 @@
// backfill resourceList
let resourceList = o.params.resourceList || []
if (resourceList.length) {
this.resourceList = resourceList
this.resourceList = _.map(resourceList, v => {
return v.id
})
this.cacheResourceList = resourceList
}
@@ -214,11 +195,8 @@
}, 200)
},
destroyed () {
if (editor) {
editor.toTextArea() // Uninstall
editor.off($('.code-python-mirror'), 'keypress', this.keypress)
editor.off($('.code-python-mirror'), 'changes', this.changes)
}
editor.toTextArea() // Uninstall
editor.off($('.code-python-mirror'), 'keypress', this.keypress)
},
components: { mLocalParams, mListBox, mResources }
}
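
python.vue and shell.vue drop the hand-rolled _cacheParams and editor-event plumbing in favor of a computed cacheParams object whose watcher re-emits the fresh value whenever a dependency changes. A reduced sketch of the pattern, boiled down to the relevant component options (field names follow the diff; the rest is illustrative):

// Vue recomputes cacheParams whenever resourceList or localParams change,
// and the watcher forwards each new value to the parent form.
const cacheSketch = {
  data () {
    return { resourceList: [], localParams: [] }
  },
  computed: {
    cacheParams () {
      return {
        resourceList: this.resourceList.map(id => ({ id })),
        localParams: this.localParams
      }
    }
  },
  watch: {
    cacheParams (val) {
      this.$emit('on-cache-params', val)
    }
  }
}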

80
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/shell.vue

@@ -32,6 +32,14 @@
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Resources')}}</div>
<div slot="content">
<treeselect v-model="resourceList" :multiple="true" :options="options" :normalizer="normalizer" :placeholder="$t('Please select resources')">
<div slot="value-label" slot-scope="{ node }">{{ node.raw.fullName }}</div>
</treeselect>
</div>
</m-list-box>
<!-- <m-list-box>
<div slot="text">{{$t('Resources')}}</div>
<div slot="content">
<m-resources
@@ -41,7 +49,7 @@
:resource-list="resourceList">
</m-resources>
</div>
</m-list-box>
</m-list-box> -->
<m-list-box>
<div slot="text">{{$t('Custom Parameters')}}</div>
<div slot="content">
@@ -63,6 +71,8 @@
import mResources from './_source/resources'
import mLocalParams from './_source/localParams'
import disabledState from '@/module/mixin/disabledState'
import Treeselect from '@riophae/vue-treeselect'
import '@riophae/vue-treeselect/dist/vue-treeselect.css'
import codemirror from '@/conf/home/pages/resource/pages/file/pages/_source/codemirror'
let editor
@@ -78,7 +88,14 @@
// resource(list)
resourceList: [],
// Cache ResourceList
cacheResourceList: []
cacheResourceList: [],
// define options
options: [],
normalizer(node) {
return {
label: node.name
}
},
}
},
mixins: [disabledState],
@@ -143,17 +160,19 @@
return false
}
if (!this.$refs.refResources._verifResources()) {
return false
}
// localParams Subcomponent verification
if (!this.$refs.refLocalParams._verifProp()) {
return false
}
// Process resourceList
let dataProcessing = _.map(this.resourceList, v => {
return {
id: v
}
})
// storage
this.$emit('on-params', {
resourceList: this.resourceList,
resourceList: dataProcessing,
localParams: this.localParams,
rawScript: editor.getValue()
})
@@ -163,8 +182,6 @@
* Processing code highlighting
*/
_handlerEditor () {
this._destroyEditor()
// editor
editor = codemirror('code-shell-mirror', {
mode: 'shell',
@@ -179,51 +196,41 @@
}
}
this.changes = () => {
this._cacheParams()
}
// Monitor keyboard
editor.on('keypress', this.keypress)
editor.on('changes', this.changes)
editor.setValue(this.rawScript)
return editor
},
_cacheParams () {
this.$emit('on-cache-params', {
resourceList: this.cacheResourceList,
localParams: this.localParams,
rawScript: editor ? editor.getValue() : ''
});
},
_destroyEditor () {
if (editor) {
editor.toTextArea() // Uninstall
editor.off($('.code-sql-mirror'), 'keypress', this.keypress)
editor.off($('.code-sql-mirror'), 'changes', this.changes)
}
diGuiTree(item) { // Recursively traverse the tree structure
item.forEach(item => {
item.children === '' || item.children === undefined || item.children === null || item.children.length === 0 ?
delete item.children : this.diGuiTree(item.children);
})
}
},
watch: {
// Watch the cacheParams
cacheParams (val) {
this._cacheParams()
this.$emit('on-cache-params', val);
}
},
computed: {
cacheParams () {
return {
resourceList: this.cacheResourceList,
localParams: this.localParams
resourceList: _.map(this.resourceList, v => {
return {id: v}
}),
localParams: this.localParams,
rawScript: editor ? editor.getValue() : ''
}
}
},
created () {
let item = this.store.state.dag.resourcesListS
this.diGuiTree(item)
this.options = item
let o = this.backfillItem
// Non-null objects represent backfill
if (!_.isEmpty(o)) {
this.rawScript = o.params.rawScript || ''
@@ -231,7 +238,9 @@
// backfill resourceList
let resourceList = o.params.resourceList || []
if (resourceList.length) {
this.resourceList = resourceList
this.resourceList = _.map(resourceList, v => {
return v.id
})
this.cacheResourceList = resourceList
}
@@ -251,10 +260,9 @@
if (editor) {
editor.toTextArea() // Uninstall
editor.off($('.code-shell-mirror'), 'keypress', this.keypress)
editor.off($('.code-shell-mirror'), 'changes', this.changes)
}
},
components: { mLocalParams, mListBox, mResources, mScriptBox }
components: { mLocalParams, mListBox, mResources, mScriptBox, Treeselect }
}
</script>
<style lang="scss" rel="stylesheet/scss" scope>
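
Each form also defines a normalizer because vue-treeselect expects id, label, and children keys on every node, and the resource tree in the store names its display field name instead of label. A sketch of the mapping with a hypothetical node:

// Only label needs remapping; id and children already match treeselect's defaults.
const normalizer = node => ({
  label: node.name
})

const node = { id: 5, name: 'etl-scripts', fullName: '/etl-scripts' }
normalizer(node) // { label: 'etl-scripts' }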

97
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/spark.vue

@@ -63,19 +63,9 @@
<m-list-box>
<div slot="text">{{$t('Main jar package')}}</div>
<div slot="content">
<x-select
style="width: 100%;"
:placeholder="$t('Please enter main jar package')"
v-model="mainJar"
filterable
:disabled="isDetails">
<x-option
v-for="city in mainJarList"
:key="city.code"
:value="city.code"
:label="city.code">
</x-option>
</x-select>
<treeselect v-model="mainJar" :options="mainJarLists" :disable-branch-nodes="true" :normalizer="normalizer" :placeholder="$t('Please enter main jar package')">
<div slot="value-label" slot-scope="{ node }">{{ node.raw.fullName }}</div>
</treeselect>
</div>
</m-list-box>
<m-list-box>
@@ -177,6 +167,14 @@
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Resources')}}</div>
<div slot="content">
<treeselect v-model="resourceList" :multiple="true" :options="mainJarList" :normalizer="normalizer" :placeholder="$t('Please select resources')">
<div slot="value-label" slot-scope="{ node }">{{ node.raw.fullName }}</div>
</treeselect>
</div>
</m-list-box>
<!-- <m-list-box>
<div slot="text">{{$t('Resources')}}</div>
<div slot="content">
<m-resources
@@ -186,7 +184,7 @@
:resource-list="resourceList">
</m-resources>
</div>
</m-list-box>
</m-list-box> -->
<m-list-box>
<div slot="text">{{$t('Custom Parameters')}}</div>
<div slot="content">
@@ -206,6 +204,8 @@
import mLocalParams from './_source/localParams'
import mListBox from './_source/listBox'
import mResources from './_source/resources'
import Treeselect from '@riophae/vue-treeselect'
import '@riophae/vue-treeselect/dist/vue-treeselect.css'
import disabledState from '@/module/mixin/disabledState'
export default {
@@ -217,6 +217,7 @@
// Master jar package
mainJar: null,
// Master jar package(List)
mainJarLists: [],
mainJarList: [],
// Deployment method
deployMode: 'cluster',
@@ -247,7 +248,12 @@
// Spark version
sparkVersion: 'SPARK2',
// Spark version(LIst)
sparkVersionList: [{ code: 'SPARK2' }, { code: 'SPARK1' }]
sparkVersionList: [{ code: 'SPARK2' }, { code: 'SPARK1' }],
normalizer(node) {
return {
label: node.name
}
}
}
},
props: {
@@ -273,6 +279,12 @@
_onCacheResourcesData (a) {
this.cacheResourceList = a
},
diGuiTree(item) { // Recursively traverse the tree structure
item.forEach(item => {
item.children === '' || item.children === undefined || item.children === null || item.children.length === 0 ?
delete item.children : this.diGuiTree(item.children);
})
},
/**
* verification
*/
@@ -321,24 +333,25 @@
this.$message.warning(`${i18n.$t('Core number should be positive integer')}`)
return false
}
if (!this.$refs.refResources._verifResources()) {
return false
}
// localParams Subcomponent verification
if (!this.$refs.refLocalParams._verifProp()) {
return false
}
// Process resourceList
let dataProcessing = _.map(this.resourceList, v => {
return {
id: v
}
})
// storage
this.$emit('on-params', {
mainClass: this.mainClass,
mainJar: {
res: this.mainJar
id: this.mainJar
},
deployMode: this.deployMode,
resourceList: this.resourceList,
resourceList: dataProcessing,
localParams: this.localParams,
driverCores: this.driverCores,
driverMemory: this.driverMemory,
@@ -351,24 +364,6 @@
sparkVersion: this.sparkVersion
})
return true
},
/**
* get resources list
*/
_getResourcesList () {
return new Promise((resolve, reject) => {
let isJar = (alias) => {
return alias.substring(alias.lastIndexOf('.') + 1, alias.length) !== 'jar'
}
this.mainJarList = _.map(_.cloneDeep(this.store.state.dag.resourcesListS), v => {
return {
id: v.id,
code: v.alias,
disabled: isJar(v.alias)
}
})
resolve()
})
}
},
watch: {
@@ -388,10 +383,12 @@
return {
mainClass: this.mainClass,
mainJar: {
res: this.mainJar
id: this.mainJar
},
deployMode: this.deployMode,
resourceList: this.cacheResourceList,
resourceList: _.map(this.resourceList, v => {
return {id: v}
}),
localParams: this.localParams,
driverCores: this.driverCores,
driverMemory: this.driverMemory,
@@ -406,13 +403,18 @@
}
},
created () {
this._getResourcesList().then(() => {
let item = this.store.state.dag.resourcesListS
let items = this.store.state.dag.resourcesListJar
this.diGuiTree(item)
this.diGuiTree(items)
this.mainJarList = item
this.mainJarLists = items
let o = this.backfillItem
// Non-null objects represent backfill
if (!_.isEmpty(o)) {
this.mainClass = o.params.mainClass || ''
this.mainJar = o.params.mainJar && o.params.mainJar.res ? o.params.mainJar.res : ''
this.mainJar = o.params.mainJar && o.params.mainJar.id ? o.params.mainJar.id : ''
this.deployMode = o.params.deployMode || ''
this.driverCores = o.params.driverCores || 1
this.driverMemory = o.params.driverMemory || '512M'
@@ -427,7 +429,9 @@
// backfill resourceList
let resourceList = o.params.resourceList || []
if (resourceList.length) {
this.resourceList = resourceList
this.resourceList = _.map(resourceList, v => {
return v.id
})
this.cacheResourceList = resourceList
}
@@ -437,12 +441,11 @@
this.localParams = localParams
}
}
})
},
mounted () {
},
components: { mLocalParams, mListBox, mResources }
components: { mLocalParams, mListBox, mResources, Treeselect }
}
</script>
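
One consequence worth noting across flink.vue, mr.vue, and spark.vue: mainJar now stores the resource id under id instead of the alias under res, and the backfill only reads the new key, so task definitions saved before this commit come back with an empty jar selection. A sketch of the guard as written, with an old-format object shown for contrast:

// New-format params backfill; pre-migration data falls through to ''.
const backfillMainJar = params =>
  params.mainJar && params.mainJar.id ? params.mainJar.id : ''

backfillMainJar({ mainJar: { id: 12 } })        // 12
backfillMainJar({ mainJar: { res: 'a.jar' } })  // '' (old format, not migrated)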

38
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/plugIn/jsPlumbHandle.js

@@ -68,7 +68,7 @@ JSP.prototype.init = function ({ dag, instance, options }) {
// Register jsplumb connection type and configuration
this.JspInstance.registerConnectionType('basic', {
anchor: 'Continuous',
connector: 'Straight' // Line type
connector: 'Bezier' // Line type
})
// Initial configuration
@@ -236,7 +236,7 @@ JSP.prototype.initNode = function (el) {
filter: '.ep',
anchor: 'Continuous',
connectorStyle: {
stroke: '#555',
stroke: '#2d8cf0',
strokeWidth: 2,
outlineStroke: 'transparent',
outlineWidth: 4
@@ -297,6 +297,7 @@ JSP.prototype.tasksContextmenu = function (event) {
if (isOne) {
// start run
$('#startRunning').on('click', () => {
let name = store.state.dag.name
let id = router.history.current.params.id
store.dispatch('dag/getStartCheck', { processDefinitionId: id }).then(res => {
let modal = Vue.$modal.dialog({
@@ -317,7 +318,8 @@
},
props: {
item: {
id: id
id: id,
name: name
},
startNodeList: $name,
sourceType: 'contextmenu'
@@ -378,7 +380,7 @@ JSP.prototype.tasksClick = function (e) {
$('.w').removeClass('jtk-tasks-active')
$(e.currentTarget).addClass('jtk-tasks-active')
if ($connect) {
setSvgColor($connect, '#555')
setSvgColor($connect, '#2d8cf0')
this.selectedElement.connect = null
}
this.selectedElement.id = $(e.currentTarget).attr('id')
@@ -437,19 +439,19 @@ JSP.prototype.handleEventPointer = function (is) {
isClick: is,
isAttachment: false
})
wDom.removeClass('jtk-ep')
if (!is) {
wDom.removeClass('jtk-tasks-active')
this.selectedElement = {}
_.map($('#canvas svg'), v => {
if ($(v).attr('class')) {
_.map($(v).find('path'), v1 => {
$(v1).attr('fill', '#555')
$(v1).attr('stroke', '#555')
})
}
})
}
// wDom.removeClass('jtk-ep')
// if (!is) {
// wDom.removeClass('jtk-tasks-active')
// this.selectedElement = {}
// _.map($('#canvas svg'), v => {
// if ($(v).attr('class')) {
// _.map($(v).find('path'), v1 => {
// $(v1).attr('fill', '#555')
// $(v1).attr('stroke', '#555')
// })
// }
// })
// }
}
/**
@@ -764,7 +766,7 @@ JSP.prototype.jspBackfill = function ({ connects, locations, largeJson }) {
source: sourceId,
target: targetId,
type: 'basic',
paintStyle: { strokeWidth: 2, stroke: '#555' }
paintStyle: { strokeWidth: 2, stroke: '#2d8cf0' }
})
})
})
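
The jsPlumbHandle.js edits are a restyle of the DAG canvas: connections switch from straight lines to Bezier curves and from grey #555 to blue #2d8cf0 everywhere a connection is typed or painted. A condensed sketch of the two touchpoints, assuming a jsPlumb instance named instance:

// Shared connection type now draws curved, blue edges.
instance.registerConnectionType('basic', {
  anchor: 'Continuous',
  connector: 'Bezier' // was 'Straight'
})

// Backfilled connections are painted with the same colour.
instance.connect({
  source: 'node-a',
  target: 'node-b',
  type: 'basic',
  paintStyle: { strokeWidth: 2, stroke: '#2d8cf0' } // was '#555'
})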

2
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/plugIn/util.js

@@ -100,7 +100,7 @@ const setSvgColor = (e, color) => {
// Traverse clear all colors
$('.jtk-connector').each((i, o) => {
_.map($(o)[0].childNodes, v => {
$(v).attr('fill', '#555').attr('stroke', '#555').attr('stroke-width', 2)
$(v).attr('fill', '#2d8cf0').attr('stroke', '#2d8cf0').attr('stroke-width', 2)
})
})

4
dolphinscheduler-ui/src/js/conf/home/pages/dag/definitionDetails.vue

@@ -41,7 +41,7 @@
props: {},
methods: {
...mapMutations('dag', ['resetParams', 'setIsDetails']),
...mapActions('dag', ['getProcessList','getProjectList', 'getResourcesList', 'getProcessDetails']),
...mapActions('dag', ['getProcessList','getProjectList', 'getResourcesList', 'getProcessDetails','getResourcesListJar']),
...mapActions('security', ['getTenantList','getWorkerGroupsAll']),
/**
* init
@@ -60,6 +60,8 @@
this.getProjectList(),
// get resource
this.getResourcesList(),
// get jar
this.getResourcesListJar(),
// get worker group list
this.getWorkerGroupsAll(),
this.getTenantList()

Some files were not shown because too many files have changed in this diff.