Merge pull request #2 from apache/dev

update code

BoYiZhang committed via GitHub, commit 6b16ec1fb8
  1. charts/README.md (226 changes)
  2. charts/dolphinscheduler/Chart.yaml (52 changes)
  3. charts/dolphinscheduler/README.md (226 changes)
  4. charts/dolphinscheduler/templates/NOTES.txt (44 changes)
  5. charts/dolphinscheduler/templates/_helpers.tpl (149 changes)
  6. charts/dolphinscheduler/templates/configmap-dolphinscheduler-alert.yaml (41 changes)
  7. charts/dolphinscheduler/templates/configmap-dolphinscheduler-master.yaml (34 changes)
  8. charts/dolphinscheduler/templates/configmap-dolphinscheduler-worker.yaml (39 changes)
  9. charts/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml (228 changes)
  10. charts/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml (161 changes)
  11. charts/dolphinscheduler/templates/deployment-dolphinscheduler-frontend.yaml (102 changes)
  12. charts/dolphinscheduler/templates/ingress.yaml (43 changes)
  13. charts/dolphinscheduler/templates/pvc-dolphinscheduler-alert.yaml (35 changes)
  14. charts/dolphinscheduler/templates/pvc-dolphinscheduler-api.yaml (35 changes)
  15. charts/dolphinscheduler/templates/pvc-dolphinscheduler-frontend.yaml (35 changes)
  16. charts/dolphinscheduler/templates/secret-external-postgresql.yaml (29 changes)
  17. charts/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml (247 changes)
  18. charts/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml (275 changes)
  19. charts/dolphinscheduler/templates/svc-dolphinscheduler-api.yaml (35 changes)
  20. charts/dolphinscheduler/templates/svc-dolphinscheduler-frontend.yaml (35 changes)
  21. charts/dolphinscheduler/templates/svc-dolphinscheduler-master-headless.yaml (36 changes)
  22. charts/dolphinscheduler/templates/svc-dolphinscheduler-worker-headless.yaml (36 changes)
  23. charts/dolphinscheduler/values.yaml (355 changes)
  24. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/DataSourceController.java (33 changes)
  25. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java (152 changes)
  26. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/Directory.java (29 changes)
  27. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/FileLeaf.java (24 changes)
  28. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/ResourceComponent.java (193 changes)
  29. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/filter/IFilter.java (28 changes)
  30. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/filter/ResourceFilter.java (100 changes)
  31. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/visitor/ResourceTreeVisitor.java (130 changes)
  32. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/visitor/Visitor.java (31 changes)
  33. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java (12 changes)
  34. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/DataSourceService.java (32 changes)
  35. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java (34 changes)
  36. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessInstanceService.java (8 changes)
  37. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java (559 changes)
  38. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/UdfFuncService.java (4 changes)
  39. dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/DataSourceControllerTest.java (4 changes)
  40. dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/dto/resources/filter/ResourceFilterTest.java (58 changes)
  41. dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/dto/resources/visitor/ResourceTreeVisitorTest.java (82 changes)
  42. dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ResourcesServiceTest.java (112 changes)
  43. dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/utils/CheckUtilsTest.java (7 changes)
  44. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java (3 changes)
  45. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/AuthorizationType.java (12 changes)
  46. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/DbConnectType.java (44 changes)
  47. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/process/ResourceInfo.java (10 changes)
  48. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/AbstractParameters.java (3 changes)
  49. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/IParameters.java (4 changes)
  50. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/conditions/ConditionsParameters.java (3 changes)
  51. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/datax/DataxParameters.java (3 changes)
  52. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/dependent/DependentParameters.java (3 changes)
  53. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/flink/FlinkParameters.java (29 changes)
  54. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/http/HttpParameters.java (3 changes)
  55. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/mr/MapreduceParameters.java (20 changes)
  56. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/procedure/ProcedureParameters.java (3 changes)
  57. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/python/PythonParameters.java (10 changes)
  58. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/shell/ShellParameters.java (9 changes)
  59. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/spark/SparkParameters.java (20 changes)
  60. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/sql/SqlParameters.java (3 changes)
  61. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/sqoop/SqoopParameters.java (3 changes)
  62. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/subprocess/SubProcessParameters.java (3 changes)
  63. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java (49 changes)
  64. dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/task/FlinkParametersTest.java (14 changes)
  65. dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/OracleDataSource.java (15 changes)
  66. dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/ProcessDefinition.java (15 changes)
  67. dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/Resource.java (68 changes)
  68. dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/ProcessDefinitionMapper.java (11 changes)
  69. dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/ResourceMapper.java (53 changes)
  70. dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/UdfFuncMapper.java (15 changes)
  71. dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProcessDefinitionMapper.xml (11 changes)
  72. dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProcessInstanceMapper.xml (7 changes)
  73. dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ResourceMapper.xml (79 changes)
  74. dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/UdfFuncMapper.xml (24 changes)
  75. dolphinscheduler-dao/src/test/java/org/apache/dolphinscheduler/dao/mapper/ResourceMapperTest.java (57 changes)
  76. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/runner/TaskScheduleThread.java (50 changes)
  77. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractYarnTask.java (5 changes)
  78. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/flink/FlinkTask.java (25 changes)
  79. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/mr/MapReduceTask.java (26 changes)
  80. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/spark/SparkTask.java (26 changes)
  81. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java (8 changes)
  82. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sqoop/SqoopTask.java (4 changes)
  83. dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/permission/PermissionCheck.java (31 changes)
  84. dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java (26 changes)
  85. dolphinscheduler-ui/src/js/conf/home/pages/datasource/pages/list/_source/createDataSource.vue (24 changes)
  86. dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/_source/common.js (0 changes)
  87. dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/list/_source/list.vue (0 changes)
  88. dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/list/index.vue (0 changes)
  89. dolphinscheduler-ui/src/js/conf/home/store/resource/actions.js (0 changes)
  90. dolphinscheduler-ui/src/js/module/components/fileUpdate/fileUpdate.vue (0 changes)
  91. dolphinscheduler-ui/src/js/module/i18n/locale/en_US.js (3 changes)
  92. dolphinscheduler-ui/src/js/module/i18n/locale/zh_CN.js (3 changes)
  93. pom.xml (1 change)
  94. sql/soft_version (2 changes)
  95. sql/upgrade/1.2.2_schema/mysql/dolphinscheduler_ddl.sql (80 changes)
  96. sql/upgrade/1.2.2_schema/postgresql/dolphinscheduler_ddl.sql (83 changes)
  97. sql/upgrade/1.2.2_schema/postgresql/dolphinscheduler_dml.sql (2 changes)

charts/README.md (226 changes)

@@ -0,0 +1,226 @@
# Dolphin Scheduler
[Dolphin Scheduler](https://dolphinscheduler.apache.org) is a distributed, extensible visual DAG workflow scheduling system, dedicated to solving the complex dependencies in data processing and making the scheduling system work out of the box.
## Introduction
This chart bootstraps a [Dolphin Scheduler](https://dolphinscheduler.apache.org) distributed deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Kubernetes 1.10+
- PV provisioner support in the underlying infrastructure
## Installing the Chart
To install the chart with the release name `dolphinscheduler`:
```bash
$ git clone https://github.com/apache/incubator-dolphinscheduler.git
$ cd incubator-dolphinscheduler
$ helm install --name dolphinscheduler .
```
These commands deploy Dolphin Scheduler on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
> **Tip**: List all releases using `helm list`
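As an illustration, any parameter from the [configuration](#configuration) section below can be overridden at install time; a sketch using the same Helm 2 syntax as the commands above (`my-values.yaml` and the chosen overrides are only examples):

```bash
# Override individual parameters on the command line
$ helm install --name dolphinscheduler --set master.replicas=5 --set timezone=UTC .

# Or collect overrides in a custom values file and pass it with -f
$ helm install --name dolphinscheduler -f my-values.yaml .
```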
## Uninstalling the Chart
To uninstall/delete the `dolphinscheduler` deployment:
```bash
$ helm delete --purge dolphinscheduler
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
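Note that `helm delete --purge` (like `helm install --name` above) is Helm 2 syntax; on Helm 3 the equivalent would be:

```bash
$ helm uninstall dolphinscheduler
```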
## Configuration
The following table lists the configurable parameters of the Dolphin Scheduler chart and their default values.
| Parameter | Description | Default |
| --------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------- |
| `timezone` | The timezone for all Dolphin Scheduler components | `Asia/Shanghai` |
| `image.registry` | Docker image registry for the Dolphin Scheduler | `docker.io` |
| `image.repository` | Docker image repository for the Dolphin Scheduler | `dolphinscheduler` |
| `image.tag` | Docker image version for the Dolphin Scheduler | `1.2.1` |
| `image.imagePullPolicy` | Image pull policy. One of Always, Never, IfNotPresent | `IfNotPresent` |
| `imagePullSecrets` | ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images | `[]` |
| | | |
| `postgresql.enabled` | If no external PostgreSQL is used, the Dolphin Scheduler will deploy an internal PostgreSQL by default | `true` |
| `postgresql.postgresqlUsername` | The username for internal PostgreSQL | `root` |
| `postgresql.postgresqlPassword` | The password for internal PostgreSQL | `root` |
| `postgresql.postgresqlDatabase` | The database for internal PostgreSQL | `dolphinscheduler` |
| `postgresql.persistence.enabled` | Set `postgresql.persistence.enabled` to `true` to mount a new volume for internal PostgreSQL | `false` |
| `postgresql.persistence.size` | `PersistentVolumeClaim` Size | `20Gi` |
| `postgresql.persistence.storageClass` | PostgreSQL data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `externalDatabase.host` | If an external PostgreSQL is used (set `postgresql.enabled` to `false`), the Dolphin Scheduler database host | `localhost` |
| `externalDatabase.port` | If an external PostgreSQL is used (set `postgresql.enabled` to `false`), the Dolphin Scheduler database port | `5432` |
| `externalDatabase.username` | If an external PostgreSQL is used (set `postgresql.enabled` to `false`), the Dolphin Scheduler database username | `root` |
| `externalDatabase.password` | If an external PostgreSQL is used (set `postgresql.enabled` to `false`), the Dolphin Scheduler database password | `root` |
| `externalDatabase.database` | If an external PostgreSQL is used (set `postgresql.enabled` to `false`), the Dolphin Scheduler database name | `dolphinscheduler` |
| | | |
| `zookeeper.enabled` | If no external ZooKeeper is used, the Dolphin Scheduler will deploy an internal ZooKeeper by default | `true` |
| `zookeeper.taskQueue` | Specify task queue for `master` and `worker` | `zookeeper` |
| `zookeeper.persistence.enabled` | Set `zookeeper.persistence.enabled` to `true` to mount a new volume for internal Zookeeper | `false` |
| `zookeeper.persistence.size` | `PersistentVolumeClaim` Size | `20Gi` |
| `zookeeper.persistence.storageClass` | Zookeeper data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `externalZookeeper.taskQueue` | If an external ZooKeeper is used (set `zookeeper.enabled` to `false`), the task queue for `master` and `worker` | `zookeeper` |
| `externalZookeeper.zookeeperQuorum` | If an external ZooKeeper is used (set `zookeeper.enabled` to `false`), the ZooKeeper quorum | `127.0.0.1:2181` |
| | | |
| `master.podManagementPolicy` | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down | `Parallel` |
| `master.replicas` | Replicas is the desired number of replicas of the given Template | `3` |
| `master.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `master.tolerations` | If specified, the pod's tolerations | `{}` |
| `master.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `master.configmap.MASTER_EXEC_THREADS` | Master execute thread num | `100` |
| `master.configmap.MASTER_EXEC_TASK_NUM` | Master execute task number in parallel | `20` |
| `master.configmap.MASTER_HEARTBEAT_INTERVAL` | Master heartbeat interval | `10` |
| `master.configmap.MASTER_TASK_COMMIT_RETRYTIMES` | Master commit task retry times | `5` |
| `master.configmap.MASTER_TASK_COMMIT_INTERVAL` | Master commit task interval | `1000` |
| `master.configmap.MASTER_MAX_CPULOAD_AVG` | The master server only works when the CPU load average is below this value. Default value: the number of CPU cores * 2 | `100` |
| `master.configmap.MASTER_RESERVED_MEMORY` | The master server only works when the available memory is above this reserved amount. Default value: physical memory * 1/10, in GB | `0.1` |
| `master.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `master.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `master.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `master.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `master.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `master.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `master.persistentVolumeClaim.enabled` | Set `master.persistentVolumeClaim.enabled` to `true` to mount a new volume for `master` | `false` |
| `master.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `master.persistentVolumeClaim.storageClassName` | `Master` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `master.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `worker.podManagementPolicy` | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down | `Parallel` |
| `worker.replicas` | Replicas is the desired number of replicas of the given Template | `3` |
| `worker.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `worker.tolerations` | If specified, the pod's tolerations | `{}` |
| `worker.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `worker.configmap.WORKER_EXEC_THREADS` | Worker execute thread num | `100` |
| `worker.configmap.WORKER_HEARTBEAT_INTERVAL` | Worker heartbeat interval | `10` |
| `worker.configmap.WORKER_FETCH_TASK_NUM` | The number of tasks the worker fetches from the queue at a time | `3` |
| `worker.configmap.WORKER_MAX_CPULOAD_AVG` | The worker server only works when the CPU load average is below this value. Default value: the number of CPU cores * 2 | `100` |
| `worker.configmap.WORKER_RESERVED_MEMORY` | The worker server only works when the available memory is above this reserved amount. Default value: physical memory * 1/10, in GB | `0.1` |
| `worker.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH` | User data directory path; configure it yourself, and make sure the directory exists and has read/write permissions | `/tmp/dolphinscheduler` |
| `worker.configmap.DOLPHINSCHEDULER_ENV` | System environment path; configure it yourself (see `values.yaml`) | `[]` |
| `worker.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `worker.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `worker.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `worker.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `worker.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `worker.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `worker.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `worker.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `worker.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `worker.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `worker.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `worker.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `worker.persistentVolumeClaim.enabled` | Set `worker.persistentVolumeClaim.enabled` to `true` to enable `persistentVolumeClaim` for `worker` | `false` |
| `worker.persistentVolumeClaim.dataPersistentVolume.enabled` | Set `worker.persistentVolumeClaim.dataPersistentVolume.enabled` to `true` to mount a data volume for `worker` | `false` |
| `worker.persistentVolumeClaim.dataPersistentVolume.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `worker.persistentVolumeClaim.dataPersistentVolume.storageClassName` | `Worker` data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `worker.persistentVolumeClaim.dataPersistentVolume.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| `worker.persistentVolumeClaim.logsPersistentVolume.enabled` | Set `worker.persistentVolumeClaim.logsPersistentVolume.enabled` to `true` to mount a logs volume for `worker` | `false` |
| `worker.persistentVolumeClaim.logsPersistentVolume.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `worker.persistentVolumeClaim.logsPersistentVolume.storageClassName` | `Worker` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `worker.persistentVolumeClaim.logsPersistentVolume.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `alert.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
| `alert.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
| `alert.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
| `alert.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
| `alert.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `alert.tolerations` | If specified, the pod's tolerations | `{}` |
| `alert.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `alert.configmap.XLS_FILE_PATH` | XLS file path | `/tmp/xls` |
| `alert.configmap.MAIL_SERVER_HOST` | Mail `SERVER HOST` | `nil` |
| `alert.configmap.MAIL_SERVER_PORT` | Mail `SERVER PORT` | `nil` |
| `alert.configmap.MAIL_SENDER` | Mail `SENDER` | `nil` |
| `alert.configmap.MAIL_USER` | Mail `USER` | `nil` |
| `alert.configmap.MAIL_PASSWD` | Mail `PASSWORD` | `nil` |
| `alert.configmap.MAIL_SMTP_STARTTLS_ENABLE` | Mail `SMTP STARTTLS` enable | `false` |
| `alert.configmap.MAIL_SMTP_SSL_ENABLE` | Mail `SMTP SSL` enable | `false` |
| `alert.configmap.MAIL_SMTP_SSL_TRUST` | Mail `SMTP SSL TRUST` | `nil` |
| `alert.configmap.ENTERPRISE_WECHAT_ENABLE` | `Enterprise Wechat` enable | `false` |
| `alert.configmap.ENTERPRISE_WECHAT_CORP_ID` | `Enterprise Wechat` corp id | `nil` |
| `alert.configmap.ENTERPRISE_WECHAT_SECRET` | `Enterprise Wechat` secret | `nil` |
| `alert.configmap.ENTERPRISE_WECHAT_AGENT_ID` | `Enterprise Wechat` agent id | `nil` |
| `alert.configmap.ENTERPRISE_WECHAT_USERS` | `Enterprise Wechat` users | `nil` |
| `alert.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `alert.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `alert.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `alert.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `alert.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `alert.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `alert.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `alert.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `alert.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `alert.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `alert.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `alert.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `alert.persistentVolumeClaim.enabled` | Set `alert.persistentVolumeClaim.enabled` to `true` to mount a new volume for `alert` | `false` |
| `alert.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `alert.persistentVolumeClaim.storageClassName` | `Alert` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `alert.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `api.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
| `api.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
| `api.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
| `api.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
| `api.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `api.tolerations` | If specified, the pod's tolerations | `{}` |
| `api.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `api.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `api.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `api.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `api.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `api.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `api.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `api.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `api.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `api.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `api.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `api.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `api.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `api.persistentVolumeClaim.enabled` | Set `api.persistentVolumeClaim.enabled` to `true` to mount a new volume for `api` | `false` |
| `api.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `api.persistentVolumeClaim.storageClassName` | `api` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `api.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `frontend.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
| `frontend.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
| `frontend.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
| `frontend.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
| `frontend.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `frontend.tolerations` | If specified, the pod's tolerations | `{}` |
| `frontend.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `frontend.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `frontend.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `frontend.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `frontend.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `frontend.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `frontend.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `frontend.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `frontend.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `frontend.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `frontend.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `frontend.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `frontend.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `frontend.persistentVolumeClaim.enabled` | Set `frontend.persistentVolumeClaim.enabled` to `true` to mount a new volume for `frontend` | `false` |
| `frontend.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `frontend.persistentVolumeClaim.storageClassName` | `frontend` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `frontend.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `ingress.enabled` | Enable ingress | `false` |
| `ingress.host` | Ingress host | `dolphinscheduler.org` |
| `ingress.path` | Ingress path | `/` |
| `ingress.tls.enabled` | Enable ingress tls | `false` |
| `ingress.tls.hosts` | Ingress tls hosts | `dolphinscheduler.org` |
| `ingress.tls.secretName` | Ingress tls secret name | `dolphinscheduler-tls` |
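For example, a sketch of a custom values file that switches to an external PostgreSQL and exposes the UI through the ingress (the host names and credentials are placeholders):

```yaml
# my-values.yaml (illustrative only)
postgresql:
  enabled: false                # disable the bundled PostgreSQL
externalDatabase:
  host: "pg.example.com"        # placeholder host
  port: 5432
  username: "dolphinscheduler"
  password: "changeme"          # placeholder credential
  database: "dolphinscheduler"
ingress:
  enabled: true
  host: "dolphinscheduler.example.com"  # placeholder host
```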
For more information please refer to the [chart](https://github.com/apache/incubator-dolphinscheduler.git) documentation.

charts/dolphinscheduler/Chart.yaml (52 changes)

@@ -0,0 +1,52 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: v2
name: dolphinscheduler
description: Dolphin Scheduler is a distributed, extensible visual DAG workflow scheduling system, dedicated to solving the complex dependencies in data processing and making the scheduling system work out of the box.
home: https://dolphinscheduler.apache.org
icon: https://dolphinscheduler.apache.org/img/hlogo_colorful.svg
keywords:
- dolphinscheduler
- Scheduler
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 1.2.1
dependencies:
- name: postgresql
version: 8.x.x
repository: https://charts.bitnami.com/bitnami
condition: postgresql.enabled
- name: zookeeper
version: 5.x.x
repository: https://charts.bitnami.com/bitnami
condition: zookeeper.enabled
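Because the chart declares `postgresql` and `zookeeper` as dependencies, they have to be fetched before the chart can be packaged or installed; assuming the Bitnami repository above is reachable, a typical invocation would be:

```bash
$ helm dependency update charts/dolphinscheduler
```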

charts/dolphinscheduler/README.md (226 changes)

@@ -0,0 +1,226 @@
# Dolphin Scheduler
[Dolphin Scheduler](https://dolphinscheduler.apache.org) is a distributed, extensible visual DAG workflow scheduling system, dedicated to solving the complex dependencies in data processing and making the scheduling system work out of the box.
## Introduction
This chart bootstraps a [Dolphin Scheduler](https://dolphinscheduler.apache.org) distributed deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Kubernetes 1.10+
- PV provisioner support in the underlying infrastructure
## Installing the Chart
To install the chart with the release name `dolphinscheduler`:
```bash
$ git clone https://github.com/apache/incubator-dolphinscheduler.git
$ cd incubator-dolphinscheduler
$ helm install --name dolphinscheduler .
```
These commands deploy Dolphin Scheduler on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
> **Tip**: List all releases using `helm list`
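An existing release can likewise be reconfigured in place; a sketch using Helm 2 syntax consistent with the commands above (the `master.replicas` override is only an example):

```bash
$ helm upgrade dolphinscheduler . --set master.replicas=5
```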
## Uninstalling the Chart
To uninstall/delete the `dolphinscheduler` deployment:
```bash
$ helm delete --purge dolphinscheduler
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following table lists the configurable parameters of the Dolphin Scheduler chart and their default values.
| Parameter | Description | Default |
| --------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------- |
| `timezone` | The timezone for all Dolphin Scheduler components | `Asia/Shanghai` |
| `image.registry` | Docker image registry for the Dolphin Scheduler | `docker.io` |
| `image.repository` | Docker image repository for the Dolphin Scheduler | `dolphinscheduler` |
| `image.tag` | Docker image version for the Dolphin Scheduler | `1.2.1` |
| `image.imagePullPolicy` | Image pull policy. One of Always, Never, IfNotPresent | `IfNotPresent` |
| `imagePullSecrets` | ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images | `[]` |
| | | |
| `postgresql.enabled` | If no external PostgreSQL is used, the Dolphin Scheduler will deploy an internal PostgreSQL by default | `true` |
| `postgresql.postgresqlUsername` | The username for internal PostgreSQL | `root` |
| `postgresql.postgresqlPassword` | The password for internal PostgreSQL | `root` |
| `postgresql.postgresqlDatabase` | The database for internal PostgreSQL | `dolphinscheduler` |
| `postgresql.persistence.enabled` | Set `postgresql.persistence.enabled` to `true` to mount a new volume for internal PostgreSQL | `false` |
| `postgresql.persistence.size` | `PersistentVolumeClaim` Size | `20Gi` |
| `postgresql.persistence.storageClass` | PostgreSQL data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `externalDatabase.host` | If an external PostgreSQL is used (set `postgresql.enabled` to `false`), the Dolphin Scheduler database host | `localhost` |
| `externalDatabase.port` | If an external PostgreSQL is used (set `postgresql.enabled` to `false`), the Dolphin Scheduler database port | `5432` |
| `externalDatabase.username` | If an external PostgreSQL is used (set `postgresql.enabled` to `false`), the Dolphin Scheduler database username | `root` |
| `externalDatabase.password` | If an external PostgreSQL is used (set `postgresql.enabled` to `false`), the Dolphin Scheduler database password | `root` |
| `externalDatabase.database` | If an external PostgreSQL is used (set `postgresql.enabled` to `false`), the Dolphin Scheduler database name | `dolphinscheduler` |
| | | |
| `zookeeper.enabled` | If no external ZooKeeper is used, the Dolphin Scheduler will deploy an internal ZooKeeper by default | `true` |
| `zookeeper.taskQueue` | Specify task queue for `master` and `worker` | `zookeeper` |
| `zookeeper.persistence.enabled` | Set `zookeeper.persistence.enabled` to `true` to mount a new volume for internal Zookeeper | `false` |
| `zookeeper.persistence.size` | `PersistentVolumeClaim` Size | `20Gi` |
| `zookeeper.persistence.storageClass` | Zookeeper data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `externalZookeeper.taskQueue` | If an external ZooKeeper is used (set `zookeeper.enabled` to `false`), the task queue for `master` and `worker` | `zookeeper` |
| `externalZookeeper.zookeeperQuorum` | If an external ZooKeeper is used (set `zookeeper.enabled` to `false`), the ZooKeeper quorum | `127.0.0.1:2181` |
| | | |
| `master.podManagementPolicy` | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down | `Parallel` |
| `master.replicas` | Replicas is the desired number of replicas of the given Template | `3` |
| `master.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `master.tolerations` | If specified, the pod's tolerations | `{}` |
| `master.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `master.configmap.MASTER_EXEC_THREADS` | Master execute thread num | `100` |
| `master.configmap.MASTER_EXEC_TASK_NUM` | Master execute task number in parallel | `20` |
| `master.configmap.MASTER_HEARTBEAT_INTERVAL` | Master heartbeat interval | `10` |
| `master.configmap.MASTER_TASK_COMMIT_RETRYTIMES` | Master commit task retry times | `5` |
| `master.configmap.MASTER_TASK_COMMIT_INTERVAL` | Master commit task interval | `1000` |
| `master.configmap.MASTER_MAX_CPULOAD_AVG` | The master server only works when the CPU load average is below this value. Default value: the number of CPU cores * 2 | `100` |
| `master.configmap.MASTER_RESERVED_MEMORY` | The master server only works when the available memory is above this reserved amount. Default value: physical memory * 1/10, in GB | `0.1` |
| `master.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `master.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `master.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `master.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `master.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `master.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `master.persistentVolumeClaim.enabled` | Set `master.persistentVolumeClaim.enabled` to `true` to mount a new volume for `master` | `false` |
| `master.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `master.persistentVolumeClaim.storageClassName` | `Master` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `master.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `worker.podManagementPolicy` | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down | `Parallel` |
| `worker.replicas` | Replicas is the desired number of replicas of the given Template | `3` |
| `worker.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `worker.tolerations` | If specified, the pod's tolerations | `{}` |
| `worker.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `worker.configmap.WORKER_EXEC_THREADS` | Worker execute thread num | `100` |
| `worker.configmap.WORKER_HEARTBEAT_INTERVAL` | Worker heartbeat interval | `10` |
| `worker.configmap.WORKER_FETCH_TASK_NUM` | The number of tasks the worker fetches from the queue at a time | `3` |
| `worker.configmap.WORKER_MAX_CPULOAD_AVG` | The worker server only works when the CPU load average is below this value. Default value: the number of CPU cores * 2 | `100` |
| `worker.configmap.WORKER_RESERVED_MEMORY` | The worker server only works when the available memory is above this reserved amount. Default value: physical memory * 1/10, in GB | `0.1` |
| `worker.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH` | User data directory path; configure it yourself, and make sure the directory exists and has read/write permissions | `/tmp/dolphinscheduler` |
| `worker.configmap.DOLPHINSCHEDULER_ENV` | System environment path; configure it yourself (see `values.yaml`) | `[]` |
| `worker.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `worker.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `worker.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `worker.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `worker.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `worker.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `worker.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `worker.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `worker.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `worker.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `worker.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `worker.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `worker.persistentVolumeClaim.enabled` | Set `worker.persistentVolumeClaim.enabled` to `true` to enable `persistentVolumeClaim` for `worker` | `false` |
| `worker.persistentVolumeClaim.dataPersistentVolume.enabled` | Set `worker.persistentVolumeClaim.dataPersistentVolume.enabled` to `true` to mount a data volume for `worker` | `false` |
| `worker.persistentVolumeClaim.dataPersistentVolume.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `worker.persistentVolumeClaim.dataPersistentVolume.storageClassName` | `Worker` data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `worker.persistentVolumeClaim.dataPersistentVolume.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| `worker.persistentVolumeClaim.logsPersistentVolume.enabled` | Set `worker.persistentVolumeClaim.logsPersistentVolume.enabled` to `true` to mount a logs volume for `worker` | `false` |
| `worker.persistentVolumeClaim.logsPersistentVolume.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `worker.persistentVolumeClaim.logsPersistentVolume.storageClassName` | `Worker` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `worker.persistentVolumeClaim.logsPersistentVolume.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `alert.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
| `alert.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
| `alert.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
| `alert.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
| `alert.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `alert.tolerations` | If specified, the pod's tolerations | `{}` |
| `alert.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `alert.configmap.XLS_FILE_PATH` | XLS file path | `/tmp/xls` |
| `alert.configmap.MAIL_SERVER_HOST` | Mail `SERVER HOST` | `nil` |
| `alert.configmap.MAIL_SERVER_PORT` | Mail `SERVER PORT` | `nil` |
| `alert.configmap.MAIL_SENDER` | Mail `SENDER` | `nil` |
| `alert.configmap.MAIL_USER` | Mail `USER` | `nil` |
| `alert.configmap.MAIL_PASSWD` | Mail `PASSWORD` | `nil` |
| `alert.configmap.MAIL_SMTP_STARTTLS_ENABLE` | Mail `SMTP STARTTLS` enable | `false` |
| `alert.configmap.MAIL_SMTP_SSL_ENABLE` | Mail `SMTP SSL` enable | `false` |
| `alert.configmap.MAIL_SMTP_SSL_TRUST` | Mail `SMTP SSL TRUST` | `nil` |
| `alert.configmap.ENTERPRISE_WECHAT_ENABLE` | `Enterprise Wechat` enable | `false` |
| `alert.configmap.ENTERPRISE_WECHAT_CORP_ID` | `Enterprise Wechat` corp id | `nil` |
| `alert.configmap.ENTERPRISE_WECHAT_SECRET` | `Enterprise Wechat` secret | `nil` |
| `alert.configmap.ENTERPRISE_WECHAT_AGENT_ID` | `Enterprise Wechat` agent id | `nil` |
| `alert.configmap.ENTERPRISE_WECHAT_USERS` | `Enterprise Wechat` users | `nil` |
| `alert.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `alert.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `alert.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `alert.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `alert.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `alert.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `alert.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `alert.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `alert.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `alert.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `alert.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `alert.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `alert.persistentVolumeClaim.enabled` | Set `alert.persistentVolumeClaim.enabled` to `true` to mount a new volume for `alert` | `false` |
| `alert.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `alert.persistentVolumeClaim.storageClassName` | `Alert` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `alert.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `api.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
| `api.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
| `api.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
| `api.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
| `api.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `api.tolerations` | If specified, the pod's tolerations | `{}` |
| `api.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `api.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `api.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `api.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `api.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `api.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `api.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `api.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `api.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `api.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `api.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `api.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `api.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `api.persistentVolumeClaim.enabled` | Set `api.persistentVolumeClaim.enabled` to `true` to mount a new volume for `api` | `false` |
| `api.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `api.persistentVolumeClaim.storageClassName` | `api` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `api.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `frontend.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
| `frontend.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
| `frontend.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
| `frontend.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
| `frontend.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `frontend.tolerations` | If specified, the pod's tolerations | `{}` |
| `frontend.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `frontend.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `frontend.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `frontend.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `frontend.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `frontend.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `frontend.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `frontend.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `frontend.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `frontend.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `frontend.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `frontend.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
| `frontend.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
| `frontend.persistentVolumeClaim.enabled` | Set `frontend.persistentVolumeClaim.enabled` to `true` to mount a new volume for `frontend` | `false` |
| `frontend.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `frontend.persistentVolumeClaim.storageClassName` | `frontend` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `frontend.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `ingress.enabled` | Enable ingress | `false` |
| `ingress.host` | Ingress host | `dolphinscheduler.org` |
| `ingress.path` | Ingress path | `/` |
| `ingress.tls.enabled` | Enable ingress tls | `false` |
| `ingress.tls.hosts` | Ingress tls hosts | `dolphinscheduler.org` |
| `ingress.tls.secretName` | Ingress tls secret name | `dolphinscheduler-tls` |
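As another illustration, a values snippet that turns on persistence for the worker's data and logs volumes (the storage class name is a placeholder):

```yaml
# illustrative values snippet
worker:
  persistentVolumeClaim:
    enabled: true
    dataPersistentVolume:
      enabled: true
      storageClassName: "standard"  # placeholder storage class
      storage: "50Gi"
    logsPersistentVolume:
      enabled: true
      storage: "20Gi"
```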
For more information please refer to the [chart](https://github.com/apache/incubator-dolphinscheduler.git) documentation.

charts/dolphinscheduler/templates/NOTES.txt (44 changes)

@@ -0,0 +1,44 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
** Please be patient while the chart is being deployed **
1. Get the DolphinScheduler URL by running:
{{- if .Values.ingress.enabled }}
export HOSTNAME=$(kubectl get ingress --namespace {{ .Release.Namespace }} {{ template "dolphinscheduler.fullname" . }} -o jsonpath='{.spec.rules[0].host}')
echo "Dolphinscheduler URL: http://$HOSTNAME/"
{{- else }}
kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "dolphinscheduler.fullname" . }}-frontend 8888:8888
{{- end }}
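Rendered without an ingress, following these notes comes down to something like the following (the namespace and service name here are examples of what the template produces for a release named `dolphinscheduler`):

```bash
$ kubectl port-forward --namespace default svc/dolphinscheduler-frontend 8888:8888
# then open http://localhost:8888 in a browser
```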

charts/dolphinscheduler/templates/_helpers.tpl (149 changes)

@@ -0,0 +1,149 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "dolphinscheduler.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "dolphinscheduler.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "dolphinscheduler.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "dolphinscheduler.labels" -}}
helm.sh/chart: {{ include "dolphinscheduler.chart" . }}
{{ include "dolphinscheduler.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Selector labels
*/}}
{{- define "dolphinscheduler.selectorLabels" -}}
app.kubernetes.io/name: {{ include "dolphinscheduler.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "dolphinscheduler.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "dolphinscheduler.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Create a default docker image registry.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dolphinscheduler.image.registry" -}}
{{- $registry := default "docker.io" .Values.image.registry -}}
{{- printf "%s" $registry | trunc 63 | trimSuffix "/" -}}
{{- end -}}
{{/*
Create a default docker image repository.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dolphinscheduler.image.repository" -}}
{{- printf "%s/%s:%s" (include "dolphinscheduler.image.registry" .) .Values.image.repository .Values.image.tag -}}
{{- end -}}
{{/*
Create a default fully qualified postgresql name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dolphinscheduler.postgresql.fullname" -}}
{{- $name := default "postgresql" .Values.postgresql.nameOverride -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified zookeeper name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dolphinscheduler.zookeeper.fullname" -}}
{{- $name := default "zookeeper" .Values.zookeeper.nameOverride -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified zookeeper quorum.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dolphinscheduler.zookeeper.quorum" -}}
{{- $port := default "2181" (.Values.zookeeper.service.port | toString) -}}
{{- printf "%s:%s" (include "dolphinscheduler.zookeeper.fullname" .) $port | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default dolphinscheduler worker base dir.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dolphinscheduler.worker.base.dir" -}}
{{- $name := default "/tmp/dolphinscheduler" .Values.worker.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH -}}
{{- printf "%s" $name | trunc 63 | trimSuffix "/" -}}
{{- end -}}
{{/*
Create a default dolphinscheduler worker data download dir.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dolphinscheduler.worker.data.download.dir" -}}
{{- printf "%s%s" (include "dolphinscheduler.worker.base.dir" .) "/download" -}}
{{- end -}}
{{/*
Create a default dolphinscheduler worker process exec dir.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dolphinscheduler.worker.process.exec.dir" -}}
{{- printf "%s%s" (include "dolphinscheduler.worker.base.dir" .) "/exec" -}}
{{- end -}}
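
With the default image values later in this diff (registry docker.io, repository dolphinscheduler, tag 1.2.1), the dolphinscheduler.image.repository helper renders docker.io/dolphinscheduler:1.2.1. A minimal sanity check of the rendered helpers, assuming a hypothetical release named my-release (Helm 3 syntax):

    helm template my-release . | grep 'image: '
    # expect the busybox init images plus lines like: image: "docker.io/dolphinscheduler:1.2.1"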

41
charts/dolphinscheduler/templates/configmap-dolphinscheduler-alert.yaml

@@ -0,0 +1,41 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{{- if .Values.alert.configmap }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-alert
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-alert
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
data:
XLS_FILE_PATH: {{ .Values.alert.configmap.XLS_FILE_PATH | quote }}
MAIL_SERVER_HOST: {{ .Values.alert.configmap.MAIL_SERVER_HOST | quote }}
MAIL_SERVER_PORT: {{ .Values.alert.configmap.MAIL_SERVER_PORT | quote }}
MAIL_SENDER: {{ .Values.alert.configmap.MAIL_SENDER | quote }}
MAIL_USER: {{ .Values.alert.configmap.MAIL_USER | quote }}
MAIL_PASSWD: {{ .Values.alert.configmap.MAIL_PASSWD | quote }}
MAIL_SMTP_STARTTLS_ENABLE: {{ .Values.alert.configmap.MAIL_SMTP_STARTTLS_ENABLE | quote }}
MAIL_SMTP_SSL_ENABLE: {{ .Values.alert.configmap.MAIL_SMTP_SSL_ENABLE | quote }}
MAIL_SMTP_SSL_TRUST: {{ .Values.alert.configmap.MAIL_SMTP_SSL_TRUST | quote }}
ENTERPRISE_WECHAT_ENABLE: {{ .Values.alert.configmap.ENTERPRISE_WECHAT_ENABLE | quote }}
ENTERPRISE_WECHAT_CORP_ID: {{ .Values.alert.configmap.ENTERPRISE_WECHAT_CORP_ID | quote }}
ENTERPRISE_WECHAT_SECRET: {{ .Values.alert.configmap.ENTERPRISE_WECHAT_SECRET | quote }}
ENTERPRISE_WECHAT_AGENT_ID: {{ .Values.alert.configmap.ENTERPRISE_WECHAT_AGENT_ID | quote }}
ENTERPRISE_WECHAT_USERS: {{ .Values.alert.configmap.ENTERPRISE_WECHAT_USERS | quote }}
{{- end }}
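
Every key above is read from .Values.alert.configmap, so the mail settings can be overridden at install time without editing the chart; a sketch with placeholder SMTP values:

    helm install dolphinscheduler . \
      --set alert.configmap.MAIL_SERVER_HOST=smtp.example.com \
      --set alert.configmap.MAIL_SERVER_PORT=25 \
      --set alert.configmap.MAIL_SENDER=dolphinscheduler@example.com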

34
charts/dolphinscheduler/templates/configmap-dolphinscheduler-master.yaml

@@ -0,0 +1,34 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{{- if .Values.master.configmap }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-master
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
data:
MASTER_EXEC_THREADS: {{ .Values.master.configmap.MASTER_EXEC_THREADS | quote }}
MASTER_EXEC_TASK_NUM: {{ .Values.master.configmap.MASTER_EXEC_TASK_NUM | quote }}
MASTER_HEARTBEAT_INTERVAL: {{ .Values.master.configmap.MASTER_HEARTBEAT_INTERVAL | quote }}
MASTER_TASK_COMMIT_RETRYTIMES: {{ .Values.master.configmap.MASTER_TASK_COMMIT_RETRYTIMES | quote }}
MASTER_TASK_COMMIT_INTERVAL: {{ .Values.master.configmap.MASTER_TASK_COMMIT_INTERVAL | quote }}
MASTER_MAX_CPULOAD_AVG: {{ .Values.master.configmap.MASTER_MAX_CPULOAD_AVG | quote }}
MASTER_RESERVED_MEMORY: {{ .Values.master.configmap.MASTER_RESERVED_MEMORY | quote }}
{{- end }}

39
charts/dolphinscheduler/templates/configmap-dolphinscheduler-worker.yaml

@@ -0,0 +1,39 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{{- if .Values.worker.configmap }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-worker
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
data:
WORKER_EXEC_THREADS: {{ .Values.worker.configmap.WORKER_EXEC_THREADS | quote }}
WORKER_HEARTBEAT_INTERVAL: {{ .Values.worker.configmap.WORKER_HEARTBEAT_INTERVAL | quote }}
WORKER_FETCH_TASK_NUM: {{ .Values.worker.configmap.WORKER_FETCH_TASK_NUM | quote }}
WORKER_MAX_CPULOAD_AVG: {{ .Values.worker.configmap.WORKER_MAX_CPULOAD_AVG | quote }}
WORKER_RESERVED_MEMORY: {{ .Values.worker.configmap.WORKER_RESERVED_MEMORY | quote }}
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: {{ include "dolphinscheduler.worker.base.dir" . | quote }}
DOLPHINSCHEDULER_DATA_DOWNLOAD_BASEDIR_PATH: {{ include "dolphinscheduler.worker.data.download.dir" . | quote }}
DOLPHINSCHEDULER_PROCESS_EXEC_BASEPATH: {{ include "dolphinscheduler.worker.process.exec.dir" . | quote }}
dolphinscheduler_env.sh: |-
{{- range .Values.worker.configmap.DOLPHINSCHEDULER_ENV }}
{{ . }}
{{- end }}
{{- end }}
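
The dolphinscheduler_env.sh entry is assembled by ranging over worker.configmap.DOLPHINSCHEDULER_ENV, so each list item in values.yaml becomes one line of the rendered script. A sketch of the values side (the export lines are illustrative, not chart defaults):

    worker:
      configmap:
        DOLPHINSCHEDULER_ENV:
          - "export JAVA_HOME=/usr/lib/jvm/java-1.8.0"
          - "export PYTHON_HOME=/usr/bin/python"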

228
charts/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml

@@ -0,0 +1,228 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-alert
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-alert
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: alert
spec:
replicas: {{ .Values.alert.replicas }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-alert
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: alert
strategy:
type: {{ .Values.alert.strategy.type | quote }}
rollingUpdate:
maxSurge: {{ .Values.alert.strategy.rollingUpdate.maxSurge | quote }}
maxUnavailable: {{ .Values.alert.strategy.rollingUpdate.maxUnavailable | quote }}
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-alert
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: alert
spec:
{{- if .Values.alert.affinity }}
affinity: {{- toYaml .Values.alert.affinity | nindent 8 }}
{{- end }}
{{- if .Values.alert.nodeSelector }}
nodeSelector: {{- toYaml .Values.alert.nodeSelector | nindent 8 }}
{{- end }}
{{- if .Values.alert.tolerations }}
tolerations: {{- toYaml .Values.alert.tolerations | nindent 8 }}
{{- end }}
initContainers:
- name: init-postgresql
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
counter=$((counter+1))
if [ $counter -eq 5 ]; then
echo "Error: Couldn't connect to postgresql."
exit 1
fi
echo "Trying to connect to postgresql at ${POSTGRESQL_HOST}:${POSTGRESQL_PORT}. Attempt $counter."
sleep 60
done
env:
- name: POSTGRESQL_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
containers:
- name: {{ include "dolphinscheduler.fullname" . }}-alert
image: {{ include "dolphinscheduler.image.repository" . | quote }}
args:
- "alert-server"
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: TZ
value: {{ .Values.timezone }}
- name: XLS_FILE_PATH
valueFrom:
configMapKeyRef:
key: XLS_FILE_PATH
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SERVER_HOST
valueFrom:
configMapKeyRef:
key: MAIL_SERVER_HOST
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SERVER_PORT
valueFrom:
configMapKeyRef:
key: MAIL_SERVER_PORT
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SENDER
valueFrom:
configMapKeyRef:
key: MAIL_SENDER
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_USER
valueFrom:
configMapKeyRef:
key: MAIL_USER
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_PASSWD
valueFrom:
configMapKeyRef:
key: MAIL_PASSWD
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SMTP_STARTTLS_ENABLE
valueFrom:
configMapKeyRef:
key: MAIL_SMTP_STARTTLS_ENABLE
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SMTP_SSL_ENABLE
valueFrom:
configMapKeyRef:
key: MAIL_SMTP_SSL_ENABLE
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SMTP_SSL_TRUST
valueFrom:
configMapKeyRef:
key: MAIL_SMTP_SSL_TRUST
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: ENTERPRISE_WECHAT_ENABLE
valueFrom:
configMapKeyRef:
key: ENTERPRISE_WECHAT_ENABLE
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: ENTERPRISE_WECHAT_CORP_ID
valueFrom:
configMapKeyRef:
key: ENTERPRISE_WECHAT_CORP_ID
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: ENTERPRISE_WECHAT_SECRET
valueFrom:
configMapKeyRef:
key: ENTERPRISE_WECHAT_SECRET
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: ENTERPRISE_WECHAT_AGENT_ID
valueFrom:
configMapKeyRef:
key: ENTERPRISE_WECHAT_AGENT_ID
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: ENTERPRISE_WECHAT_USERS
valueFrom:
configMapKeyRef:
key: ENTERPRISE_WECHAT_USERS
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: POSTGRESQL_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
- name: POSTGRESQL_USERNAME
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlUsername }}
{{- else }}
value: {{ .Values.externalDatabase.username | quote }}
{{- end }}
- name: POSTGRESQL_PASSWORD
valueFrom:
secretKeyRef:
{{- if .Values.postgresql.enabled }}
name: {{ template "dolphinscheduler.postgresql.fullname" . }}
key: postgresql-password
{{- else }}
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
key: db-password
{{- end }}
{{- if .Values.alert.livenessProbe.enabled }}
livenessProbe:
exec:
command:
- sh
- /root/checkpoint.sh
- alert-server
initialDelaySeconds: {{ .Values.alert.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.alert.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.alert.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.alert.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.alert.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.alert.readinessProbe.enabled }}
readinessProbe:
exec:
command:
- sh
- /root/checkpoint.sh
- alert-server
initialDelaySeconds: {{ .Values.alert.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.alert.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.alert.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.alert.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.alert.readinessProbe.failureThreshold }}
{{- end }}
volumeMounts:
- mountPath: "/opt/dolphinscheduler/logs"
name: {{ include "dolphinscheduler.fullname" . }}-alert
volumes:
- name: {{ include "dolphinscheduler.fullname" . }}-alert
{{- if .Values.alert.persistentVolumeClaim.enabled }}
persistentVolumeClaim:
claimName: {{ include "dolphinscheduler.fullname" . }}-alert
{{- else }}
emptyDir: {}
{{- end }}

161
charts/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml

@@ -0,0 +1,161 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-api
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: api
spec:
replicas: {{ .Values.api.replicas }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: api
strategy:
type: {{ .Values.api.strategy.type | quote }}
rollingUpdate:
maxSurge: {{ .Values.api.strategy.rollingUpdate.maxSurge | quote }}
maxUnavailable: {{ .Values.api.strategy.rollingUpdate.maxUnavailable | quote }}
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: api
spec:
{{- if .Values.api.affinity }}
affinity: {{- toYaml .Values.api.affinity | nindent 8 }}
{{- end }}
{{- if .Values.api.nodeSelector }}
nodeSelector: {{- toYaml .Values.api.nodeSelector | nindent 8 }}
{{- end }}
{{- if .Values.api.tolerations }}
tolerations: {{- toYaml .Values.api.tolerations | nindent 8 }}
{{- end }}
initContainers:
- name: init-postgresql
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
counter=$((counter+1))
if [ $counter -eq 5 ]; then
echo "Error: Couldn't connect to postgresql."
exit 1
fi
echo "Trying to connect to postgresql at ${POSTGRESQL_HOST}:${POSTGRESQL_PORT}. Attempt $counter."
sleep 60
done
env:
- name: POSTGRESQL_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
containers:
- name: {{ include "dolphinscheduler.fullname" . }}-api
image: {{ include "dolphinscheduler.image.repository" . | quote }}
args:
- "api-server"
ports:
- containerPort: 12345
name: tcp-port
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: TZ
value: {{ .Values.timezone }}
- name: POSTGRESQL_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
- name: POSTGRESQL_USERNAME
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlUsername }}
{{- else }}
value: {{ .Values.externalDatabase.username | quote }}
{{- end }}
- name: POSTGRESQL_PASSWORD
valueFrom:
secretKeyRef:
{{- if .Values.postgresql.enabled }}
name: {{ template "dolphinscheduler.postgresql.fullname" . }}
key: postgresql-password
{{- else }}
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
key: db-password
{{- end }}
- name: ZOOKEEPER_QUORUM
{{- if .Values.zookeeper.enabled }}
value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"
{{- else }}
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
{{- end }}
{{- if .Values.api.livenessProbe.enabled }}
livenessProbe:
tcpSocket:
port: 12345
initialDelaySeconds: {{ .Values.api.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.api.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.api.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.api.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.api.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.api.readinessProbe.enabled }}
readinessProbe:
tcpSocket:
port: 12345
initialDelaySeconds: {{ .Values.api.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.api.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.api.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.api.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.api.readinessProbe.failureThreshold }}
{{- end }}
volumeMounts:
- mountPath: "/opt/dolphinscheduler/logs"
name: {{ include "dolphinscheduler.fullname" . }}-api
volumes:
- name: {{ include "dolphinscheduler.fullname" . }}-api
{{- if .Values.api.persistentVolumeClaim.enabled }}
persistentVolumeClaim:
claimName: {{ include "dolphinscheduler.fullname" . }}-api
{{- else }}
emptyDir: {}
{{- end }}
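
The api server listens on container port 12345, and the frontend deployment below proxies to it via FRONTEND_API_SERVER_HOST and FRONTEND_API_SERVER_PORT. To reach the api Service (defined later in this diff) directly, a port-forward works; my-release is a placeholder and nc is assumed to be available locally:

    kubectl port-forward svc/my-release-dolphinscheduler-api 12345:12345
    nc -z 127.0.0.1 12345 && echo "api reachable"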

102
charts/dolphinscheduler/templates/deployment-dolphinscheduler-frontend.yaml

@@ -0,0 +1,102 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-frontend
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: frontend
spec:
replicas: {{ .Values.frontend.replicas }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: frontend
strategy:
type: {{ .Values.frontend.strategy.type | quote }}
rollingUpdate:
maxSurge: {{ .Values.frontend.strategy.rollingUpdate.maxSurge | quote }}
maxUnavailable: {{ .Values.frontend.strategy.rollingUpdate.maxUnavailable | quote }}
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: frontend
spec:
{{- if .Values.frontend.affinity }}
affinity: {{- toYaml .Values.frontend.affinity | nindent 8 }}
{{- end }}
{{- if .Values.frontend.nodeSelector }}
nodeSelector: {{- toYaml .Values.frontend.nodeSelector | nindent 8 }}
{{- end }}
{{- if .Values.frontend.tolerations }}
tolerations: {{- toYaml .Values.frontend.tolerations | nindent 8 }}
{{- end }}
containers:
- name: {{ include "dolphinscheduler.fullname" . }}-frontend
image: {{ include "dolphinscheduler.image.repository" . | quote }}
args:
- "frontend"
ports:
- containerPort: 8888
name: tcp-port
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: TZ
value: {{ .Values.timezone }}
- name: FRONTEND_API_SERVER_HOST
value: '{{ include "dolphinscheduler.fullname" . }}-api'
- name: FRONTEND_API_SERVER_PORT
value: "12345"
{{- if .Values.frontend.livenessProbe.enabled }}
livenessProbe:
tcpSocket:
port: 8888
initialDelaySeconds: {{ .Values.frontend.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.frontend.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.frontend.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.frontend.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.frontend.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.frontend.readinessProbe.enabled }}
readinessProbe:
tcpSocket:
port: 8888
initialDelaySeconds: {{ .Values.frontend.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.frontend.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.frontend.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.frontend.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.frontend.readinessProbe.failureThreshold }}
{{- end }}
volumeMounts:
- mountPath: "/var/log/nginx"
name: {{ include "dolphinscheduler.fullname" . }}-frontend
volumes:
- name: {{ include "dolphinscheduler.fullname" . }}-frontend
{{- if .Values.frontend.persistentVolumeClaim.enabled }}
persistentVolumeClaim:
claimName: {{ include "dolphinscheduler.fullname" . }}-frontend
{{- else }}
emptyDir: {}
{{- end }}

43
charts/dolphinscheduler/templates/ingress.yaml

@@ -0,0 +1,43 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{{- if .Values.ingress.enabled }}
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: {{ include "dolphinscheduler.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
rules:
- host: {{ .Values.ingress.host }}
http:
paths:
- path: {{ .Values.ingress.path }}
backend:
serviceName: {{ include "dolphinscheduler.fullname" . }}-frontend
servicePort: tcp-port
{{- if .Values.ingress.tls.enabled }}
tls:
hosts:
{{- range .Values.ingress.tls.hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .Values.ingress.tls.secretName }}
{{- end }}
{{- end }}
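
All fields referenced above live under .Values.ingress; a values sketch that would enable the ingress with TLS (hostname and secret name are placeholders):

    ingress:
      enabled: true
      host: "dolphinscheduler.example.com"
      path: "/"
      tls:
        enabled: true
        secretName: "dolphinscheduler-tls"
        hosts:
          - "dolphinscheduler.example.com"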

35
charts/dolphinscheduler/templates/pvc-dolphinscheduler-alert.yaml

@@ -0,0 +1,35 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{{- if .Values.alert.persistentVolumeClaim.enabled }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-alert
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-alert
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
accessModes:
{{- range .Values.alert.persistentVolumeClaim.accessModes }}
- {{ . | quote }}
{{- end }}
storageClassName: {{ .Values.alert.persistentVolumeClaim.storageClassName | quote }}
resources:
requests:
storage: {{ .Values.alert.persistentVolumeClaim.storage | quote }}
{{- end }}

35
charts/dolphinscheduler/templates/pvc-dolphinscheduler-api.yaml

@@ -0,0 +1,35 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{{- if .Values.api.persistentVolumeClaim.enabled }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-api
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
accessModes:
{{- range .Values.api.persistentVolumeClaim.accessModes }}
- {{ . | quote }}
{{- end }}
storageClassName: {{ .Values.api.persistentVolumeClaim.storageClassName | quote }}
resources:
requests:
storage: {{ .Values.api.persistentVolumeClaim.storage | quote }}
{{- end }}

35
charts/dolphinscheduler/templates/pvc-dolphinscheduler-frontend.yaml

@@ -0,0 +1,35 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{{- if .Values.frontend.persistentVolumeClaim.enabled }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-frontend
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
accessModes:
{{- range .Values.frontend.persistentVolumeClaim.accessModes }}
- {{ . | quote }}
{{- end }}
storageClassName: {{ .Values.frontend.persistentVolumeClaim.storageClassName | quote }}
resources:
requests:
storage: {{ .Values.frontend.persistentVolumeClaim.storage | quote }}
{{- end }}

29
charts/dolphinscheduler/templates/secret-external-postgresql.yaml

@@ -0,0 +1,29 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{{- if not .Values.postgresql.enabled }}
apiVersion: v1
kind: Secret
metadata:
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-postgresql
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
type: Opaque
data:
db-password: {{ .Values.externalDatabase.password | b64enc | quote }}
{{- end }}
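
This secret is rendered only when the bundled PostgreSQL is disabled, and it stores the external password base64-encoded under the db-password key that the deployments above reference. To verify it after install (my-release is a placeholder):

    kubectl get secret my-release-externaldb -o jsonpath='{.data.db-password}' | base64 -d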

247
charts/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml

@@ -0,0 +1,247 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-master
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: master
spec:
podManagementPolicy: {{ .Values.master.podManagementPolicy }}
replicas: {{ .Values.master.replicas }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: master
serviceName: {{ template "dolphinscheduler.fullname" . }}-master-headless
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: master
spec:
{{- if .Values.master.affinity }}
affinity: {{- toYaml .Values.master.affinity | nindent 8 }}
{{- end }}
{{- if .Values.master.nodeSelector }}
nodeSelector: {{- toYaml .Values.master.nodeSelector | nindent 8 }}
{{- end }}
{{- if .Values.master.tolerations }}
tolerations: {{- toYaml .Values.master.tolerations | nindent 8 }}
{{- end }}
initContainers:
- name: init-zookeeper
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
echo "${ZOOKEEPER_QUORUM}" | awk -F ',' 'BEGIN{ i=1 }{ while( i <= NF ){ print $i; i++ } }' | while read line; do
while ! nc -z ${line%:*} ${line#*:}; do
counter=$((counter+1))
if [ $counter -eq 5 ]; then
echo "Error: Couldn't connect to zookeeper."
exit 1
fi
echo "Trying to connect to zookeeper at ${line}. Attempt $counter."
sleep 60
done
done
env:
- name: ZOOKEEPER_QUORUM
{{- if .Values.zookeeper.enabled }}
value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"
{{- else }}
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
{{- end }}
- name: init-postgresql
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
counter=$((counter+1))
if [ $counter -eq 5 ]; then
echo "Error: Couldn't connect to postgresql."
exit 1
fi
echo "Trying to connect to postgresql at ${POSTGRESQL_HOST}:${POSTGRESQL_PORT}. Attempt $counter."
sleep 60
done
env:
- name: POSTGRESQL_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
containers:
- name: {{ include "dolphinscheduler.fullname" . }}-master
image: {{ include "dolphinscheduler.image.repository" . | quote }}
args:
- "master-server"
ports:
- containerPort: 8888
name: unused-tcp-port
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: TZ
value: {{ .Values.timezone }}
- name: MASTER_EXEC_THREADS
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-master
key: MASTER_EXEC_THREADS
- name: MASTER_EXEC_TASK_NUM
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-master
key: MASTER_EXEC_TASK_NUM
- name: MASTER_HEARTBEAT_INTERVAL
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-master
key: MASTER_HEARTBEAT_INTERVAL
- name: MASTER_TASK_COMMIT_RETRYTIMES
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-master
key: MASTER_TASK_COMMIT_RETRYTIMES
- name: MASTER_TASK_COMMIT_INTERVAL
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-master
key: MASTER_TASK_COMMIT_INTERVAL
- name: MASTER_MAX_CPULOAD_AVG
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-master
key: MASTER_MAX_CPULOAD_AVG
- name: MASTER_RESERVED_MEMORY
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-master
key: MASTER_RESERVED_MEMORY
- name: POSTGRESQL_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
- name: POSTGRESQL_USERNAME
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlUsername }}
{{- else }}
value: {{ .Values.externalDatabase.username | quote }}
{{- end }}
- name: POSTGRESQL_PASSWORD
valueFrom:
secretKeyRef:
{{- if .Values.postgresql.enabled }}
name: {{ template "dolphinscheduler.postgresql.fullname" . }}
key: postgresql-password
{{- else }}
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
key: db-password
{{- end }}
- name: TASK_QUEUE
{{- if .Values.zookeeper.enabled }}
value: {{ .Values.zookeeper.taskQueue }}
{{- else }}
value: {{ .Values.externalZookeeper.taskQueue }}
{{- end }}
- name: ZOOKEEPER_QUORUM
{{- if .Values.zookeeper.enabled }}
value: {{ template "dolphinscheduler.zookeeper.quorum" . }}
{{- else }}
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
{{- end }}
{{- if .Values.master.livenessProbe.enabled }}
livenessProbe:
exec:
command:
- sh
- /root/checkpoint.sh
- master-server
initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.master.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.master.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.master.readinessProbe.enabled }}
readinessProbe:
exec:
command:
- sh
- /root/checkpoint.sh
- master-server
initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.master.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.master.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }}
{{- end }}
volumeMounts:
- mountPath: "/opt/dolphinscheduler/logs"
name: {{ include "dolphinscheduler.fullname" . }}-master
volumes:
- name: {{ include "dolphinscheduler.fullname" . }}-master
{{- if .Values.master.persistentVolumeClaim.enabled }}
persistentVolumeClaim:
claimName: {{ include "dolphinscheduler.fullname" . }}-master
{{- else }}
emptyDir: {}
{{- end }}
{{- if .Values.master.persistentVolumeClaim.enabled }}
volumeClaimTemplates:
- metadata:
name: {{ include "dolphinscheduler.fullname" . }}-master
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
accessModes:
{{- range .Values.master.persistentVolumeClaim.accessModes }}
- {{ . | quote }}
{{- end }}
storageClassName: {{ .Values.master.persistentVolumeClaim.storageClassName | quote }}
resources:
requests:
storage: {{ .Values.master.persistentVolumeClaim.storage | quote }}
{{- end }}
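
Because the master runs as a StatefulSet behind the headless service defined later in this diff, each replica keeps a stable identity, and scaling is just a values change (my-release is a placeholder):

    helm upgrade my-release . --set master.replicas=5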

275
charts/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml

@@ -0,0 +1,275 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-worker
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: worker
spec:
podManagementPolicy: {{ .Values.worker.podManagementPolicy }}
replicas: {{ .Values.worker.replicas }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: worker
serviceName: {{ template "dolphinscheduler.fullname" . }}-worker-headless
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: worker
spec:
{{- if .Values.worker.affinity }}
affinity: {{- toYaml .Values.worker.affinity | nindent 8 }}
{{- end }}
{{- if .Values.worker.nodeSelector }}
nodeSelector: {{- toYaml .Values.worker.nodeSelector | nindent 8 }}
{{- end }}
{{- if .Values.worker.tolerations }}
tolerations: {{- toYaml .Values.worker.tolerations | nindent 8 }}
{{- end }}
initContainers:
- name: init-zookeeper
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
echo "${ZOOKEEPER_QUORUM}" | awk -F ',' 'BEGIN{ i=1 }{ while( i <= NF ){ print $i; i++ } }' | while read line; do
while ! nc -z ${line%:*} ${line#*:}; do
counter=$((counter+1))
if [ $counter -eq 5 ]; then
echo "Error: Couldn't connect to zookeeper."
exit 1
fi
echo "Trying to connect to zookeeper at ${line}. Attempt $counter."
sleep 60
done
done
env:
- name: ZOOKEEPER_QUORUM
{{- if .Values.zookeeper.enabled }}
value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"
{{- else }}
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
{{- end }}
- name: init-postgresql
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
counter=$((counter+1))
if [ $counter -eq 5 ]; then
echo "Error: Couldn't connect to postgresql."
exit 1
fi
echo "Trying to connect to postgresql at ${POSTGRESQL_HOST}:${POSTGRESQL_PORT}. Attempt $counter."
sleep 60
done
env:
- name: POSTGRESQL_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
containers:
- name: {{ include "dolphinscheduler.fullname" . }}-worker
image: {{ include "dolphinscheduler.image.repository" . | quote }}
args:
- "worker-server"
ports:
- containerPort: 50051
name: "logs-port"
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: TZ
value: {{ .Values.timezone }}
- name: WORKER_EXEC_THREADS
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-worker
key: WORKER_EXEC_THREADS
- name: WORKER_FETCH_TASK_NUM
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-worker
key: WORKER_FETCH_TASK_NUM
- name: WORKER_HEARTBEAT_INTERVAL
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-worker
key: WORKER_HEARTBEAT_INTERVAL
- name: WORKER_MAX_CPULOAD_AVG
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-worker
key: WORKER_MAX_CPULOAD_AVG
- name: WORKER_RESERVED_MEMORY
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-worker
key: WORKER_RESERVED_MEMORY
- name: POSTGRESQL_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: POSTGRESQL_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
- name: POSTGRESQL_USERNAME
{{- if .Values.postgresql.enabled }}
value: {{ .Values.postgresql.postgresqlUsername }}
{{- else }}
value: {{ .Values.externalDatabase.username | quote }}
{{- end }}
- name: POSTGRESQL_PASSWORD
valueFrom:
secretKeyRef:
{{- if .Values.postgresql.enabled }}
name: {{ template "dolphinscheduler.postgresql.fullname" . }}
key: postgresql-password
{{- else }}
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
key: db-password
{{- end }}
- name: TASK_QUEUE
{{- if .Values.zookeeper.enabled }}
value: {{ .Values.zookeeper.taskQueue }}
{{- else }}
value: {{ .Values.externalZookeeper.taskQueue }}
{{- end }}
- name: ZOOKEEPER_QUORUM
{{- if .Values.zookeeper.enabled }}
value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"
{{- else }}
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
{{- end }}
{{- if .Values.worker.livenessProbe.enabled }}
livenessProbe:
exec:
command:
- sh
- /root/checkpoint.sh
- worker-server
initialDelaySeconds: {{ .Values.worker.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.worker.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.worker.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.worker.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.worker.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.worker.readinessProbe.enabled }}
readinessProbe:
exec:
command:
- sh
- /root/checkpoint.sh
- worker-server
initialDelaySeconds: {{ .Values.worker.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.worker.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.worker.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.worker.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.worker.readinessProbe.failureThreshold }}
{{- end }}
volumeMounts:
- mountPath: {{ include "dolphinscheduler.worker.base.dir" . | quote }}
name: {{ include "dolphinscheduler.fullname" . }}-worker-data
- mountPath: "/opt/dolphinscheduler/logs"
name: {{ include "dolphinscheduler.fullname" . }}-worker-logs
- mountPath: "/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh"
subPath: "dolphinscheduler_env.sh"
name: {{ include "dolphinscheduler.fullname" . }}-worker-configmap
volumes:
- name: {{ include "dolphinscheduler.fullname" . }}-worker-data
{{- if .Values.worker.persistentVolumeClaim.dataPersistentVolume.enabled }}
persistentVolumeClaim:
claimName: {{ include "dolphinscheduler.fullname" . }}-worker-data
{{- else }}
emptyDir: {}
{{- end }}
- name: {{ include "dolphinscheduler.fullname" . }}-worker-logs
{{- if .Values.worker.persistentVolumeClaim.logsPersistentVolume.enabled }}
persistentVolumeClaim:
claimName: {{ include "dolphinscheduler.fullname" . }}-worker-logs
{{- else }}
emptyDir: {}
{{- end }}
- name: {{ include "dolphinscheduler.fullname" . }}-worker-configmap
configMap:
defaultMode: 0777
name: {{ include "dolphinscheduler.fullname" . }}-worker
items:
- key: dolphinscheduler_env.sh
path: dolphinscheduler_env.sh
{{- if .Values.worker.persistentVolumeClaim.enabled }}
volumeClaimTemplates:
{{- if .Values.worker.persistentVolumeClaim.dataPersistentVolume.enabled }}
- metadata:
name: {{ include "dolphinscheduler.fullname" . }}-worker-data
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker-data
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
accessModes:
{{- range .Values.worker.persistentVolumeClaim.dataPersistentVolume.accessModes }}
- {{ . | quote }}
{{- end }}
storageClassName: {{ .Values.worker.persistentVolumeClaim.dataPersistentVolume.storageClassName | quote }}
resources:
requests:
storage: {{ .Values.worker.persistentVolumeClaim.dataPersistentVolume.storage | quote }}
{{- end }}
{{- if .Values.worker.persistentVolumeClaim.logsPersistentVolume.enabled }}
- metadata:
name: {{ include "dolphinscheduler.fullname" . }}-worker-logs
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker-logs
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
accessModes:
{{- range .Values.worker.persistentVolumeClaim.logsPersistentVolume.accessModes }}
- {{ . | quote }}
{{- end }}
storageClassName: {{ .Values.worker.persistentVolumeClaim.logsPersistentVolume.storageClassName | quote }}
resources:
requests:
storage: {{ .Values.worker.persistentVolumeClaim.logsPersistentVolume.storage | quote }}
{{- end }}
{{- end }}

35
charts/dolphinscheduler/templates/svc-dolphinscheduler-api.yaml

@@ -0,0 +1,35 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: v1
kind: Service
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-api
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
ports:
- port: 12345
targetPort: tcp-port
protocol: TCP
name: tcp-port
selector:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: api

35
charts/dolphinscheduler/templates/svc-dolphinscheduler-frontend.yaml

@@ -0,0 +1,35 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: v1
kind: Service
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-frontend
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
ports:
- port: 8888
targetPort: tcp-port
protocol: TCP
name: tcp-port
selector:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: frontend

36
charts/dolphinscheduler/templates/svc-dolphinscheduler-master-headless.yaml

@@ -0,0 +1,36 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: v1
kind: Service
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-master-headless
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master-headless
app.kubernetes.io/instance: {{ .Release.Name }}-master-headless
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
clusterIP: "None"
ports:
- port: 8888
targetPort: tcp-port
protocol: TCP
name: unused-tcp-port
selector:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: master

36
charts/dolphinscheduler/templates/svc-dolphinscheduler-worker-headless.yaml

@@ -0,0 +1,36 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: v1
kind: Service
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-worker-headless
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker-headless
app.kubernetes.io/instance: {{ .Release.Name }}-worker-headless
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
clusterIP: "None"
ports:
- port: 50051
targetPort: logs-port
protocol: TCP
name: logs-port
selector:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: worker

355
charts/dolphinscheduler/values.yaml

@@ -0,0 +1,355 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Default values for dolphinscheduler-chart.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
nameOverride: ""
fullnameOverride: ""
timezone: "Asia/Shanghai"

image:
  registry: "docker.io"
  repository: "dolphinscheduler"
  tag: "1.2.1"
  pullPolicy: "IfNotPresent"

imagePullSecrets: []

# If no external PostgreSQL is available, the bundled PostgreSQL below is used
# as DolphinScheduler's database by default.
postgresql:
  enabled: true
  postgresqlUsername: "root"
  postgresqlPassword: "root"
  postgresqlDatabase: "dolphinscheduler"
  persistence:
    enabled: false
    size: "20Gi"
    storageClass: "-"

# To use an external PostgreSQL instead, set postgresql.enabled to false;
# DolphinScheduler will then connect to the database configured here.
externalDatabase:
  host: "localhost"
  port: "5432"
  username: "root"
  password: "root"
  database: "dolphinscheduler"
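
Pointing the chart at an existing PostgreSQL is therefore just a values override; a minimal sketch of a custom values file (host and credentials below are placeholders, not chart defaults):

postgresql:
  enabled: false
externalDatabase:
  host: "pg.example.internal"
  port: "5432"
  username: "dolphinscheduler"
  password: "changeme"
  database: "dolphinscheduler"

Applied with something like: helm install dolphinscheduler . -f values-external-db.yaml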
# If no external ZooKeeper is available, the bundled ZooKeeper below is used
# by DolphinScheduler by default.
zookeeper:
  enabled: true
  taskQueue: "zookeeper"
  persistence:
    enabled: false
    size: "20Gi"
    storageClass: "-"

# To use an external ZooKeeper instead, set zookeeper.enabled to false;
# DolphinScheduler will then connect to the quorum configured here.
externalZookeeper:
  taskQueue: "zookeeper"
  zookeeperQuorum: "127.0.0.1:2181"
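
The same override pattern covers ZooKeeper; zookeeperQuorum takes a comma-separated host:port list, so an override for a three-node ensemble might look like this (hostnames are placeholders):

zookeeper:
  enabled: false
externalZookeeper:
  taskQueue: "zookeeper"
  zookeeperQuorum: "zk-0.example.internal:2181,zk-1.example.internal:2181,zk-2.example.internal:2181"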
master:
  podManagementPolicy: "Parallel"
  replicas: "3"
  # NodeSelector is a selector which must match a node's labels for the pod to be scheduled on that node.
  # More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  nodeSelector: {}
  # Tolerations allow the pods to be scheduled onto nodes with matching taints.
  # More info: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
  tolerations: []
  # Affinity is a group of affinity scheduling rules; if specified, they constrain the pod's scheduling.
  # More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
  affinity: {}
  configmap:
    MASTER_EXEC_THREADS: "100"
    MASTER_EXEC_TASK_NUM: "20"
    MASTER_HEARTBEAT_INTERVAL: "10"
    MASTER_TASK_COMMIT_RETRYTIMES: "5"
    MASTER_TASK_COMMIT_INTERVAL: "1000"
    MASTER_MAX_CPULOAD_AVG: "100"
    MASTER_RESERVED_MEMORY: "0.1"
  ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  livenessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  readinessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  ## volumeClaimTemplates is a list of claims that pods are allowed to reference.
  ## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
  ## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
  ## A claim in this list takes precedence over any volumes in the template, with the same name.
  persistentVolumeClaim:
    enabled: false
    accessModes:
      - "ReadWriteOnce"
    storageClassName: "-"
    storage: "20Gi"
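
Enabling persistence for the master is then a matter of flipping the claim on and naming a storage class that exists in the target cluster; a sketch assuming a class named "standard" (the name varies per cluster):

master:
  persistentVolumeClaim:
    enabled: true
    accessModes:
      - "ReadWriteOnce"
    storageClassName: "standard"
    storage: "50Gi"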
worker:
  podManagementPolicy: "Parallel"
  replicas: "3"
  # NodeSelector is a selector which must match a node's labels for the pod to be scheduled on that node.
  # More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  nodeSelector: {}
  # Tolerations allow the pods to be scheduled onto nodes with matching taints.
  # More info: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
  tolerations: []
  # Affinity is a group of affinity scheduling rules; if specified, they constrain the pod's scheduling.
  # More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
  affinity: {}
  ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  livenessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  readinessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  configmap:
    WORKER_EXEC_THREADS: "100"
    WORKER_HEARTBEAT_INTERVAL: "10"
    WORKER_FETCH_TASK_NUM: "3"
    WORKER_MAX_CPULOAD_AVG: "100"
    WORKER_RESERVED_MEMORY: "0.1"
    DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
    DOLPHINSCHEDULER_ENV:
      - "export HADOOP_HOME=/opt/soft/hadoop"
      - "export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop"
      - "export SPARK_HOME1=/opt/soft/spark1"
      - "export SPARK_HOME2=/opt/soft/spark2"
      - "export PYTHON_HOME=/opt/soft/python"
      - "export JAVA_HOME=/opt/soft/java"
      - "export HIVE_HOME=/opt/soft/hive"
      - "export FLINK_HOME=/opt/soft/flink"
      - "export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$FLINK_HOME/bin:$PATH"
  ## volumeClaimTemplates is a list of claims that pods are allowed to reference.
  ## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
  ## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
  ## A claim in this list takes precedence over any volumes in the template, with the same name.
  persistentVolumeClaim:
    enabled: false
    ## dolphinscheduler data volume
    dataPersistentVolume:
      enabled: false
      accessModes:
        - "ReadWriteOnce"
      storageClassName: "-"
      storage: "20Gi"
    ## dolphinscheduler logs volume
    logsPersistentVolume:
      enabled: false
      accessModes:
        - "ReadWriteOnce"
      storageClassName: "-"
      storage: "20Gi"
alert:
  strategy:
    type: "RollingUpdate"
    rollingUpdate:
      maxSurge: "25%"
      maxUnavailable: "25%"
  replicas: "1"
  # NodeSelector is a selector which must match a node's labels for the pod to be scheduled on that node.
  # More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  nodeSelector: {}
  # Tolerations allow the pods to be scheduled onto nodes with matching taints.
  # More info: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
  tolerations: []
  # Affinity is a group of affinity scheduling rules; if specified, they constrain the pod's scheduling.
  # More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
  affinity: {}
  configmap:
    XLS_FILE_PATH: "/tmp/xls"
    MAIL_SERVER_HOST: ""
    MAIL_SERVER_PORT: ""
    MAIL_SENDER: ""
    MAIL_USER: ""
    MAIL_PASSWD: ""
    MAIL_SMTP_STARTTLS_ENABLE: false
    MAIL_SMTP_SSL_ENABLE: false
    MAIL_SMTP_SSL_TRUST: ""
    ENTERPRISE_WECHAT_ENABLE: false
    ENTERPRISE_WECHAT_CORP_ID: ""
    ENTERPRISE_WECHAT_SECRET: ""
    ENTERPRISE_WECHAT_AGENT_ID: ""
    ENTERPRISE_WECHAT_USERS: ""
  ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  livenessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  readinessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  ## PersistentVolumeClaim mounted by the alert server pods.
  persistentVolumeClaim:
    enabled: false
    accessModes:
      - "ReadWriteOnce"
    storageClassName: "-"
    storage: "20Gi"
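
Mail alerting is driven entirely by the alert configmap above; a sketch wiring it to an SMTP relay over STARTTLS (server, sender, and credentials are placeholders):

alert:
  configmap:
    MAIL_SERVER_HOST: "smtp.example.com"
    MAIL_SERVER_PORT: "587"
    MAIL_SENDER: "ds-alerts@example.com"
    MAIL_USER: "ds-alerts@example.com"
    MAIL_PASSWD: "changeme"
    MAIL_SMTP_STARTTLS_ENABLE: true
    MAIL_SMTP_SSL_ENABLE: false
    MAIL_SMTP_SSL_TRUST: "smtp.example.com"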
api:
  strategy:
    type: "RollingUpdate"
    rollingUpdate:
      maxSurge: "25%"
      maxUnavailable: "25%"
  replicas: "1"
  # NodeSelector is a selector which must match a node's labels for the pod to be scheduled on that node.
  # More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  nodeSelector: {}
  # Tolerations allow the pods to be scheduled onto nodes with matching taints.
  # More info: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
  tolerations: []
  # Affinity is a group of affinity scheduling rules; if specified, they constrain the pod's scheduling.
  # More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
  affinity: {}
  ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  livenessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  readinessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  ## PersistentVolumeClaim mounted by the api server pods.
  persistentVolumeClaim:
    enabled: false
    accessModes:
      - "ReadWriteOnce"
    storageClassName: "-"
    storage: "20Gi"
frontend:
  strategy:
    type: "RollingUpdate"
    rollingUpdate:
      maxSurge: "25%"
      maxUnavailable: "25%"
  replicas: "1"
  # NodeSelector is a selector which must match a node's labels for the pod to be scheduled on that node.
  # More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  nodeSelector: {}
  # Tolerations allow the pods to be scheduled onto nodes with matching taints.
  # More info: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
  tolerations: []
  # Affinity is a group of affinity scheduling rules; if specified, they constrain the pod's scheduling.
  # More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
  affinity: {}
  ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  livenessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  readinessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  ## PersistentVolumeClaim mounted by the frontend pods.
  persistentVolumeClaim:
    enabled: false
    accessModes:
      - "ReadWriteOnce"
    storageClassName: "-"
    storage: "20Gi"
ingress:
  enabled: false
  host: "dolphinscheduler.org"
  path: "/"
  tls:
    enabled: false
    hosts:
      - "dolphinscheduler.org"
    secretName: "dolphinscheduler-tls"
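
Taken together, exposing the frontend through an ingress controller reduces to overrides like the following (hostname and TLS secret name are placeholders; the secret must already exist in the release namespace):

ingress:
  enabled: true
  host: "ds.example.com"
  path: "/"
  tls:
    enabled: true
    hosts:
      - "ds.example.com"
    secretName: "ds-example-tls"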

33
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/DataSourceController.java

@@ -16,18 +16,19 @@
  */
 package org.apache.dolphinscheduler.api.controller;
+import io.swagger.annotations.Api;
+import io.swagger.annotations.ApiImplicitParam;
+import io.swagger.annotations.ApiImplicitParams;
+import io.swagger.annotations.ApiOperation;
 import org.apache.dolphinscheduler.api.enums.Status;
 import org.apache.dolphinscheduler.api.service.DataSourceService;
 import org.apache.dolphinscheduler.api.utils.Result;
 import org.apache.dolphinscheduler.common.Constants;
+import org.apache.dolphinscheduler.common.enums.DbConnectType;
 import org.apache.dolphinscheduler.common.enums.DbType;
 import org.apache.dolphinscheduler.common.utils.CommonUtils;
 import org.apache.dolphinscheduler.common.utils.ParameterUtils;
 import org.apache.dolphinscheduler.dao.entity.User;
-import io.swagger.annotations.Api;
-import io.swagger.annotations.ApiImplicitParam;
-import io.swagger.annotations.ApiImplicitParams;
-import io.swagger.annotations.ApiOperation;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.springframework.beans.factory.annotation.Autowired;
@@ -76,6 +77,7 @@ public class DataSourceController extends BaseController {
         @ApiImplicitParam(name = "database", value = "DATABASE_NAME", required = true, dataType = "String"),
         @ApiImplicitParam(name = "userName", value = "USER_NAME", required = true, dataType = "String"),
         @ApiImplicitParam(name = "password", value = "PASSWORD", dataType = "String"),
+        @ApiImplicitParam(name = "connectType", value = "CONNECT_TYPE", dataType = "DbConnectType"),
         @ApiImplicitParam(name = "other", value = "DATA_SOURCE_OTHER", dataType = "String")
     })
     @PostMapping(value = "/create")
@@ -90,11 +92,12 @@ public class DataSourceController extends BaseController {
                                @RequestParam(value = "principal") String principal,
                                @RequestParam(value = "userName") String userName,
                                @RequestParam(value = "password") String password,
+                               @RequestParam(value = "connectType") DbConnectType connectType,
                                @RequestParam(value = "other") String other) {
-        logger.info("login user {} create datasource name: {}, note: {}, type: {}, host: {},port: {},database : {},principal: {},userName : {} other: {}",
-                loginUser.getUserName(), name, note, type, host, port, database, principal, userName, other);
+        logger.info("login user {} create datasource name: {}, note: {}, type: {}, host: {}, port: {}, database : {}, principal: {}, userName : {}, connectType: {}, other: {}",
+                loginUser.getUserName(), name, note, type, host, port, database, principal, userName, connectType, other);
         try {
-            String parameter = dataSourceService.buildParameter(name, note, type, host, port, database, principal, userName, password, other);
+            String parameter = dataSourceService.buildParameter(name, note, type, host, port, database, principal, userName, password, connectType, other);
             Map<String, Object> result = dataSourceService.createDataSource(loginUser, name, note, type, parameter);
             return returnDataList(result);
@@ -133,6 +136,7 @@ public class DataSourceController extends BaseController {
         @ApiImplicitParam(name = "database", value = "DATABASE_NAME", required = true, dataType = "String"),
         @ApiImplicitParam(name = "userName", value = "USER_NAME", required = true, dataType = "String"),
         @ApiImplicitParam(name = "password", value = "PASSWORD", dataType = "String"),
+        @ApiImplicitParam(name = "connectType", value = "CONNECT_TYPE", dataType = "DbConnectType"),
         @ApiImplicitParam(name = "other", value = "DATA_SOURCE_OTHER", dataType = "String")
     })
     @PostMapping(value = "/update")
@@ -148,11 +152,12 @@ public class DataSourceController extends BaseController {
                                @RequestParam(value = "principal") String principal,
                                @RequestParam(value = "userName") String userName,
                                @RequestParam(value = "password") String password,
+                               @RequestParam(value = "connectType") DbConnectType connectType,
                                @RequestParam(value = "other") String other) {
-        logger.info("login user {} updateProcessInstance datasource name: {}, note: {}, type: {}, other: {}",
-                loginUser.getUserName(), name, note, type, other);
+        logger.info("login user {} updateProcessInstance datasource name: {}, note: {}, type: {}, connectType: {}, other: {}",
+                loginUser.getUserName(), name, note, type, connectType, other);
         try {
-            String parameter = dataSourceService.buildParameter(name, note, type, host, port, database, principal, userName, password, other);
+            String parameter = dataSourceService.buildParameter(name, note, type, host, port, database, principal, userName, password, connectType, other);
             Map<String, Object> dataSource = dataSourceService.updateDataSource(id, loginUser, name, note, type, parameter);
             return returnDataList(dataSource);
         } catch (Exception e) {
@@ -277,6 +282,7 @@ public class DataSourceController extends BaseController {
         @ApiImplicitParam(name = "database", value = "DATABASE_NAME", required = true, dataType = "String"),
         @ApiImplicitParam(name = "userName", value = "USER_NAME", required = true, dataType = "String"),
         @ApiImplicitParam(name = "password", value = "PASSWORD", dataType = "String"),
+        @ApiImplicitParam(name = "connectType", value = "CONNECT_TYPE", dataType = "DbConnectType"),
         @ApiImplicitParam(name = "other", value = "DATA_SOURCE_OTHER", dataType = "String")
     })
     @PostMapping(value = "/connect")
@@ -291,11 +297,12 @@ public class DataSourceController extends BaseController {
                                 @RequestParam(value = "principal") String principal,
                                 @RequestParam(value = "userName") String userName,
                                 @RequestParam(value = "password") String password,
+                                @RequestParam(value = "connectType") DbConnectType connectType,
                                 @RequestParam(value = "other") String other) {
-        logger.info("login user {}, connect datasource: {} failure, note: {}, type: {}, other: {}",
-                loginUser.getUserName(), name, note, type, other);
+        logger.info("login user {}, connect datasource: {} failure, note: {}, type: {}, connectType: {}, other: {}",
+                loginUser.getUserName(), name, note, type, connectType, other);
         try {
-            String parameter = dataSourceService.buildParameter(name, note, type, host, port, database, principal, userName, password, other);
+            String parameter = dataSourceService.buildParameter(name, note, type, host, port, database, principal, userName, password, connectType, other);
             Boolean isConnection = dataSourceService.checkConnection(type, parameter);
             Result result = new Result();

152
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java

@@ -60,6 +60,50 @@ public class ResourcesController extends BaseController{
     @Autowired
     private UdfFuncService udfFuncService;

+    /**
+     * create a resource directory
+     *
+     * @param loginUser login user
+     * @param type type
+     * @param alias alias
+     * @param description description
+     * @param pid parent id
+     * @param currentDir current directory
+     * @return create result code
+     */
+    @ApiOperation(value = "createDirectory", notes = "CREATE_RESOURCE_NOTES")
+    @ApiImplicitParams({
+            @ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"),
+            @ApiImplicitParam(name = "name", value = "RESOURCE_NAME", required = true, dataType = "String"),
+            @ApiImplicitParam(name = "description", value = "RESOURCE_DESC", dataType = "String"),
+            @ApiImplicitParam(name = "file", value = "RESOURCE_FILE", required = true, dataType = "MultipartFile")
+    })
+    @PostMapping(value = "/directory/create")
+    public Result createDirectory(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
+                                  @RequestParam(value = "type") ResourceType type,
+                                  @RequestParam(value = "name") String alias,
+                                  @RequestParam(value = "description", required = false) String description,
+                                  @RequestParam(value = "pid") int pid,
+                                  @RequestParam(value = "currentDir") String currentDir) {
+        try {
+            logger.info("login user {}, create directory, type: {}, resource alias: {}, desc: {}, pid: {}, current dir: {}",
+                    loginUser.getUserName(), type, alias, description, pid, currentDir);
+            return resourceService.createDirectory(loginUser, alias, description, type, pid, currentDir);
+        } catch (Exception e) {
+            logger.error(CREATE_RESOURCE_ERROR.getMsg(), e);
+            return error(CREATE_RESOURCE_ERROR.getCode(), CREATE_RESOURCE_ERROR.getMsg());
+        }
+    }
+
     /**
      * create resource
      *
@@ -80,13 +124,15 @@ public class ResourcesController extends BaseController{
     @PostMapping(value = "/create")
     public Result createResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                  @RequestParam(value = "type") ResourceType type,
-                                 @RequestParam(value = "name")String alias,
+                                 @RequestParam(value = "name") String alias,
                                  @RequestParam(value = "description", required = false) String description,
-                                 @RequestParam("file") MultipartFile file) {
+                                 @RequestParam("file") MultipartFile file,
+                                 @RequestParam(value = "pid") int pid,
+                                 @RequestParam(value = "currentDir") String currentDir) {
         try {
             logger.info("login user {}, create resource, type: {}, resource alias: {}, desc: {}, file: {},{}",
                     loginUser.getUserName(), type, alias, description, file.getName(), file.getOriginalFilename());
-            return resourceService.createResource(loginUser, alias, description, type, file);
+            return resourceService.createResource(loginUser, alias, description, type, file, pid, currentDir);
         } catch (Exception e) {
             logger.error(CREATE_RESOURCE_ERROR.getMsg(), e);
             return error(CREATE_RESOURCE_ERROR.getCode(), CREATE_RESOURCE_ERROR.getMsg());
@@ -120,7 +166,7 @@ public class ResourcesController extends BaseController{
         try {
             logger.info("login user {}, update resource, type: {}, resource alias: {}, desc: {}",
                     loginUser.getUserName(), type, alias, description);
-            return resourceService.updateResource(loginUser,resourceId,alias, description,type);
+            return resourceService.updateResource(loginUser,resourceId,alias,description,type);
         } catch (Exception e) {
             logger.error(UPDATE_RESOURCE_ERROR.getMsg(), e);
             return error(Status.UPDATE_RESOURCE_ERROR.getCode(), Status.UPDATE_RESOURCE_ERROR.getMsg());
@@ -166,6 +212,7 @@ public class ResourcesController extends BaseController{
     @ApiOperation(value = "queryResourceListPaging", notes = "QUERY_RESOURCE_LIST_PAGING_NOTES")
     @ApiImplicitParams({
             @ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"),
+            @ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "int"),
             @ApiImplicitParam(name = "searchVal", value = "SEARCH_VAL", dataType = "String"),
             @ApiImplicitParam(name = "pageNo", value = "PAGE_NO", dataType = "Int", example = "1"),
             @ApiImplicitParam(name = "pageSize", value = "PAGE_SIZE", dataType = "Int", example = "20")
@@ -174,6 +221,7 @@ public class ResourcesController extends BaseController{
     @ResponseStatus(HttpStatus.OK)
     public Result queryResourceListPaging(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                           @RequestParam(value = "type") ResourceType type,
+                                          @RequestParam(value = "id") int id,
                                           @RequestParam("pageNo") Integer pageNo,
                                           @RequestParam(value = "searchVal", required = false) String searchVal,
                                           @RequestParam("pageSize") Integer pageSize
@@ -187,7 +235,7 @@ public class ResourcesController extends BaseController{
             }
             searchVal = ParameterUtils.handleEscapes(searchVal);
-            result = resourceService.queryResourceListPaging(loginUser, type, searchVal, pageNo, pageSize);
+            result = resourceService.queryResourceListPaging(loginUser, id, type, searchVal, pageNo, pageSize);
             return returnDataListPaging(result);
         } catch (Exception e) {
             logger.error(QUERY_RESOURCES_LIST_PAGING.getMsg(), e);
@@ -227,32 +275,89 @@ public class ResourcesController extends BaseController{
      * verify resource by alias and type
      *
      * @param loginUser login user
-     * @param alias resource name
+     * @param fullName resource full name
      * @param type resource type
      * @return true if the resource name not exists, otherwise return false
      */
     @ApiOperation(value = "verifyResourceName", notes = "VERIFY_RESOURCE_NAME_NOTES")
     @ApiImplicitParams({
-            @ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"),
-            @ApiImplicitParam(name = "name", value = "RESOURCE_NAME", required = true, dataType = "String")
+            @ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"),
+            @ApiImplicitParam(name = "fullName", value = "RESOURCE_FULL_NAME", required = true, dataType = "String")
     })
     @GetMapping(value = "/verify-name")
     @ResponseStatus(HttpStatus.OK)
     public Result verifyResourceName(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
-                                     @RequestParam(value = "name") String alias,
+                                     @RequestParam(value = "fullName") String fullName,
                                      @RequestParam(value = "type") ResourceType type
     ) {
         try {
             logger.info("login user {}, verify resource full name: {}, resource type: {}",
-                    loginUser.getUserName(), alias, type);
-            return resourceService.verifyResourceName(alias, type, loginUser);
+                    loginUser.getUserName(), fullName, type);
+            return resourceService.verifyResourceName(fullName, type, loginUser);
         } catch (Exception e) {
             logger.error(VERIFY_RESOURCE_BY_NAME_AND_TYPE_ERROR.getMsg(), e);
             return error(Status.VERIFY_RESOURCE_BY_NAME_AND_TYPE_ERROR.getCode(), Status.VERIFY_RESOURCE_BY_NAME_AND_TYPE_ERROR.getMsg());
         }
     }

+    /**
+     * query resources jar list
+     *
+     * @param loginUser login user
+     * @param type resource type
+     * @return resource list
+     */
+    @ApiOperation(value = "queryResourceJarList", notes = "QUERY_RESOURCE_LIST_NOTES")
+    @ApiImplicitParams({
+            @ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType")
+    })
+    @GetMapping(value = "/list/jar")
+    @ResponseStatus(HttpStatus.OK)
+    public Result queryResourceJarList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
+                                       @RequestParam(value = "type") ResourceType type
+    ) {
+        try {
+            logger.info("query resource list, login user:{}, resource type:{}", loginUser.getUserName(), type.toString());
+            Map<String, Object> result = resourceService.queryResourceJarList(loginUser, type);
+            return returnDataList(result);
+        } catch (Exception e) {
+            logger.error(QUERY_RESOURCES_LIST_ERROR.getMsg(), e);
+            return error(Status.QUERY_RESOURCES_LIST_ERROR.getCode(), Status.QUERY_RESOURCES_LIST_ERROR.getMsg());
+        }
+    }
+
+    /**
+     * query resource by full name or id
+     *
+     * @param loginUser login user
+     * @param fullName resource full name
+     * @param id resource id
+     * @param type resource type
+     * @return the matching resource
+     */
+    @ApiOperation(value = "queryResource", notes = "QUERY_BY_RESOURCE_NAME")
+    @ApiImplicitParams({
+            @ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"),
+            @ApiImplicitParam(name = "fullName", value = "RESOURCE_FULL_NAME", required = true, dataType = "String")
+    })
+    @GetMapping(value = "/queryResource")
+    @ResponseStatus(HttpStatus.OK)
+    public Result queryResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
+                                @RequestParam(value = "fullName", required = false) String fullName,
+                                @RequestParam(value = "id", required = false) Integer id,
+                                @RequestParam(value = "type") ResourceType type
+    ) {
+        try {
+            logger.info("login user {}, query resource by full name: {} or id: {}, resource type: {}",
+                    loginUser.getUserName(), fullName, id, type);
+            return resourceService.queryResource(fullName, id, type);
+        } catch (Exception e) {
+            logger.error(RESOURCE_NOT_EXIST.getMsg(), e);
+            return error(Status.RESOURCE_NOT_EXIST.getCode(), Status.RESOURCE_NOT_EXIST.getMsg());
+        }
+    }
+
     /**
      * view resource file online
      *
@@ -310,16 +415,18 @@ public class ResourcesController extends BaseController{
                                        @RequestParam(value = "fileName") String fileName,
                                        @RequestParam(value = "suffix") String fileSuffix,
                                        @RequestParam(value = "description", required = false) String description,
-                                       @RequestParam(value = "content") String content
+                                       @RequestParam(value = "content") String content,
+                                       @RequestParam(value = "pid") int pid,
+                                       @RequestParam(value = "currentDir") String currentDir
     ) {
         try {
-            logger.info("login user {}, online create resource! fileName : {}, type : {}, suffix : {}, desc : {}, content : {}",
-                    loginUser.getUserName(), fileName, type, fileSuffix, description, content);
+            logger.info("login user {}, online create resource! fileName : {}, type : {}, suffix : {}, desc : {}, content : {}, pid : {}, currentDir : {}",
+                    loginUser.getUserName(), fileName, type, fileSuffix, description, content, pid, currentDir);
             if (StringUtils.isEmpty(content)) {
                 logger.error("resource file contents are not allowed to be empty");
                 return error(Status.RESOURCE_FILE_IS_EMPTY.getCode(), RESOURCE_FILE_IS_EMPTY.getMsg());
             }
-            return resourceService.onlineCreateResource(loginUser, type, fileName, fileSuffix, description, content);
+            return resourceService.onlineCreateResource(loginUser, type, fileName, fileSuffix, description, content, pid, currentDir);
         } catch (Exception e) {
             logger.error(CREATE_RESOURCE_FILE_ON_LINE_ERROR.getMsg(), e);
             return error(Status.CREATE_RESOURCE_FILE_ON_LINE_ERROR.getCode(), Status.CREATE_RESOURCE_FILE_ON_LINE_ERROR.getMsg());
@@ -384,6 +491,9 @@ public class ResourcesController extends BaseController{
                     .ok()
                     .header(HttpHeaders.CONTENT_DISPOSITION, "attachment; filename=\"" + file.getFilename() + "\"")
                     .body(file);
+        } catch (RuntimeException e) {
+            logger.error(e.getMessage(), e);
+            return ResponseEntity.status(HttpStatus.BAD_REQUEST).body(e.getMessage());
         } catch (Exception e) {
             logger.error(DOWNLOAD_RESOURCE_FILE_ERROR.getMsg(), e);
             return ResponseEntity.status(HttpStatus.BAD_REQUEST).body(Status.DOWNLOAD_RESOURCE_FILE_ERROR.getMsg());
@@ -658,21 +768,21 @@ public class ResourcesController extends BaseController{
      * @param userId user id
      * @return unauthorized result code
      */
-    @ApiOperation(value = "unauthorizedFile", notes = "UNAUTHORIZED_FILE_NOTES")
+    @ApiOperation(value = "authorizeResourceTree", notes = "AUTHORIZE_RESOURCE_TREE_NOTES")
     @ApiImplicitParams({
             @ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType = "Int", example = "100")
     })
-    @GetMapping(value = "/unauth-file")
+    @GetMapping(value = "/authorize-resource-tree")
     @ResponseStatus(HttpStatus.CREATED)
-    public Result unauthorizedFile(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
-                                   @RequestParam("userId") Integer userId) {
+    public Result authorizeResourceTree(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
+                                        @RequestParam("userId") Integer userId) {
         try {
-            logger.info("resource unauthorized file, user:{}, unauthorized user id:{}", loginUser.getUserName(), userId);
-            Map<String, Object> result = resourceService.unauthorizedFile(loginUser, userId);
+            logger.info("all resource file, user:{}, user id:{}", loginUser.getUserName(), userId);
+            Map<String, Object> result = resourceService.authorizeResourceTree(loginUser, userId);
             return returnDataList(result);
         } catch (Exception e) {
-            logger.error(UNAUTHORIZED_FILE_RESOURCE_ERROR.getMsg(), e);
-            return error(Status.UNAUTHORIZED_FILE_RESOURCE_ERROR.getCode(), Status.UNAUTHORIZED_FILE_RESOURCE_ERROR.getMsg());
+            logger.error(AUTHORIZE_RESOURCE_TREE.getMsg(), e);
+            return error(Status.AUTHORIZE_RESOURCE_TREE.getCode(), Status.AUTHORIZE_RESOURCE_TREE.getMsg());
         }
     }

29
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/Directory.java

@@ -0,0 +1,29 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.dolphinscheduler.api.dto.resources;

/**
 * directory
 */
public class Directory extends ResourceComponent {

    @Override
    public boolean isDirctory() {
        return true;
    }
}

24
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/FileLeaf.java

@@ -0,0 +1,24 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.dolphinscheduler.api.dto.resources;

/**
 * file leaf
 */
public class FileLeaf extends ResourceComponent {
}

193
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/ResourceComponent.java

@@ -0,0 +1,193 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.dolphinscheduler.api.dto.resources;

import com.alibaba.fastjson.annotation.JSONField;
import com.alibaba.fastjson.annotation.JSONType;
import org.apache.dolphinscheduler.common.enums.ResourceType;

import java.util.ArrayList;
import java.util.List;

/**
 * resource component
 */
@JSONType(orders = {"id", "pid", "name", "fullName", "description", "isDirctory", "children", "type"})
public abstract class ResourceComponent {

    public ResourceComponent() {
    }

    public ResourceComponent(int id, int pid, String name, String fullName, String description, boolean isDirctory) {
        this.id = id;
        this.pid = pid;
        this.name = name;
        this.fullName = fullName;
        this.description = description;
        this.isDirctory = isDirctory;
        int directoryFlag = isDirctory ? 1 : 0;
        this.idValue = String.format("%s_%s", id, directoryFlag);
    }

    /**
     * id
     */
    @JSONField(ordinal = 1)
    protected int id;

    /**
     * parent id
     */
    @JSONField(ordinal = 2)
    protected int pid;

    /**
     * name
     */
    @JSONField(ordinal = 3)
    protected String name;

    /**
     * current directory
     */
    protected String currentDir;

    /**
     * full name
     */
    @JSONField(ordinal = 4)
    protected String fullName;

    /**
     * description
     */
    @JSONField(ordinal = 5)
    protected String description;

    /**
     * is directory
     */
    @JSONField(ordinal = 6)
    protected boolean isDirctory;

    /**
     * id value
     */
    @JSONField(ordinal = 7)
    protected String idValue;

    /**
     * resource type
     */
    @JSONField(ordinal = 8)
    protected ResourceType type;

    /**
     * children
     */
    @JSONField(ordinal = 9)
    protected List<ResourceComponent> children = new ArrayList<>();

    /**
     * add resource component
     * @param resourceComponent resource component
     */
    public void add(ResourceComponent resourceComponent) {
        children.add(resourceComponent);
    }

    public String getName() {
        return this.name;
    }

    public String getDescription() {
        return this.description;
    }

    public int getId() {
        return id;
    }

    public void setId(int id) {
        this.id = id;
    }

    public int getPid() {
        return pid;
    }

    public void setPid(int pid) {
        this.pid = pid;
    }

    public void setName(String name) {
        this.name = name;
    }

    public String getFullName() {
        return fullName;
    }

    public void setFullName(String fullName) {
        this.fullName = fullName;
    }

    public void setDescription(String description) {
        this.description = description;
    }

    public boolean isDirctory() {
        return isDirctory;
    }

    public void setDirctory(boolean dirctory) {
        isDirctory = dirctory;
    }

    public String getIdValue() {
        return idValue;
    }

    public void setIdValue(int id, boolean isDirctory) {
        int directoryFlag = isDirctory ? 1 : 0;
        this.idValue = String.format("%s_%s", id, directoryFlag);
    }

    public ResourceType getType() {
        return type;
    }

    public void setType(ResourceType type) {
        this.type = type;
    }

    public List<ResourceComponent> getChildren() {
        return children;
    }

    public void setChildren(List<ResourceComponent> children) {
        this.children = children;
    }

    @Override
    public String toString() {
        return "ResourceComponent{" +
                "id=" + id +
                ", pid=" + pid +
                ", name='" + name + '\'' +
                ", currentDir='" + currentDir + '\'' +
                ", fullName='" + fullName + '\'' +
                ", description='" + description + '\'' +
                ", isDirctory=" + isDirctory +
                ", idValue='" + idValue + '\'' +
                ", type=" + type +
                ", children=" + children +
                '}';
    }
}

28
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/filter/IFilter.java

@@ -0,0 +1,28 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.dto.resources.filter;
import org.apache.dolphinscheduler.dao.entity.Resource;
import java.util.List;
/**
* interface filter
*/
public interface IFilter {
    List<Resource> filter();
}

100
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/filter/ResourceFilter.java

@@ -0,0 +1,100 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.dto.resources.filter;

import org.apache.dolphinscheduler.dao.entity.Resource;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

/**
 * resource filter
 */
public class ResourceFilter implements IFilter {

    /**
     * resource suffix
     */
    private String suffix;

    /**
     * resource list
     */
    private List<Resource> resourceList;

    /**
     * constructor
     * @param suffix resource suffix
     * @param resourceList resource list
     */
    public ResourceFilter(String suffix, List<Resource> resourceList) {
        this.suffix = suffix;
        this.resourceList = resourceList;
    }

    /**
     * file filter
     * @return files filtered by suffix
     */
    public Set<Resource> fileFilter() {
        return resourceList.stream()
                .filter(t -> t.getAlias().endsWith(suffix))
                .collect(Collectors.toSet());
    }

    /**
     * list the matched files together with all of their parent directories
     * @return resource set
     */
    Set<Resource> listAllParent() {
        Set<Resource> parentList = new HashSet<>();
        Set<Resource> filterFileList = fileFilter();
        for (Resource file : filterFileList) {
            parentList.add(file);
            setAllParent(file, parentList);
        }
        return parentList;
    }

    /**
     * recursively collect the parent directories of a resource
     * @param resource resource
     * @param parentList set the parents are collected into
     */
    private void setAllParent(Resource resource, Set<Resource> parentList) {
        for (Resource resourceTemp : resourceList) {
            if (resourceTemp.getId() == resource.getPid()) {
                parentList.add(resourceTemp);
                setAllParent(resourceTemp, parentList);
            }
        }
    }

    @Override
    public List<Resource> filter() {
        return new ArrayList<>(listAllParent());
    }
}

130
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/visitor/ResourceTreeVisitor.java

@@ -0,0 +1,130 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.dto.resources.visitor;

import org.apache.dolphinscheduler.api.dto.resources.Directory;
import org.apache.dolphinscheduler.api.dto.resources.FileLeaf;
import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
import org.apache.dolphinscheduler.dao.entity.Resource;

import java.util.ArrayList;
import java.util.List;

/**
 * resource tree visitor
 */
public class ResourceTreeVisitor implements Visitor {

    /**
     * resource list
     */
    private List<Resource> resourceList;

    public ResourceTreeVisitor() {
    }

    /**
     * constructor
     * @param resourceList resource list
     */
    public ResourceTreeVisitor(List<Resource> resourceList) {
        this.resourceList = resourceList;
    }

    /**
     * visit
     * @return resource component
     */
    public ResourceComponent visit() {
        ResourceComponent rootDirectory = new Directory();
        for (Resource resource : resourceList) {
            // judge whether it is a root node
            if (rootNode(resource)) {
                ResourceComponent tempResourceComponent = getResourceComponent(resource);
                rootDirectory.add(tempResourceComponent);
                tempResourceComponent.setChildren(setChildren(tempResourceComponent.getId(), resourceList));
            }
        }
        return rootDirectory;
    }

    /**
     * set children
     * @param id id
     * @param list resource list
     * @return resource component list
     */
    public static List<ResourceComponent> setChildren(int id, List<Resource> list) {
        List<ResourceComponent> childList = new ArrayList<>();
        for (Resource resource : list) {
            if (id == resource.getPid()) {
                ResourceComponent tempResourceComponent = getResourceComponent(resource);
                childList.add(tempResourceComponent);
            }
        }
        for (ResourceComponent resourceComponent : childList) {
            resourceComponent.setChildren(setChildren(resourceComponent.getId(), list));
        }
        if (childList.size() == 0) {
            return new ArrayList<>();
        }
        return childList;
    }

    /**
     * Determine whether it is the root node
     * @param resource resource
     * @return true if it is the root node
     */
    public boolean rootNode(Resource resource) {
        boolean isRootNode = true;
        if (resource.getPid() != -1) {
            for (Resource parent : resourceList) {
                if (resource.getPid() == parent.getId()) {
                    isRootNode = false;
                    break;
                }
            }
        }
        return isRootNode;
    }

    /**
     * get resource component by resource
     * @param resource resource
     * @return resource component
     */
    private static ResourceComponent getResourceComponent(Resource resource) {
        ResourceComponent tempResourceComponent;
        if (resource.isDirectory()) {
            tempResourceComponent = new Directory();
        } else {
            tempResourceComponent = new FileLeaf();
        }
        tempResourceComponent.setName(resource.getAlias());
        tempResourceComponent.setFullName(resource.getFullName().replaceFirst("/", ""));
        tempResourceComponent.setId(resource.getId());
        tempResourceComponent.setPid(resource.getPid());
        tempResourceComponent.setIdValue(resource.getId(), resource.isDirectory());
        tempResourceComponent.setDescription(resource.getDescription());
        tempResourceComponent.setType(resource.getType());
        return tempResourceComponent;
    }
}

31
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/visitor/Visitor.java

@@ -0,0 +1,31 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.dolphinscheduler.api.dto.resources.visitor;

import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;

/**
 * Visitor
 */
public interface Visitor {

    /**
     * visit
     * @return resource component
     */
    ResourceComponent visit();
}

12
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java

@@ -97,7 +97,7 @@ public enum Status {
 VERIFY_UDF_FUNCTION_NAME_ERROR( 10070,"verify udf function name error", "UDF函数名称验证错误"),
 DELETE_UDF_FUNCTION_ERROR( 10071,"delete udf function error", "删除UDF函数错误"),
 AUTHORIZED_FILE_RESOURCE_ERROR( 10072,"authorized file resource error", "授权资源文件错误"),
-UNAUTHORIZED_FILE_RESOURCE_ERROR( 10073,"unauthorized file resource error", "查询未授权资源错误"),
+AUTHORIZE_RESOURCE_TREE( 10073,"authorize resource tree display error","授权资源目录树错误"),
 UNAUTHORIZED_UDF_FUNCTION_ERROR( 10074,"unauthorized udf function error", "查询未授权UDF函数错误"),
 AUTHORIZED_UDF_FUNCTION_ERROR(10075,"authorized udf function error", "授权UDF函数错误"),
 CREATE_SCHEDULE_ERROR(10076,"create schedule error", "创建调度配置错误"),
@@ -184,10 +184,12 @@ public enum Status {
 RESOURCE_SIZE_EXCEED_LIMIT(20007, "upload resource file size exceeds limit", "上传资源文件大小超过限制"),
 RESOURCE_SUFFIX_FORBID_CHANGE(20008, "resource suffix not allowed to be modified", "资源文件后缀不支持修改"),
 UDF_RESOURCE_SUFFIX_NOT_JAR(20009, "UDF resource suffix name must be jar", "UDF资源文件后缀名只支持[jar]"),
-HDFS_COPY_FAIL(20009, "hdfs copy {0} -> {1} fail", "hdfs复制失败:[{0}] -> [{1}]"),
-RESOURCE_FILE_EXIST(20010, "resource file {0} already exists in hdfs,please delete it or change name!", "资源文件[{0}]在hdfs中已存在,请删除或修改资源名"),
-RESOURCE_FILE_NOT_EXIST(20011, "resource file {0} not exists in hdfs!", "资源文件[{0}]在hdfs中不存在"),
+HDFS_COPY_FAIL(20010, "hdfs copy {0} -> {1} fail", "hdfs复制失败:[{0}] -> [{1}]"),
+RESOURCE_FILE_EXIST(20011, "resource file {0} already exists in hdfs,please delete it or change name!", "资源文件[{0}]在hdfs中已存在,请删除或修改资源名"),
+RESOURCE_FILE_NOT_EXIST(20012, "resource file {0} not exists in hdfs!", "资源文件[{0}]在hdfs中不存在"),
+UDF_RESOURCE_IS_BOUND(20013, "udf resource file is bound by UDF functions:{0}","udf函数绑定了资源文件[{0}]"),
+RESOURCE_IS_USED(20014, "resource file is used by process definition","资源文件被上线的流程定义使用了"),
+PARENT_RESOURCE_NOT_EXIST(20015, "parent resource not exist","父资源文件不存在"),
 USER_NO_OPERATION_PERM(30001, "user has no operation privilege", "当前用户没有操作权限"),

32
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/DataSourceService.java

@@ -17,10 +17,15 @@
 package org.apache.dolphinscheduler.api.service;
 import com.alibaba.fastjson.JSON;
+import com.alibaba.fastjson.JSONObject;
+import com.alibaba.fastjson.TypeReference;
+import com.baomidou.mybatisplus.core.metadata.IPage;
+import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
 import org.apache.dolphinscheduler.api.enums.Status;
 import org.apache.dolphinscheduler.api.utils.PageInfo;
 import org.apache.dolphinscheduler.api.utils.Result;
 import org.apache.dolphinscheduler.common.Constants;
+import org.apache.dolphinscheduler.common.enums.DbConnectType;
 import org.apache.dolphinscheduler.common.enums.DbType;
 import org.apache.dolphinscheduler.common.utils.CommonUtils;
 import org.apache.dolphinscheduler.common.utils.JSONUtils;
@@ -30,10 +35,6 @@ import org.apache.dolphinscheduler.dao.entity.Resource;
 import org.apache.dolphinscheduler.dao.entity.User;
 import org.apache.dolphinscheduler.dao.mapper.DataSourceMapper;
 import org.apache.dolphinscheduler.dao.mapper.DataSourceUserMapper;
-import com.alibaba.fastjson.JSONObject;
-import com.alibaba.fastjson.TypeReference;
-import com.baomidou.mybatisplus.core.metadata.IPage;
-import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.slf4j.Logger;
@@ -473,12 +474,19 @@ public class DataSourceService extends BaseService{
     * @return datasource parameter
     */
    public String buildParameter(String name, String desc, DbType type, String host,
-                                String port, String database,String principal,String userName,
-                                String password, String other) {
-      String address = buildAddress(type, host, port);
-      String jdbcUrl = address + "/" + database;
+                                String port, String database, String principal, String userName,
+                                String password, DbConnectType connectType, String other) {
+      String address = buildAddress(type, host, port, connectType);
+      String jdbcUrl;
+      if (Constants.ORACLE.equals(type.name())
+              && connectType == DbConnectType.ORACLE_SID) {
+         jdbcUrl = address + ":" + database;
+      } else {
+         jdbcUrl = address + "/" + database;
+      }
       if (CommonUtils.getKerberosStartupState() &&
              (type == DbType.HIVE || type == DbType.SPARK)){
          jdbcUrl += ";principal=" + principal;
@@ -531,7 +539,7 @@ public class DataSourceService extends BaseService{
    }
-   private String buildAddress(DbType type, String host, String port) {
+   private String buildAddress(DbType type, String host, String port, DbConnectType connectType) {
       StringBuilder sb = new StringBuilder();
       if (Constants.MYSQL.equals(type.name())) {
          sb.append(Constants.JDBC_MYSQL);
@@ -552,7 +560,11 @@ public class DataSourceService extends BaseService{
          sb.append(Constants.JDBC_CLICKHOUSE);
         sb.append(host).append(":").append(port);
       } else if (Constants.ORACLE.equals(type.name())) {
-         sb.append(Constants.JDBC_ORACLE);
+         if (connectType == DbConnectType.ORACLE_SID) {
+            sb.append(Constants.JDBC_ORACLE_SID);
+         } else {
+            sb.append(Constants.JDBC_ORACLE_SERVICE_NAME);
+         }
         sb.append(host).append(":").append(port);
       } else if (Constants.SQLSERVER.equals(type.name())) {
          sb.append(Constants.JDBC_SQLSERVER);
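Note on the connectType change above: Oracle distinguishes SID connections (colon before the database name) from service-name connections (slash, with a `//` host prefix). A minimal standalone sketch of the two URL shapes follows; the literal prefix strings are assumptions mirroring common Oracle thin-driver syntax, not the actual values of Constants.JDBC_ORACLE_SID / JDBC_ORACLE_SERVICE_NAME:

public class OracleUrlSketch {
    // Assumed prefixes, for illustration only:
    static final String SID_PREFIX = "jdbc:oracle:thin:@";   // SID form
    static final String SVC_PREFIX = "jdbc:oracle:thin:@//"; // service-name form

    static String buildJdbcUrl(boolean oracleSid, String host, String port, String database) {
        String address = (oracleSid ? SID_PREFIX : SVC_PREFIX) + host + ":" + port;
        // SID uses ":" before the database, service name uses "/"
        return oracleSid ? address + ":" + database : address + "/" + database;
    }

    public static void main(String[] args) {
        System.out.println(buildJdbcUrl(true, "db1", "1521", "ORCL"));
        // jdbc:oracle:thin:@db1:1521:ORCL
        System.out.println(buildJdbcUrl(false, "db1", "1521", "orclpdb1"));
        // jdbc:oracle:thin:@//db1:1521/orclpdb1
    }
}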

34
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java

@@ -38,11 +38,9 @@ import org.apache.dolphinscheduler.common.model.TaskNode;
 import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
 import org.apache.dolphinscheduler.common.process.ProcessDag;
 import org.apache.dolphinscheduler.common.process.Property;
+import org.apache.dolphinscheduler.common.task.AbstractParameters;
 import org.apache.dolphinscheduler.common.thread.Stopper;
-import org.apache.dolphinscheduler.common.utils.CollectionUtils;
-import org.apache.dolphinscheduler.common.utils.DateUtils;
-import org.apache.dolphinscheduler.common.utils.JSONUtils;
-import org.apache.dolphinscheduler.common.utils.StringUtils;
+import org.apache.dolphinscheduler.common.utils.*;
 import org.apache.dolphinscheduler.dao.entity.*;
 import org.apache.dolphinscheduler.dao.mapper.*;
 import org.apache.dolphinscheduler.dao.utils.DagHelper;
@@ -162,6 +160,31 @@ public class ProcessDefinitionService extends BaseDAGService {
       return result;
    }
+   /**
+    * get resource ids
+    * @param processData process data
+    * @return resource ids
+    */
+   private String getResourceIds(ProcessData processData) {
+      List<TaskNode> tasks = processData.getTasks();
+      Set<Integer> resourceIds = new HashSet<>();
+      for(TaskNode taskNode : tasks){
+         String taskParameter = taskNode.getParams();
+         AbstractParameters params = TaskParametersUtils.getParameters(taskNode.getType(),taskParameter);
+         Set<Integer> tempSet = params.getResourceFilesList().stream().map(t->t.getId()).collect(Collectors.toSet());
+         resourceIds.addAll(tempSet);
+      }
+
+      StringBuilder sb = new StringBuilder();
+      for(int i : resourceIds) {
+         if (sb.length() > 0) {
+            sb.append(",");
+         }
+         sb.append(i);
+      }
+      return sb.toString();
+   }
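The id-joining loop added above is equivalent to a stream join; a minimal sketch under that reading (class and method names are illustrative only, not part of the commit):

import java.util.LinkedHashSet;
import java.util.Set;
import java.util.stream.Collectors;

class ResourceIdCsv {
    static String toCsv(Set<Integer> resourceIds) {
        // same output as the StringBuilder loop, e.g. "3,5,9"
        return resourceIds.stream().map(String::valueOf).collect(Collectors.joining(","));
    }

    public static void main(String[] args) {
        Set<Integer> ids = new LinkedHashSet<>();
        ids.add(3); ids.add(5); ids.add(9);
        System.out.println(toCsv(ids)); // 3,5,9
    }
}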
    /**
     * query proccess definition list
@@ -946,7 +969,9 @@ public class ProcessDefinitionService extends BaseDAGService {
       return result;
    }
    String processDefinitionJson = processDefinition.getProcessDefinitionJson();
    ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
    //process data check
@@ -1163,6 +1188,7 @@ public class ProcessDefinitionService extends BaseDAGService {
    private DAG<String, TaskNode, TaskNodeRelation> genDagGraph(ProcessDefinition processDefinition) throws Exception {
       String processDefinitionJson = processDefinition.getProcessDefinitionJson();
       ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
       //check process data

8
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessInstanceService.java

@@ -204,14 +204,8 @@ public class ProcessInstanceService extends BaseDAGService {
          }
       }
-      Set<String> exclusionSet = new HashSet<>();
-      exclusionSet.add(Constants.CLASS);
-      exclusionSet.add("locations");
-      exclusionSet.add("connects");
-      exclusionSet.add("processInstanceJson");
       pageInfo.setTotalCount((int) processInstanceList.getTotal());
-      pageInfo.setLists(CollectionUtils.getListByExclusion(processInstances, exclusionSet));
+      pageInfo.setLists(processInstances);
       result.put(Constants.DATA_LIST, pageInfo);
       putMsg(result, Status.SUCCESS);
       return result;

559
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java

@@ -16,9 +16,15 @@
  */
 package org.apache.dolphinscheduler.api.service;
+import com.alibaba.fastjson.JSON;
+import com.alibaba.fastjson.serializer.SerializerFeature;
 import com.baomidou.mybatisplus.core.metadata.IPage;
 import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
 import org.apache.commons.collections.BeanMap;
+import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
+import org.apache.dolphinscheduler.api.dto.resources.filter.ResourceFilter;
+import org.apache.dolphinscheduler.api.dto.resources.visitor.ResourceTreeVisitor;
+import org.apache.dolphinscheduler.api.dto.resources.visitor.Visitor;
 import org.apache.dolphinscheduler.api.enums.Status;
 import org.apache.dolphinscheduler.api.utils.PageInfo;
 import org.apache.dolphinscheduler.api.utils.Result;
@@ -39,6 +45,7 @@ import org.springframework.web.multipart.MultipartFile;
 import java.text.MessageFormat;
 import java.util.*;
+import java.util.stream.Collectors;
 import static org.apache.dolphinscheduler.common.Constants.*;
@@ -65,6 +72,82 @@ public class ResourcesService extends BaseService {
    @Autowired
    private ResourceUserMapper resourceUserMapper;
+   @Autowired
+   private ProcessDefinitionMapper processDefinitionMapper;
+
+   /**
+    * create directory
+    *
+    * @param loginUser login user
+    * @param name alias
+    * @param description description
+    * @param type type
+    * @param pid parent id
+    * @param currentDir current directory
+    * @return create directory result
+    */
+   @Transactional(rollbackFor = Exception.class)
+   public Result createDirectory(User loginUser,
+                                 String name,
+                                 String description,
+                                 ResourceType type,
+                                 int pid,
+                                 String currentDir) {
+      Result result = new Result();
+      // if hdfs not startup
+      if (!PropertyUtils.getResUploadStartupState()){
+         logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
+         putMsg(result, Status.HDFS_NOT_STARTUP);
+         return result;
+      }
+      String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name):String.format("%s/%s",currentDir,name);
+
+      if (pid != -1) {
+         Resource parentResource = resourcesMapper.selectById(pid);
+         if (parentResource == null) {
+            putMsg(result, Status.PARENT_RESOURCE_NOT_EXIST);
+            return result;
+         }
+         if (!hasPerm(loginUser, parentResource.getUserId())) {
+            putMsg(result, Status.USER_NO_OPERATION_PERM);
+            return result;
+         }
+      }
+      if (checkResourceExists(fullName, 0, type.ordinal())) {
+         logger.error("resource directory {} has exist, can't recreate", fullName);
+         putMsg(result, Status.RESOURCE_EXIST);
+         return result;
+      }
+
+      Date now = new Date();
+      Resource resource = new Resource(pid,name,fullName,true,description,name,loginUser.getId(),type,0,now,now);
+
+      try {
+         resourcesMapper.insert(resource);
+         putMsg(result, Status.SUCCESS);
+         Map<Object, Object> dataMap = new BeanMap(resource);
+         Map<String, Object> resultMap = new HashMap<String, Object>();
+         for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
+            if (!"class".equalsIgnoreCase(entry.getKey().toString())) {
+               resultMap.put(entry.getKey().toString(), entry.getValue());
+            }
+         }
+         result.setData(resultMap);
+      } catch (Exception e) {
+         logger.error("resource already exists, can't recreate ", e);
+         throw new RuntimeException("resource already exists, can't recreate");
+      }
+      //create directory in hdfs
+      createDirecotry(loginUser,fullName,type,result);
+      return result;
+   }
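The fullName ternary above exists only to avoid a double slash under the root directory; a standalone sketch of its behavior (the helper name is hypothetical):

class FullNameSketch {
    static String fullName(String currentDir, String name) {
        return currentDir.equals("/")
                ? String.format("%s%s", currentDir, name)   // "/" + "a"  -> "/a"
                : String.format("%s/%s", currentDir, name); // "/a", "b"  -> "/a/b"
    }

    public static void main(String[] args) {
        System.out.println(fullName("/", "a"));  // /a
        System.out.println(fullName("/a", "b")); // /a/b
    }
}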
    /**
     * create resource
     *
@@ -73,6 +156,8 @@ public class ResourcesService extends BaseService {
     * @param desc description
     * @param file file
     * @param type type
+    * @param pid parent id
+    * @param currentDir current directory
     * @return create result code
     */
    @Transactional(rollbackFor = Exception.class)
@@ -80,7 +165,9 @@ public class ResourcesService extends BaseService {
                                String name,
                                String desc,
                                ResourceType type,
-                               MultipartFile file) {
+                               MultipartFile file,
+                               int pid,
+                               String currentDir) {
       Result result = new Result();
       // if hdfs not startup
@@ -123,7 +210,8 @@ public class ResourcesService extends BaseService {
       }
       // check resoure name exists
-      if (checkResourceExists(name, 0, type.ordinal())) {
+      String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name):String.format("%s/%s",currentDir,name);
+      if (checkResourceExists(fullName, 0, type.ordinal())) {
          logger.error("resource {} has exist, can't recreate", name);
          putMsg(result, Status.RESOURCE_EXIST);
          return result;
@@ -131,7 +219,9 @@ public class ResourcesService extends BaseService {
       Date now = new Date();
-      Resource resource = new Resource(name,file.getOriginalFilename(),desc,loginUser.getId(),type,file.getSize(),now,now);
+      Resource resource = new Resource(pid,name,fullName,false,desc,file.getOriginalFilename(),loginUser.getId(),type,file.getSize(),now,now);
       try {
          resourcesMapper.insert(resource);
@@ -151,7 +241,7 @@ public class ResourcesService extends BaseService {
       }
       // fail upload
-      if (!upload(loginUser, name, file, type)) {
+      if (!upload(loginUser, fullName, file, type)) {
          logger.error("upload resource: {} file: {} failed.", name, file.getOriginalFilename());
          putMsg(result, Status.HDFS_OPERATION_ERROR);
          throw new RuntimeException(String.format("upload resource: %s file: %s failed.", name, file.getOriginalFilename()));
@@ -162,27 +252,29 @@ public class ResourcesService extends BaseService {
    /**
     * check resource is exists
     *
-    * @param alias alias
+    * @param fullName fullName
     * @param userId user id
     * @param type type
     * @return true if resource exists
     */
-   private boolean checkResourceExists(String alias, int userId, int type ){
-      List<Resource> resources = resourcesMapper.queryResourceList(alias, userId, type);
-      return CollectionUtils.isNotEmpty(resources);
-   }
+   private boolean checkResourceExists(String fullName, int userId, int type ){
+      List<Resource> resources = resourcesMapper.queryResourceList(fullName, userId, type);
+      if (resources != null && resources.size() > 0) {
+         return true;
+      }
+      return false;
+   }
    /**
     * update resource
-    *
-    * @param loginUser login user
-    * @param name alias
-    * @param resourceId resource id
-    * @param type resource type
-    * @param desc description
-    * @return update result code
+    * @param loginUser login user
+    * @param resourceId resource id
+    * @param name name
+    * @param desc description
+    * @param type resource type
+    * @return update result code
     */
    @Transactional(rollbackFor = Exception.class)
    public Result updateResource(User loginUser,
@@ -216,7 +308,10 @@ public class ResourcesService extends BaseService {
       }
       //check resource aleady exists
-      if (!resource.getAlias().equals(name) && checkResourceExists(name, 0, type.ordinal())) {
+      String originFullName = resource.getFullName();
+
+      String fullName = String.format("%s%s",originFullName.substring(0,originFullName.lastIndexOf("/")+1),name);
+      if (!resource.getAlias().equals(name) && checkResourceExists(fullName, 0, type.ordinal())) {
          logger.error("resource {} already exists, can't recreate", name);
          putMsg(result, Status.RESOURCE_EXIST);
          return result;
@@ -227,25 +322,41 @@ public class ResourcesService extends BaseService {
       if (StringUtils.isEmpty(tenantCode)){
          return result;
       }
-      //get the file suffix
+      String nameWithSuffix = name;
       String originResourceName = resource.getAlias();
-      String suffix = originResourceName.substring(originResourceName.lastIndexOf('.'));
-      //if the name without suffix then add it ,else use the origin name
-      String nameWithSuffix = name;
-      if(!name.endsWith(suffix)){
-         nameWithSuffix = nameWithSuffix + suffix;
+      if (!resource.isDirectory()) {
+         //get the file suffix
+         String suffix = originResourceName.substring(originResourceName.lastIndexOf("."));
+         //if the name without suffix then add it ,else use the origin name
+         if(!name.endsWith(suffix)){
+            nameWithSuffix = nameWithSuffix + suffix;
+         }
       }
       // updateResource data
+      List<Integer> childrenResource = listAllChildren(resource);
+      String oldFullName = resource.getFullName();
       Date now = new Date();
       resource.setAlias(nameWithSuffix);
+      resource.setFullName(fullName);
       resource.setDescription(desc);
       resource.setUpdateTime(now);
       try {
          resourcesMapper.updateById(resource);
+         if (resource.isDirectory() && CollectionUtils.isNotEmpty(childrenResource)) {
+            List<Resource> childResourceList = new ArrayList<>();
+            List<Resource> resourceList = resourcesMapper.listResourceByIds(childrenResource.toArray(new Integer[childrenResource.size()]));
+            childResourceList = resourceList.stream().map(t -> {
+               t.setFullName(t.getFullName().replaceFirst(oldFullName, fullName));
+               t.setUpdateTime(now);
+               return t;
+            }).collect(Collectors.toList());
+            resourcesMapper.batchUpdateResource(childResourceList);
+         }
         putMsg(result, Status.SUCCESS);
         Map<Object, Object> dataMap = new BeanMap(resource);
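The child rename in the hunk above relies on String.replaceFirst to swap the old directory prefix for the new one; a minimal sketch (paths are made up). Note that replaceFirst treats its first argument as a regex, which is benign for plain alphanumeric paths like these:

class RenameSketch {
    public static void main(String[] args) {
        String oldFullName = "/b";
        String fullName = "/c";
        String child = "/b/b2/c2.jar";
        // rewrites only the leading occurrence: /b/b2/c2.jar -> /c/b2/c2.jar
        System.out.println(child.replaceFirst(oldFullName, fullName));
    }
}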
@@ -267,15 +378,9 @@ public class ResourcesService extends BaseService {
       // get file hdfs path
       // delete hdfs file by type
-      String originHdfsFileName = "";
-      String destHdfsFileName = "";
-      if (resource.getType().equals(ResourceType.FILE)) {
-         originHdfsFileName = HadoopUtils.getHdfsFilename(tenantCode, originResourceName);
-         destHdfsFileName = HadoopUtils.getHdfsFilename(tenantCode, name);
-      } else if (resource.getType().equals(ResourceType.UDF)) {
-         originHdfsFileName = HadoopUtils.getHdfsUdfFilename(tenantCode, originResourceName);
-         destHdfsFileName = HadoopUtils.getHdfsUdfFilename(tenantCode, name);
-      }
+      String originHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,originFullName);
+      String destHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,fullName);
       try {
          if (HadoopUtils.getInstance().exists(originHdfsFileName)) {
             logger.info("hdfs copy {} -> {}", originHdfsFileName, destHdfsFileName);
@@ -303,7 +408,7 @@ public class ResourcesService extends BaseService {
     * @param pageSize page size
     * @return resource list page
     */
-   public Map<String, Object> queryResourceListPaging(User loginUser, ResourceType type, String searchVal, Integer pageNo, Integer pageSize) {
+   public Map<String, Object> queryResourceListPaging(User loginUser, int direcotryId, ResourceType type, String searchVal, Integer pageNo, Integer pageSize) {
       HashMap<String, Object> result = new HashMap<>(5);
       Page<Resource> page = new Page(pageNo, pageSize);
@@ -312,7 +417,7 @@ public class ResourcesService extends BaseService {
          userId= 0;
       }
       IPage<Resource> resourceIPage = resourcesMapper.queryResourcePaging(page,
-               userId, type.ordinal(), searchVal);
+               userId,direcotryId, type.ordinal(), searchVal);
       PageInfo pageInfo = new PageInfo<Resource>(pageNo, pageSize);
       pageInfo.setTotalCount((int)resourceIPage.getTotal());
       pageInfo.setLists(resourceIPage.getRecords());
@@ -321,17 +426,46 @@ public class ResourcesService extends BaseService {
       return result;
    }
+   /**
+    * create direcoty
+    * @param loginUser login user
+    * @param fullName full name
+    * @param type resource type
+    * @param result Result
+    */
+   private void createDirecotry(User loginUser,String fullName,ResourceType type,Result result){
+      // query tenant
+      String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
+      String directoryName = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
+      String resourceRootPath = HadoopUtils.getHdfsDir(type,tenantCode);
+      try {
+         if (!HadoopUtils.getInstance().exists(resourceRootPath)) {
+            createTenantDirIfNotExists(tenantCode);
+         }
+
+         if (!HadoopUtils.getInstance().mkdir(directoryName)) {
+            logger.error("create resource directory {} of hdfs failed",directoryName);
+            putMsg(result,Status.HDFS_OPERATION_ERROR);
+            throw new RuntimeException(String.format("create resource directory: %s failed.", directoryName));
+         }
+      } catch (Exception e) {
+         logger.error("create resource directory {} of hdfs failed",directoryName);
+         putMsg(result,Status.HDFS_OPERATION_ERROR);
+         throw new RuntimeException(String.format("create resource directory: %s failed.", directoryName));
+      }
+   }
    /**
     * upload file to hdfs
     *
-    * @param loginUser
-    * @param name
-    * @param file
+    * @param loginUser login user
+    * @param fullName full name
+    * @param file file
     */
-   private boolean upload(User loginUser, String name, MultipartFile file, ResourceType type) {
+   private boolean upload(User loginUser, String fullName, MultipartFile file, ResourceType type) {
       // save to local
       String fileSuffix = FileUtils.suffix(file.getOriginalFilename());
-      String nameSuffix = FileUtils.suffix(name);
+      String nameSuffix = FileUtils.suffix(fullName);
       // determine file suffix
       if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
@@ -344,15 +478,8 @@ public class ResourcesService extends BaseService {
       // save file to hdfs, and delete original file
-      String hdfsFilename = "";
-      String resourcePath = "";
-      if (type.equals(ResourceType.FILE)) {
-         hdfsFilename = HadoopUtils.getHdfsFilename(tenantCode, name);
-         resourcePath = HadoopUtils.getHdfsResDir(tenantCode);
-      } else if (type.equals(ResourceType.UDF)) {
-         hdfsFilename = HadoopUtils.getHdfsUdfFilename(tenantCode, name);
-         resourcePath = HadoopUtils.getHdfsUdfDir(tenantCode);
-      }
+      String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
+      String resourcePath = HadoopUtils.getHdfsDir(type,tenantCode);
       try {
          // if tenant dir not exists
         if (!HadoopUtils.getInstance().exists(resourcePath)) {
@@ -377,13 +504,59 @@ public class ResourcesService extends BaseService {
    public Map<String, Object> queryResourceList(User loginUser, ResourceType type) {
       Map<String, Object> result = new HashMap<>(5);
-      List<Resource> resourceList;
+      Set<Resource> allResourceList = getAllResources(loginUser, type);
+      Visitor resourceTreeVisitor = new ResourceTreeVisitor(new ArrayList<>(allResourceList));
+      //JSONArray jsonArray = JSON.parseArray(JSON.toJSONString(resourceTreeVisitor.visit().getChildren(), SerializerFeature.SortField));
+      result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
+      putMsg(result,Status.SUCCESS);
+      return result;
+   }
+
+   /**
+    * get all resources
+    * @param loginUser login user
+    * @return all resource set
+    */
+   private Set<Resource> getAllResources(User loginUser, ResourceType type) {
       int userId = loginUser.getId();
+      boolean listChildren = true;
       if(isAdmin(loginUser)){
          userId = 0;
+         listChildren = false;
+      }
+      List<Resource> resourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal());
+      Set<Resource> allResourceList = new HashSet<>(resourceList);
+      if (listChildren) {
+         Set<Integer> authorizedIds = new HashSet<>();
+         List<Resource> authorizedDirecoty = resourceList.stream().filter(t->t.getUserId() != loginUser.getId() && t.isDirectory()).collect(Collectors.toList());
+         if (CollectionUtils.isNotEmpty(authorizedDirecoty)) {
+            for(Resource resource : authorizedDirecoty){
+               authorizedIds.addAll(listAllChildren(resource));
+            }
+            List<Resource> childrenResources = resourcesMapper.listResourceByIds(authorizedIds.toArray(new Integer[authorizedIds.size()]));
+            allResourceList.addAll(childrenResources);
+         }
       }
-      resourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal());
-      result.put(Constants.DATA_LIST, resourceList);
+      return allResourceList;
+   }
+
+   /**
+    * query resource list
+    *
+    * @param loginUser login user
+    * @param type resource type
+    * @return resource list
+    */
+   public Map<String, Object> queryResourceJarList(User loginUser, ResourceType type) {
+      Map<String, Object> result = new HashMap<>(5);
+      Set<Resource> allResourceList = getAllResources(loginUser, type);
+      List<Resource> resources = new ResourceFilter(".jar",new ArrayList<>(allResourceList)).filter();
+      Visitor resourceTreeVisitor = new ResourceTreeVisitor(resources);
+      result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
       putMsg(result,Status.SUCCESS);
       return result;
@@ -419,23 +592,51 @@ public class ResourcesService extends BaseService {
          putMsg(result, Status.USER_NO_OPERATION_PERM);
          return result;
       }
+      //if resource type is UDF,need check whether it is bound by UDF functon
+      if (resource.getType() == (ResourceType.UDF)) {
+         List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(new int[]{resourceId});
+         if (CollectionUtils.isNotEmpty(udfFuncs)) {
+            logger.error("can't be deleted,because it is bound by UDF functions:{}",udfFuncs.toString());
+            putMsg(result,Status.UDF_RESOURCE_IS_BOUND,udfFuncs.get(0).getFuncName());
+            return result;
+         }
+      }
-      Tenant tenant = tenantMapper.queryById(loginUser.getTenantId());
-      if (tenant == null){
-         putMsg(result, Status.TENANT_NOT_EXIST);
+      String tenantCode = getTenantCode(resource.getUserId(),result);
+      if (StringUtils.isEmpty(tenantCode)){
+         return result;
+      }
+
+      // get all resource id of process definitions those is released
+      Map<Integer, Set<Integer>> resourceProcessMap = getResourceProcessMap();
+      Set<Integer> resourceIdSet = resourceProcessMap.keySet();
+      // get all children of the resource
+      List<Integer> allChildren = listAllChildren(resource);
+
+      if (resourceIdSet.contains(resource.getPid())) {
+         logger.error("can't be deleted,because it is used of process definition");
+         putMsg(result, Status.RESOURCE_IS_USED);
+         return result;
+      }
+      resourceIdSet.retainAll(allChildren);
+      if (CollectionUtils.isNotEmpty(resourceIdSet)) {
+         logger.error("can't be deleted,because it is used of process definition");
+         for (Integer resId : resourceIdSet) {
+            logger.error("resource id:{} is used of process definition {}",resId,resourceProcessMap.get(resId));
+         }
+         putMsg(result, Status.RESOURCE_IS_USED);
          return result;
       }
-      String hdfsFilename = "";
-      // delete hdfs file by type
-      String tenantCode = tenant.getTenantCode();
-      hdfsFilename = getHdfsFileName(resource, tenantCode, hdfsFilename);
+      // get hdfs file by type
+      String hdfsFilename = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getFullName());
       //delete data in database
-      resourcesMapper.deleteById(resourceId);
+      resourcesMapper.deleteIds(allChildren.toArray(new Integer[allChildren.size()]));
       resourceUserMapper.deleteResourceUser(0, resourceId);
       //delete file on hdfs
-      HadoopUtils.getInstance().delete(hdfsFilename, false);
+      HadoopUtils.getInstance().delete(hdfsFilename, true);
       putMsg(result, Status.SUCCESS);
       return result;
@@ -444,15 +645,15 @@ public class ResourcesService extends BaseService {
    /**
     * verify resource by name and type
     * @param loginUser login user
-    * @param name resource alias
+    * @param fullName resource full name
     * @param type resource type
     * @return true if the resource name not exists, otherwise return false
     */
-   public Result verifyResourceName(String name, ResourceType type,User loginUser) {
+   public Result verifyResourceName(String fullName, ResourceType type,User loginUser) {
       Result result = new Result();
       putMsg(result, Status.SUCCESS);
-      if (checkResourceExists(name, 0, type.ordinal())) {
-         logger.error("resource type:{} name:{} has exist, can't create again.", type, name);
+      if (checkResourceExists(fullName, 0, type.ordinal())) {
+         logger.error("resource type:{} name:{} has exist, can't create again.", type, fullName);
          putMsg(result, Status.RESOURCE_EXIST);
       } else {
          // query tenant
@@ -461,9 +662,9 @@ public class ResourcesService extends BaseService {
          String tenantCode = tenant.getTenantCode();
          try {
-            String hdfsFilename = getHdfsFileName(type,tenantCode,name);
+            String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
            if(HadoopUtils.getInstance().exists(hdfsFilename)){
-               logger.error("resource type:{} name:{} has exist in hdfs {}, can't create again.", type, name,hdfsFilename);
+               logger.error("resource type:{} name:{} has exist in hdfs {}, can't create again.", type, fullName,hdfsFilename);
               putMsg(result, Status.RESOURCE_FILE_EXIST,hdfsFilename);
            }
@@ -480,6 +681,48 @@ public class ResourcesService extends BaseService {
       return result;
    }
+   /**
+    * verify resource by full name or pid and type
+    * @param fullName resource full name
+    * @param id resource id
+    * @param type resource type
+    * @return true if the resource full name or pid not exists, otherwise return false
+    */
+   public Result queryResource(String fullName,Integer id,ResourceType type) {
+      Result result = new Result();
+      if (StringUtils.isBlank(fullName) && id == null) {
+         logger.error("You must input one of fullName and pid");
+         putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR);
+         return result;
+      }
+      if (StringUtils.isNotBlank(fullName)) {
+         List<Resource> resourceList = resourcesMapper.queryResource(fullName,type.ordinal());
+         if (CollectionUtils.isEmpty(resourceList)) {
+            logger.error("resource file not exist, resource full name {} ", fullName);
+            putMsg(result, Status.RESOURCE_NOT_EXIST);
+            return result;
+         }
+         putMsg(result, Status.SUCCESS);
+         result.setData(resourceList.get(0));
+      } else {
+         Resource resource = resourcesMapper.selectById(id);
+         if (resource == null) {
+            logger.error("resource file not exist, resource id {}", id);
+            putMsg(result, Status.RESOURCE_NOT_EXIST);
+            return result;
+         }
+         Resource parentResource = resourcesMapper.selectById(resource.getPid());
+         if (parentResource == null) {
+            logger.error("parent resource file not exist, resource id {}", id);
+            putMsg(result, Status.RESOURCE_NOT_EXIST);
+            return result;
+         }
+         putMsg(result, Status.SUCCESS);
+         result.setData(parentResource);
+      }
+      return result;
+   }
    /**
     * view resource file online
     *
@@ -501,7 +744,7 @@ public class ResourcesService extends BaseService {
       // get resource by id
       Resource resource = resourcesMapper.selectById(resourceId);
       if (resource == null) {
-         logger.error("resouce file not exist, resource id {}", resourceId);
+         logger.error("resource file not exist, resource id {}", resourceId);
          putMsg(result, Status.RESOURCE_NOT_EXIST);
         return result;
       }
@@ -511,7 +754,7 @@ public class ResourcesService extends BaseService {
       if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
          List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
         if (!strList.contains(nameSuffix)) {
-            logger.error("resouce suffix {} not support view, resource id {}", nameSuffix, resourceId);
+            logger.error("resource suffix {} not support view, resource id {}", nameSuffix, resourceId);
            putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
            return result;
         }
@@ -523,7 +766,7 @@ public class ResourcesService extends BaseService {
       }
       // hdfs path
-      String hdfsFileName = HadoopUtils.getHdfsFilename(tenantCode, resource.getAlias());
+      String hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resource.getFullName());
       logger.info("resource hdfs path is {} ", hdfsFileName);
       try {
          if(HadoopUtils.getInstance().exists(hdfsFileName)){
@@ -559,7 +802,7 @@ public class ResourcesService extends BaseService {
     * @return create result code
     */
    @Transactional(rollbackFor = Exception.class)
-   public Result onlineCreateResource(User loginUser, ResourceType type, String fileName, String fileSuffix, String desc, String content) {
+   public Result onlineCreateResource(User loginUser, ResourceType type, String fileName, String fileSuffix, String desc, String content,int pid,String currentDirectory) {
       Result result = new Result();
       // if resource upload startup
       if (!PropertyUtils.getResUploadStartupState()){
@@ -581,15 +824,16 @@ public class ResourcesService extends BaseService {
       }
       String name = fileName.trim() + "." + nameSuffix;
+      String fullName = currentDirectory.equals("/") ? String.format("%s%s",currentDirectory,name):String.format("%s/%s",currentDirectory,name);
-      result = verifyResourceName(name,type,loginUser);
+      result = verifyResourceName(fullName,type,loginUser);
       if (!result.getCode().equals(Status.SUCCESS.getCode())) {
          return result;
       }
       // save data
       Date now = new Date();
-      Resource resource = new Resource(name,name,desc,loginUser.getId(),type,content.getBytes().length,now,now);
+      Resource resource = new Resource(pid,name,fullName,false,desc,name,loginUser.getId(),type,content.getBytes().length,now,now);
       resourcesMapper.insert(resource);
@@ -605,7 +849,7 @@ public class ResourcesService extends BaseService {
       String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
-      result = uploadContentToHdfs(name, tenantCode, content);
+      result = uploadContentToHdfs(fullName, tenantCode, content);
       if (!result.getCode().equals(Status.SUCCESS.getCode())) {
          throw new RuntimeException(result.getMsg());
       }
@@ -657,7 +901,7 @@ public class ResourcesService extends BaseService {
       resourcesMapper.updateById(resource);
-      result = uploadContentToHdfs(resource.getAlias(), tenantCode, content);
+      result = uploadContentToHdfs(resource.getFullName(), tenantCode, content);
       if (!result.getCode().equals(Status.SUCCESS.getCode())) {
          throw new RuntimeException(result.getMsg());
       }
@@ -665,10 +909,10 @@ public class ResourcesService extends BaseService {
    }
    /**
-    * @param resourceName
-    * @param tenantCode
-    * @param content
-    * @return
+    * @param resourceName resource name
+    * @param tenantCode tenant code
+    * @param content content
+    * @return result
     */
    private Result uploadContentToHdfs(String resourceName, String tenantCode, String content) {
       Result result = new Result();
@@ -684,8 +928,8 @@ public class ResourcesService extends BaseService {
          return result;
       }
-      // get file hdfs path
-      hdfsFileName = HadoopUtils.getHdfsFilename(tenantCode, resourceName);
+      // get resource file hdfs path
+      hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resourceName);
       String resourcePath = HadoopUtils.getHdfsResDir(tenantCode);
       logger.info("resource hdfs path is {} ", hdfsFileName);
@@ -729,11 +973,14 @@ public class ResourcesService extends BaseService {
          logger.error("download file not exist, resource id {}", resourceId);
          return null;
       }
+      if (resource.isDirectory()) {
+         logger.error("resource id {} is directory,can't download it", resourceId);
+         throw new RuntimeException("cant't download directory");
+      }
       User user = userMapper.queryDetailsById(resource.getUserId());
       String tenantCode = tenantMapper.queryById(user.getTenantId()).getTenantCode();
-      String hdfsFileName = "";
-      hdfsFileName = getHdfsFileName(resource, tenantCode, hdfsFileName);
+      String hdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getAlias());
       String localFileName = FileUtils.getDownloadFilename(resource.getAlias());
       logger.info("resource hdfs path is {} ", hdfsFileName);
@@ -743,6 +990,33 @@ public class ResourcesService extends BaseService {
    }
+   /**
+    * list all file
+    *
+    * @param loginUser login user
+    * @param userId user id
+    * @return unauthorized result code
+    */
+   public Map<String, Object> authorizeResourceTree(User loginUser, Integer userId) {
+      Map<String, Object> result = new HashMap<>();
+      if (checkAdmin(loginUser, result)) {
+         return result;
+      }
+      List<Resource> resourceList = resourcesMapper.queryResourceExceptUserId(userId);
+      List<ResourceComponent> list ;
+      if (CollectionUtils.isNotEmpty(resourceList)) {
+         Visitor visitor = new ResourceTreeVisitor(resourceList);
+         list = visitor.visit().getChildren();
+      }else {
+         list = new ArrayList<>(0);
+      }
+      result.put(Constants.DATA_LIST, list);
+      putMsg(result,Status.SUCCESS);
+      return result;
+   }
    /**
     * unauthorized file
     *
@@ -757,8 +1031,8 @@ public class ResourcesService extends BaseService {
          return result;
       }
       List<Resource> resourceList = resourcesMapper.queryResourceExceptUserId(userId);
-      List<Object> list ;
-      if (CollectionUtils.isNotEmpty(resourceList)) {
+      List<Resource> list ;
+      if (resourceList != null && resourceList.size() > 0) {
          Set<Resource> resourceSet = new HashSet<>(resourceList);
         List<Resource> authedResourceList = resourcesMapper.queryAuthorizedResourceList(userId);
@@ -767,15 +1041,12 @@ public class ResourcesService extends BaseService {
       }else {
          list = new ArrayList<>(0);
       }
-      result.put(Constants.DATA_LIST, list);
+      Visitor visitor = new ResourceTreeVisitor(list);
+      result.put(Constants.DATA_LIST, visitor.visit().getChildren());
       putMsg(result,Status.SUCCESS);
       return result;
    }
    /**
     * unauthorized udf function
     *
@@ -841,46 +1112,15 @@ public class ResourcesService extends BaseService {
          return result;
       }
       List<Resource> authedResources = resourcesMapper.queryAuthorizedResourceList(userId);
-      result.put(Constants.DATA_LIST, authedResources);
+      Visitor visitor = new ResourceTreeVisitor(authedResources);
+      logger.info(JSON.toJSONString(visitor.visit(), SerializerFeature.SortField));
+      String jsonTreeStr = JSON.toJSONString(visitor.visit().getChildren(), SerializerFeature.SortField);
+      logger.info(jsonTreeStr);
+      result.put(Constants.DATA_LIST, visitor.visit().getChildren());
       putMsg(result,Status.SUCCESS);
       return result;
    }
-   /**
-    * get hdfs file name
-    *
-    * @param resource resource
-    * @param tenantCode tenant code
-    * @param hdfsFileName hdfs file name
-    * @return hdfs file name
-    */
-   private String getHdfsFileName(Resource resource, String tenantCode, String hdfsFileName) {
-      if (resource.getType().equals(ResourceType.FILE)) {
-         hdfsFileName = HadoopUtils.getHdfsFilename(tenantCode, resource.getAlias());
-      } else if (resource.getType().equals(ResourceType.UDF)) {
-         hdfsFileName = HadoopUtils.getHdfsUdfFilename(tenantCode, resource.getAlias());
-      }
-      return hdfsFileName;
-   }
-
-   /**
-    * get hdfs file name
-    *
-    * @param resourceType resource type
-    * @param tenantCode tenant code
-    * @param hdfsFileName hdfs file name
-    * @return hdfs file name
-    */
-   private String getHdfsFileName(ResourceType resourceType, String tenantCode, String hdfsFileName) {
-      if (resourceType.equals(ResourceType.FILE)) {
-         hdfsFileName = HadoopUtils.getHdfsFilename(tenantCode, hdfsFileName);
-      } else if (resourceType.equals(ResourceType.UDF)) {
-         hdfsFileName = HadoopUtils.getHdfsUdfFilename(tenantCode, hdfsFileName);
-      }
-      return hdfsFileName;
-   }
    /**
     * get authorized resource list
     *
@@ -920,4 +1160,69 @@ public class ResourcesService extends BaseService {
       return tenant.getTenantCode();
    }
+
+   /**
+    * list all children id
+    * @param resource resource
+    * @return all children id
+    */
+   List<Integer> listAllChildren(Resource resource){
+      List<Integer> childList = new ArrayList<>();
+      if (resource.getId() != -1) {
+         childList.add(resource.getId());
+      }
+      if(resource.isDirectory()){
+         listAllChildren(resource.getId(),childList);
+      }
+      return childList;
+   }
+
+   /**
+    * list all children id
+    * @param resourceId resource id
+    * @param childList child list
+    */
+   void listAllChildren(int resourceId,List<Integer> childList){
+      List<Integer> children = resourcesMapper.listChildren(resourceId);
+      for(int chlidId:children){
+         childList.add(chlidId);
+         listAllChildren(chlidId,childList);
+      }
+   }
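The two-method recursion above is a standard depth-first walk over the resource-id tree; a self-contained sketch of the same pattern, where the in-memory childrenOf map stands in for resourcesMapper.listChildren (which queries the database):

import java.util.*;

class ChildWalkSketch {
    // parent id -> direct child ids (hypothetical sample data)
    static Map<Integer, List<Integer>> childrenOf = new HashMap<>();

    static void listAllChildren(int resourceId, List<Integer> childList) {
        for (int childId : childrenOf.getOrDefault(resourceId, Collections.emptyList())) {
            childList.add(childId);
            listAllChildren(childId, childList); // depth-first descent
        }
    }

    public static void main(String[] args) {
        childrenOf.put(3, Arrays.asList(5, 6));
        childrenOf.put(6, Arrays.asList(9));
        List<Integer> childList = new ArrayList<>();
        listAllChildren(3, childList);
        System.out.println(childList); // [5, 6, 9]
    }
}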
+
+   /**
+    * get resource process map key is resource id,value is the set of process definition
+    * @return resource process definition map
+    */
+   private Map<Integer,Set<Integer>> getResourceProcessMap(){
+      Map<Integer, String> map = new HashMap<>();
+      Map<Integer, Set<Integer>> result = new HashMap<>();
+      List<Map<String, Object>> list = processDefinitionMapper.listResources();
+      if (CollectionUtils.isNotEmpty(list)) {
+         for (Map<String, Object> tempMap : list) {
+            map.put((Integer) tempMap.get("id"), (String)tempMap.get("resource_ids"));
+         }
+      }
+
+      for (Map.Entry<Integer, String> entry : map.entrySet()) {
+         Integer mapKey = entry.getKey();
+         String[] arr = entry.getValue().split(",");
+         Set<Integer> mapValues = Arrays.stream(arr).map(Integer::parseInt).collect(Collectors.toSet());
+         for (Integer value : mapValues) {
+            if (result.containsKey(value)) {
+               Set<Integer> set = result.get(value);
+               set.add(mapKey);
+               result.put(value, set);
+            } else {
+               Set<Integer> set = new HashSet<>();
+               set.add(mapKey);
+               result.put(value, set);
+            }
+         }
+      }
+      return result;
+   }
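What this method computes is an inversion of the process-definition-to-resources mapping; a minimal sketch of the same transform (the sample ids are made up):

import java.util.*;

class ResourceProcessInvertSketch {
    public static void main(String[] args) {
        // process definition id -> CSV of resource ids it references (sample data)
        Map<Integer, String> byProcess = new HashMap<>();
        byProcess.put(100, "3,5");
        byProcess.put(200, "5");

        // invert to: resource id -> set of process definition ids
        Map<Integer, Set<Integer>> byResource = new HashMap<>();
        for (Map.Entry<Integer, String> e : byProcess.entrySet()) {
            for (String res : e.getValue().split(",")) {
                byResource.computeIfAbsent(Integer.parseInt(res), k -> new HashSet<>())
                          .add(e.getKey());
            }
        }
        System.out.println(byResource); // {3=[100], 5=[100, 200]}
    }
}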
 }

4
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/UdfFuncService.java

@@ -118,7 +118,7 @@ public class UdfFuncService extends BaseService{
       }
       udf.setDescription(desc);
       udf.setResourceId(resourceId);
-      udf.setResourceName(resource.getAlias());
+      udf.setResourceName(resource.getFullName());
       udf.setType(type);
       udf.setCreateTime(now);
@@ -226,7 +226,7 @@ public class UdfFuncService extends BaseService{
       }
       udf.setDescription(desc);
       udf.setResourceId(resourceId);
-      udf.setResourceName(resource.getAlias());
+      udf.setResourceName(resource.getFullName());
       udf.setType(type);
       udf.setUpdateTime(now);

4
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/DataSourceControllerTest.java

@@ -39,6 +39,7 @@ import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.
 * data source controller test
 */
public class DataSourceControllerTest extends AbstractControllerTest{
   private static Logger logger = LoggerFactory.getLogger(DataSourceControllerTest.class);
+   @Ignore
@@ -95,6 +96,7 @@ public class DataSourceControllerTest extends AbstractControllerTest{
+   @Ignore
   @Test
   public void testQueryDataSource() throws Exception {
      MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
@@ -169,6 +171,7 @@ public class DataSourceControllerTest extends AbstractControllerTest{
   }
+   @Ignore
   @Test
   public void testConnectionTest() throws Exception {
      MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
@@ -248,6 +251,7 @@ public class DataSourceControllerTest extends AbstractControllerTest{
+   @Ignore
   @Test
   public void testDelete() throws Exception {
      MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();

58
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/dto/resources/filter/ResourceFilterTest.java

@@ -0,0 +1,58 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.dto.resources.filter;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
/**
* resource filter test
*/
public class ResourceFilterTest {
private static Logger logger = LoggerFactory.getLogger(ResourceFilterTest.class);
@Test
public void filterTest(){
List<Resource> allList = new ArrayList<>();
Resource resource1 = new Resource(3,-1,"b","/b",true);
Resource resource2 = new Resource(4,2,"a1.txt","/a/a1.txt",false);
Resource resource3 = new Resource(5,3,"b1.txt","/b/b1.txt",false);
Resource resource4 = new Resource(6,3,"b2.jar","/b/b2.jar",false);
Resource resource5 = new Resource(7,-1,"b2","/b2",true);
Resource resource6 = new Resource(8,-1,"b2","/b/b2",true);
Resource resource7 = new Resource(9,8,"c2.jar","/b/b2/c2.jar",false);
allList.add(resource1);
allList.add(resource2);
allList.add(resource3);
allList.add(resource4);
allList.add(resource5);
allList.add(resource6);
allList.add(resource7);
ResourceFilter resourceFilter = new ResourceFilter(".jar",allList);
List<Resource> resourceList = resourceFilter.filter();
Assert.assertNotNull(resourceList);
resourceList.stream().forEach(t-> logger.info(t.toString()));
}
}

82
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/dto/resources/visitor/ResourceTreeVisitorTest.java

@@ -0,0 +1,82 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.dto.resources.visitor;
import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.junit.Assert;
import org.junit.Test;
import java.util.ArrayList;
import java.util.List;
/**
* resource tree visitor test
*/
public class ResourceTreeVisitorTest {
@Test
public void visit() throws Exception {
List<Resource> resourceList = new ArrayList<>();
Resource resource1 = new Resource(3,-1,"b","/b",true);
Resource resource2 = new Resource(4,2,"a1.txt","/a/a1.txt",false);
Resource resource3 = new Resource(5,3,"b1.txt","/b/b1.txt",false);
Resource resource4 = new Resource(6,3,"b2.jar","/b/b2.jar",false);
Resource resource5 = new Resource(7,-1,"b2","/b2",true);
Resource resource6 = new Resource(8,-1,"b2","/b/b2",true);
Resource resource7 = new Resource(9,8,"c2.jar","/b/b2/c2.jar",false);
resourceList.add(resource1);
resourceList.add(resource2);
resourceList.add(resource3);
resourceList.add(resource4);
resourceList.add(resource5);
resourceList.add(resource6);
resourceList.add(resource7);
ResourceTreeVisitor resourceTreeVisitor = new ResourceTreeVisitor(resourceList);
ResourceComponent resourceComponent = resourceTreeVisitor.visit();
Assert.assertNotNull(resourceComponent.getChildren());
}
@Test
public void rootNode() throws Exception {
List<Resource> resourceList = new ArrayList<>();
Resource resource1 = new Resource(3,-1,"b","/b",true);
Resource resource2 = new Resource(4,2,"a1.txt","/a/a1.txt",false);
Resource resource3 = new Resource(5,3,"b1.txt","/b/b1.txt",false);
Resource resource4 = new Resource(6,3,"b2.jar","/b/b2.jar",false);
Resource resource5 = new Resource(7,-1,"b2","/b2",true);
Resource resource6 = new Resource(8,-1,"b2","/b/b2",true);
Resource resource7 = new Resource(9,8,"c2.jar","/b/b2/c2.jar",false);
resourceList.add(resource1);
resourceList.add(resource2);
resourceList.add(resource3);
resourceList.add(resource4);
resourceList.add(resource5);
resourceList.add(resource6);
resourceList.add(resource7);
ResourceTreeVisitor resourceTreeVisitor = new ResourceTreeVisitor(resourceList);
Assert.assertTrue(resourceTreeVisitor.rootNode(resource1));
Assert.assertTrue(resourceTreeVisitor.rootNode(resource2));
Assert.assertFalse(resourceTreeVisitor.rootNode(resource3));
}
}

112
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ResourcesServiceTest.java

@ -24,10 +24,7 @@ import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ResourceType; import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.enums.UserType; import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.common.utils.CollectionUtils; import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.common.utils.FileUtils;
import org.apache.dolphinscheduler.common.utils.HadoopUtils;
import org.apache.dolphinscheduler.common.utils.PropertyUtils;
import org.apache.dolphinscheduler.dao.entity.Resource; import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.Tenant; import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.UdfFunc; import org.apache.dolphinscheduler.dao.entity.UdfFunc;
@ -40,6 +37,7 @@ import org.junit.runner.RunWith;
import org.mockito.InjectMocks; import org.mockito.InjectMocks;
import org.mockito.Mock; import org.mockito.Mock;
import org.mockito.Mockito; import org.mockito.Mockito;
import org.omg.CORBA.Any;
import org.powermock.api.mockito.PowerMockito; import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PowerMockIgnore; import org.powermock.core.classloader.annotations.PowerMockIgnore;
import org.powermock.core.classloader.annotations.PrepareForTest; import org.powermock.core.classloader.annotations.PrepareForTest;
@ -73,6 +71,8 @@ public class ResourcesServiceTest {
private UserMapper userMapper; private UserMapper userMapper;
@Mock @Mock
private UdfFuncMapper udfFunctionMapper; private UdfFuncMapper udfFunctionMapper;
@Mock
private ProcessDefinitionMapper processDefinitionMapper;
@Before @Before
public void setUp() { public void setUp() {
@@ -96,14 +96,14 @@ public class ResourcesServiceTest {
        PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
        User user = new User();
        //HDFS_NOT_STARTUP
-       Result result = resourcesService.createResource(user,"ResourcesServiceTest","ResourcesServiceTest",ResourceType.FILE,null);
+       Result result = resourcesService.createResource(user,"ResourcesServiceTest","ResourcesServiceTest",ResourceType.FILE,null,-1,"/");
        logger.info(result.toString());
        Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(),result.getMsg());

        //RESOURCE_FILE_IS_EMPTY
        MockMultipartFile mockMultipartFile = new MockMultipartFile("test.pdf",new String().getBytes());
        PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
-       result = resourcesService.createResource(user,"ResourcesServiceTest","ResourcesServiceTest",ResourceType.FILE,mockMultipartFile);
+       result = resourcesService.createResource(user,"ResourcesServiceTest","ResourcesServiceTest",ResourceType.FILE,mockMultipartFile,-1,"/");
        logger.info(result.toString());
        Assert.assertEquals(Status.RESOURCE_FILE_IS_EMPTY.getMsg(),result.getMsg());
@@ -111,31 +111,42 @@ public class ResourcesServiceTest {
        mockMultipartFile = new MockMultipartFile("test.pdf","test.pdf","pdf",new String("test").getBytes());
        PowerMockito.when(FileUtils.suffix("test.pdf")).thenReturn("pdf");
        PowerMockito.when(FileUtils.suffix("ResourcesServiceTest.jar")).thenReturn("jar");
-       result = resourcesService.createResource(user,"ResourcesServiceTest.jar","ResourcesServiceTest",ResourceType.FILE,mockMultipartFile);
+       result = resourcesService.createResource(user,"ResourcesServiceTest.jar","ResourcesServiceTest",ResourceType.FILE,mockMultipartFile,-1,"/");
        logger.info(result.toString());
        Assert.assertEquals(Status.RESOURCE_SUFFIX_FORBID_CHANGE.getMsg(),result.getMsg());

        //UDF_RESOURCE_SUFFIX_NOT_JAR
        mockMultipartFile = new MockMultipartFile("ResourcesServiceTest.pdf","ResourcesServiceTest.pdf","pdf",new String("test").getBytes());
        PowerMockito.when(FileUtils.suffix("ResourcesServiceTest.pdf")).thenReturn("pdf");
-       result = resourcesService.createResource(user,"ResourcesServiceTest.pdf","ResourcesServiceTest",ResourceType.UDF,mockMultipartFile);
+       result = resourcesService.createResource(user,"ResourcesServiceTest.pdf","ResourcesServiceTest",ResourceType.UDF,mockMultipartFile,-1,"/");
        logger.info(result.toString());
        Assert.assertEquals(Status.UDF_RESOURCE_SUFFIX_NOT_JAR.getMsg(),result.getMsg());
-
-       //UDF_RESOURCE_SUFFIX_NOT_JAR
-       Mockito.when(tenantMapper.queryById(0)).thenReturn(getTenant());
-       Mockito.when(resourcesMapper.queryResourceList("ResourcesServiceTest.jar", 0, 1)).thenReturn(getResourceList());
-       mockMultipartFile = new MockMultipartFile("ResourcesServiceTest.jar","ResourcesServiceTest.jar","pdf",new String("test").getBytes());
-       result = resourcesService.createResource(user,"ResourcesServiceTest.jar","ResourcesServiceTest",ResourceType.UDF,mockMultipartFile);
-       logger.info(result.toString());
-       Assert.assertEquals(Status.RESOURCE_EXIST.getMsg(),result.getMsg());
-       //SUCCESS
-       Mockito.when(resourcesMapper.queryResourceList("ResourcesServiceTest.jar", 0, 1)).thenReturn(new ArrayList<>());
-       result = resourcesService.createResource(user,"ResourcesServiceTest.jar","ResourcesServiceTest",ResourceType.UDF,mockMultipartFile);
-       logger.info(result.toString());
-       Assert.assertEquals(Status.SUCCESS.getMsg(),result.getMsg());
    }
+
+   @Test
+   public void testCreateDirecotry(){
+
+       PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
+       User user = new User();
+       //HDFS_NOT_STARTUP
+       Result result = resourcesService.createDirectory(user,"directoryTest","directory test",ResourceType.FILE,-1,"/");
+       logger.info(result.toString());
+       Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(),result.getMsg());
+
+       //PARENT_RESOURCE_NOT_EXIST
+       PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
+       Mockito.when(resourcesMapper.selectById(Mockito.anyInt())).thenReturn(null);
+       result = resourcesService.createDirectory(user,"directoryTest","directory test",ResourceType.FILE,1,"/");
+       logger.info(result.toString());
+       Assert.assertEquals(Status.PARENT_RESOURCE_NOT_EXIST.getMsg(),result.getMsg());
+
+       //RESOURCE_EXIST
+       PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
+       Mockito.when(resourcesMapper.queryResourceList("/directoryTest", 0, 0)).thenReturn(getResourceList());
+       result = resourcesService.createDirectory(user,"directoryTest","directory test",ResourceType.FILE,-1,"/");
+       logger.info(result.toString());
+       Assert.assertEquals(Status.RESOURCE_EXIST.getMsg(),result.getMsg());
+   }
@@ -163,41 +174,46 @@ public class ResourcesServiceTest {
        //SUCCESS
        user.setId(1);
-       result = resourcesService.updateResource(user,1,"ResourcesServiceTest.jar","ResourcesServiceTest.jar",ResourceType.FILE);
+       Mockito.when(userMapper.queryDetailsById(1)).thenReturn(getUser());
+       Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
+       result = resourcesService.updateResource(user,1,"ResourcesServiceTest.jar","ResourcesServiceTest",ResourceType.FILE);
        logger.info(result.toString());
        Assert.assertEquals(Status.SUCCESS.getMsg(),result.getMsg());

        //RESOURCE_EXIST
-       Mockito.when(resourcesMapper.queryResourceList("ResourcesServiceTest1.jar", 0, 0)).thenReturn(getResourceList());
-       result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest1.jar",ResourceType.FILE);
+       Mockito.when(resourcesMapper.queryResourceList("/ResourcesServiceTest1.jar", 0, 0)).thenReturn(getResourceList());
+       result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest",ResourceType.FILE);
        logger.info(result.toString());
        Assert.assertEquals(Status.RESOURCE_EXIST.getMsg(),result.getMsg());

        //USER_NOT_EXIST
-       result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest1.jar",ResourceType.UDF);
+       Mockito.when(userMapper.queryDetailsById(Mockito.anyInt())).thenReturn(null);
+       result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest",ResourceType.UDF);
        logger.info(result.toString());
        Assert.assertTrue(Status.USER_NOT_EXIST.getCode() == result.getCode());

        //TENANT_NOT_EXIST
        Mockito.when(userMapper.queryDetailsById(1)).thenReturn(getUser());
-       result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest1.jar",ResourceType.UDF);
+       Mockito.when(tenantMapper.queryById(Mockito.anyInt())).thenReturn(null);
+       result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest",ResourceType.UDF);
        logger.info(result.toString());
        Assert.assertEquals(Status.TENANT_NOT_EXIST.getMsg(),result.getMsg());

        //RESOURCE_NOT_EXIST
        Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
-       PowerMockito.when(HadoopUtils.getHdfsFilename(Mockito.any(), Mockito.any())).thenReturn("test1");
+       PowerMockito.when(HadoopUtils.getHdfsResourceFileName(Mockito.any(), Mockito.any())).thenReturn("test1");
        try {
            Mockito.when(hadoopUtils.exists("test")).thenReturn(true);
        } catch (IOException e) {
            e.printStackTrace();
        }
-       result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest1.jar",ResourceType.UDF);
+       result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest",ResourceType.UDF);
        logger.info(result.toString());
        Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(),result.getMsg());

        //SUCCESS
-       PowerMockito.when(HadoopUtils.getHdfsFilename(Mockito.any(), Mockito.any())).thenReturn("test");
+       PowerMockito.when(HadoopUtils.getHdfsResourceFileName(Mockito.any(), Mockito.any())).thenReturn("test");
        result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest1.jar",ResourceType.UDF);
        logger.info(result.toString());
        Assert.assertEquals(Status.SUCCESS.getMsg(),result.getMsg());
@@ -212,8 +228,8 @@ public class ResourcesServiceTest {
        resourcePage.setTotal(1);
        resourcePage.setRecords(getResourceList());
        Mockito.when(resourcesMapper.queryResourcePaging(Mockito.any(Page.class),
-               Mockito.eq(0), Mockito.eq(0), Mockito.eq("test"))).thenReturn(resourcePage);
-       Map<String, Object> result = resourcesService.queryResourceListPaging(loginUser,ResourceType.FILE,"test",1,10);
+               Mockito.eq(0),Mockito.eq(-1), Mockito.eq(0), Mockito.eq("test"))).thenReturn(resourcePage);
+       Map<String, Object> result = resourcesService.queryResourceListPaging(loginUser,-1,ResourceType.FILE,"test",1,10);
        logger.info(result.toString());
        Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
        PageInfo pageInfo = (PageInfo) result.get(Constants.DATA_LIST);
@@ -263,6 +279,7 @@ public class ResourcesServiceTest {
        //TENANT_NOT_EXIST
        loginUser.setUserType(UserType.ADMIN_USER);
        loginUser.setTenantId(2);
+       Mockito.when(userMapper.queryDetailsById(Mockito.anyInt())).thenReturn(loginUser);
        result = resourcesService.delete(loginUser,1);
        logger.info(result.toString());
        Assert.assertEquals(Status.TENANT_NOT_EXIST.getMsg(), result.getMsg());
@@ -285,14 +302,20 @@ public class ResourcesServiceTest {
        User user = new User();
        user.setId(1);
-       Mockito.when(resourcesMapper.queryResourceList("test", 0, 0)).thenReturn(getResourceList());
-       Result result = resourcesService.verifyResourceName("test",ResourceType.FILE,user);
+       Mockito.when(resourcesMapper.queryResourceList("/ResourcesServiceTest.jar", 0, 0)).thenReturn(getResourceList());
+       Result result = resourcesService.verifyResourceName("/ResourcesServiceTest.jar",ResourceType.FILE,user);
        logger.info(result.toString());
        Assert.assertEquals(Status.RESOURCE_EXIST.getMsg(), result.getMsg());

        //TENANT_NOT_EXIST
        Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
-       result = resourcesService.verifyResourceName("test1",ResourceType.FILE,user);
+       String unExistFullName = "/test.jar";
+       try {
+           Mockito.when(hadoopUtils.exists(unExistFullName)).thenReturn(false);
+       } catch (IOException e) {
+           logger.error("hadoop error",e);
+       }
+       result = resourcesService.verifyResourceName("/test.jar",ResourceType.FILE,user);
        logger.info(result.toString());
        Assert.assertEquals(Status.TENANT_NOT_EXIST.getMsg(), result.getMsg());
@@ -304,10 +327,10 @@ public class ResourcesServiceTest {
        } catch (IOException e) {
            logger.error("hadoop error",e);
        }
-       PowerMockito.when(HadoopUtils.getHdfsFilename("123", "test1")).thenReturn("test");
-       result = resourcesService.verifyResourceName("test1",ResourceType.FILE,user);
+       PowerMockito.when(HadoopUtils.getHdfsResourceFileName("123", "test1")).thenReturn("test");
+       result = resourcesService.verifyResourceName("/ResourcesServiceTest.jar",ResourceType.FILE,user);
        logger.info(result.toString());
-       Assert.assertTrue(Status.RESOURCE_FILE_EXIST.getCode()==result.getCode());
+       Assert.assertTrue(Status.RESOURCE_EXIST.getCode()==result.getCode());

        //SUCCESS
        result = resourcesService.verifyResourceName("test2",ResourceType.FILE,user);
@@ -389,14 +412,14 @@ public class ResourcesServiceTest {
        PowerMockito.when(HadoopUtils.getHdfsUdfDir("udfDir")).thenReturn("udfDir");
        User user = getUser();
        //HDFS_NOT_STARTUP
-       Result result = resourcesService.onlineCreateResource(user,ResourceType.FILE,"test","jar","desc","content");
+       Result result = resourcesService.onlineCreateResource(user,ResourceType.FILE,"test","jar","desc","content",-1,"/");
        logger.info(result.toString());
        Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(),result.getMsg());

        //RESOURCE_SUFFIX_NOT_SUPPORT_VIEW
        PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
        PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("class");
-       result = resourcesService.onlineCreateResource(user,ResourceType.FILE,"test","jar","desc","content");
+       result = resourcesService.onlineCreateResource(user,ResourceType.FILE,"test","jar","desc","content",-1,"/");
        logger.info(result.toString());
        Assert.assertEquals(Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW.getMsg(),result.getMsg());
@@ -404,7 +427,7 @@ public class ResourcesServiceTest {
        try {
            PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("jar");
            Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
-           result = resourcesService.onlineCreateResource(user, ResourceType.FILE, "test", "jar", "desc", "content");
+           result = resourcesService.onlineCreateResource(user, ResourceType.FILE, "test", "jar", "desc", "content",-1,"/");
        }catch (RuntimeException ex){
            logger.info(result.toString());
            Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(), ex.getMessage());
@@ -413,7 +436,7 @@ public class ResourcesServiceTest {
        //SUCCESS
        Mockito.when(FileUtils.getUploadFilename(Mockito.anyString(), Mockito.anyString())).thenReturn("test");
        PowerMockito.when(FileUtils.writeContent2File(Mockito.anyString(), Mockito.anyString())).thenReturn(true);
-       result = resourcesService.onlineCreateResource(user,ResourceType.FILE,"test","jar","desc","content");
+       result = resourcesService.onlineCreateResource(user,ResourceType.FILE,"test","jar","desc","content",-1,"/");
        logger.info(result.toString());
        Assert.assertEquals(Status.SUCCESS.getMsg(),result.getMsg());
@@ -584,13 +607,26 @@ public class ResourcesServiceTest {
    private Resource getResource(){

        Resource resource = new Resource();
+       resource.setPid(-1);
        resource.setUserId(1);
        resource.setDescription("ResourcesServiceTest.jar");
        resource.setAlias("ResourcesServiceTest.jar");
+       resource.setFullName("/ResourcesServiceTest.jar");
        resource.setType(ResourceType.FILE);
        return resource;
    }

+   private Resource getUdfResource(){
+
+       Resource resource = new Resource();
+       resource.setUserId(1);
+       resource.setDescription("udfTest");
+       resource.setAlias("udfTest.jar");
+       resource.setFullName("/udfTest.jar");
+       resource.setType(ResourceType.UDF);
+       return resource;
+   }
+
    private UdfFunc getUdfFunc(){

        UdfFunc udfFunc = new UdfFunc();

7
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/utils/CheckUtilsTest.java

@@ -43,6 +43,7 @@ import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

+import java.util.ArrayList;
import java.util.Map;

import static org.junit.Assert.*;

@@ -173,7 +174,11 @@ public class CheckUtilsTest {
        // MapreduceParameters
        MapreduceParameters mapreduceParameters = new MapreduceParameters();
        assertFalse(CheckUtils.checkTaskNodeParameters(JSONUtils.toJsonString(mapreduceParameters), TaskType.MR.toString()));
-       mapreduceParameters.setMainJar(new ResourceInfo());
+       ResourceInfo resourceInfoMapreduce = new ResourceInfo();
+       resourceInfoMapreduce.setId(1);
+       resourceInfoMapreduce.setRes("");
+       mapreduceParameters.setMainJar(resourceInfoMapreduce);
        mapreduceParameters.setProgramType(ProgramType.JAVA);
        assertTrue(CheckUtils.checkTaskNodeParameters(JSONUtils.toJsonString(mapreduceParameters), TaskType.MR.toString()));

3
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java

@@ -972,7 +972,8 @@ public final class Constants {
    public static final String JDBC_POSTGRESQL = "jdbc:postgresql://";
    public static final String JDBC_HIVE_2 = "jdbc:hive2://";
    public static final String JDBC_CLICKHOUSE = "jdbc:clickhouse://";
-   public static final String JDBC_ORACLE = "jdbc:oracle:thin:@//";
+   public static final String JDBC_ORACLE_SID = "jdbc:oracle:thin:@";
+   public static final String JDBC_ORACLE_SERVICE_NAME = "jdbc:oracle:thin:@//";
    public static final String JDBC_SQLSERVER = "jdbc:sqlserver://";
    public static final String JDBC_DB2 = "jdbc:db2://";

12
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/AuthorizationType.java

@@ -23,13 +23,17 @@ import com.baomidou.mybatisplus.annotation.EnumValue;
 */
public enum AuthorizationType {
    /**
-    * 0 RESOURCE_FILE;
+    * 0 RESOURCE_FILE_ID;
+    * 0 RESOURCE_FILE_NAME;
+    * 1 UDF_FILE;
     * 1 DATASOURCE;
     * 2 UDF;
     */
-   RESOURCE_FILE(0, "resource file"),
-   DATASOURCE(1, "data source"),
-   UDF(2, "udf function");
+   RESOURCE_FILE_ID(0, "resource file id"),
+   RESOURCE_FILE_NAME(1, "resource file name"),
+   UDF_FILE(2, "udf file"),
+   DATASOURCE(3, "data source"),
+   UDF(4, "udf function");

    AuthorizationType(int code, String descp){
        this.code = code;
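
Note: since the persisted codes shift here (DATASOURCE 1 to 3, UDF 2 to 4), any caller that switches on the type must cover the new values. A hedged sketch of how an authorization check might branch; the check* helper names are illustrative, not from this diff:

    // Illustrative only: branch on the new AuthorizationType values.
    switch (authorizationType) {
        case RESOURCE_FILE_ID:    // 0: authorize file resources by id
        case RESOURCE_FILE_NAME:  // 1: authorize file resources by full name
        case UDF_FILE:            // 2: authorize udf files
            checkResourcePermission(userId, needChecks);
            break;
        case DATASOURCE:          // 3
            checkDatasourcePermission(userId, needChecks);
            break;
        case UDF:                 // 4
            checkUdfFuncPermission(userId, needChecks);
            break;
    }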

44
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/DbConnectType.java

@@ -0,0 +1,44 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.enums;
import com.baomidou.mybatisplus.annotation.EnumValue;
public enum DbConnectType {
ORACLE_SERVICE_NAME(0, "Oracle Service Name"),
ORACLE_SID(1, "Oracle SID");
DbConnectType(int code, String descp) {
this.code = code;
this.descp = descp;
}
@EnumValue
private final int code;
private final String descp;
public int getCode() {
return code;
}
public String getDescp() {
return descp;
}
}
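
Note: this enum pairs with the two Oracle constants added to Constants.java above; SID connections use jdbc:oracle:thin:@host:port:sid while service-name connections use jdbc:oracle:thin:@//host:port/service. A minimal sketch of how a JDBC URL might be assembled from them (the method shape is an assumption; only the constants and enum come from this diff):

    // Sketch: choose the Oracle URL prefix based on DbConnectType.
    public String buildOracleJdbcUrl(DbConnectType type, String host, int port, String db) {
        if (type == DbConnectType.ORACLE_SID) {
            // jdbc:oracle:thin:@host:port:SID
            return Constants.JDBC_ORACLE_SID + host + ":" + port + ":" + db;
        }
        // jdbc:oracle:thin:@//host:port/serviceName
        return Constants.JDBC_ORACLE_SERVICE_NAME + host + ":" + port + "/" + db;
    }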

10
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/process/ResourceInfo.java

@@ -23,6 +23,16 @@ public class ResourceInfo {
    /**
     * res the name of the resource that was uploaded
     */
+   private int id;
+
+   public int getId() {
+       return id;
+   }
+
+   public void setId(int id) {
+       this.id = id;
+   }
+
    private String res;

    public String getRes() {
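
Note: with the new id field a task can reference a resource by primary key as well as by name, which is what lets the service layer keep task definitions valid when a file is renamed or moved. A short usage sketch with made-up values:

    // A task's main jar referenced by both id and name (values illustrative).
    ResourceInfo mainJar = new ResourceInfo();
    mainJar.setId(3);                 // primary key in t_ds_resources
    mainJar.setRes("udf-utils.jar");  // resource name shown in the UI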

3
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/AbstractParameters.java

@@ -17,6 +17,7 @@
package org.apache.dolphinscheduler.common.task;

import org.apache.dolphinscheduler.common.process.Property;
+import org.apache.dolphinscheduler.common.process.ResourceInfo;

import java.util.LinkedHashMap;
import java.util.List;

@@ -31,7 +32,7 @@ public abstract class AbstractParameters implements IParameters {
    public abstract boolean checkParameters();

    @Override
-   public abstract List<String> getResourceFilesList();
+   public abstract List<ResourceInfo> getResourceFilesList();

    /**
     * local parameters

4
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/IParameters.java

@@ -16,6 +16,8 @@
 */
package org.apache.dolphinscheduler.common.task;

+import org.apache.dolphinscheduler.common.process.ResourceInfo;
+
import java.util.List;

/**
@@ -34,5 +36,5 @@ public interface IParameters {
     *
     * @return resource files list
     */
-   List<String> getResourceFilesList();
+   List<ResourceInfo> getResourceFilesList();
}

3
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/conditions/ConditionsParameters.java

@@ -18,6 +18,7 @@ package org.apache.dolphinscheduler.common.task.conditions;

import org.apache.dolphinscheduler.common.enums.DependentRelation;
import org.apache.dolphinscheduler.common.model.DependentTaskModel;
+import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;

import java.util.List;

@@ -41,7 +42,7 @@ public class ConditionsParameters extends AbstractParameters {
    }

    @Override
-   public List<String> getResourceFilesList() {
+   public List<ResourceInfo> getResourceFilesList() {
        return null;
    }

3
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/datax/DataxParameters.java

@@ -20,6 +20,7 @@ import java.util.ArrayList;
import java.util.List;

import org.apache.commons.lang.StringUtils;
+import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;

/**
@@ -198,7 +199,7 @@ public class DataxParameters extends AbstractParameters {
    }

    @Override
-   public List<String> getResourceFilesList() {
+   public List<ResourceInfo> getResourceFilesList() {
        return new ArrayList<>();
    }

3
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/dependent/DependentParameters.java

@@ -18,6 +18,7 @@ package org.apache.dolphinscheduler.common.task.dependent;

import org.apache.dolphinscheduler.common.enums.DependentRelation;
import org.apache.dolphinscheduler.common.model.DependentTaskModel;
+import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;

import java.util.ArrayList;

@@ -36,7 +37,7 @@ public class DependentParameters extends AbstractParameters {
    }

    @Override
-   public List<String> getResourceFilesList() {
+   public List<ResourceInfo> getResourceFilesList() {
        return new ArrayList<>();
    }

29
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/flink/FlinkParameters.java

@@ -19,10 +19,10 @@ package org.apache.dolphinscheduler.common.task.flink;
import org.apache.dolphinscheduler.common.enums.ProgramType;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
+import org.apache.dolphinscheduler.common.utils.CollectionUtils;

-import java.util.Collections;
+import java.util.ArrayList;
import java.util.List;
-import java.util.stream.Collectors;

/**
 * spark parameters
@@ -50,35 +50,35 @@ public class FlinkParameters extends AbstractParameters {
    private String mainArgs;

    /**
-    * slot个数
+    * slot count
     */
    private int slot;

    /**
-    *Yarn application的名字
+    *Yarn application name
     */
    private String appName;

    /**
-    * taskManager 数量
+    * taskManager count
     */
    private int taskManager;

    /**
-    * jobManagerMemory 内存大小
+    * job manager memory
     */
    private String jobManagerMemory ;

    /**
-    * taskManagerMemory内存大小
+    * task manager memory
     */
    private String taskManagerMemory;

    /**
     * resource list
     */
-   private List<ResourceInfo> resourceList;
+   private List<ResourceInfo> resourceList = new ArrayList<>();

    /**
     * The YARN queue to submit to
@@ -207,16 +207,11 @@ public class FlinkParameters extends AbstractParameters {

    @Override
-   public List<String> getResourceFilesList() {
-       if(resourceList != null ) {
-           List<String> resourceFiles = resourceList.stream()
-                   .map(ResourceInfo::getRes).collect(Collectors.toList());
-           if(mainJar != null) {
-               resourceFiles.add(mainJar.getRes());
-           }
-           return resourceFiles;
-       }
-       return Collections.emptyList();
+   public List<ResourceInfo> getResourceFilesList() {
+       if (mainJar != null && !resourceList.contains(mainJar)) {
+           resourceList.add(mainJar);
+       }
+       return resourceList;
    }
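
Note: the rewritten getter now returns ResourceInfo objects and folds mainJar into the list exactly once, so callers no longer need null guards. A quick usage sketch, mirroring the updated FlinkParametersTest later in this diff:

    FlinkParameters params = new FlinkParameters();
    ResourceInfo mainJar = new ResourceInfo();
    mainJar.setRes("app-1.0.0.jar");   // illustrative jar name
    params.setMainJar(mainJar);

    // mainJar is appended on the first call and not duplicated afterwards
    List<ResourceInfo> files = params.getResourceFilesList();  // size == 1
    files = params.getResourceFilesList();                     // still size == 1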

3
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/http/HttpParameters.java

@@ -19,6 +19,7 @@ package org.apache.dolphinscheduler.common.task.http;
import org.apache.dolphinscheduler.common.enums.HttpCheckCondition;
import org.apache.dolphinscheduler.common.enums.HttpMethod;
import org.apache.dolphinscheduler.common.process.HttpProperty;
+import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;

import org.apache.commons.lang.StringUtils;

@@ -62,7 +63,7 @@ public class HttpParameters extends AbstractParameters {
    }

    @Override
-   public List<String> getResourceFilesList() {
+   public List<ResourceInfo> getResourceFilesList() {
        return new ArrayList<>();
    }

20
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/mr/MapreduceParameters.java

@@ -19,10 +19,10 @@ package org.apache.dolphinscheduler.common.task.mr;
import org.apache.dolphinscheduler.common.enums.ProgramType;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
+import org.apache.dolphinscheduler.common.utils.CollectionUtils;

-import java.util.Collections;
+import java.util.ArrayList;
import java.util.List;
-import java.util.stream.Collectors;

public class MapreduceParameters extends AbstractParameters {

@@ -54,7 +54,7 @@ public class MapreduceParameters extends AbstractParameters {
    /**
     * resource list
     */
-   private List<ResourceInfo> resourceList;
+   private List<ResourceInfo> resourceList = new ArrayList<>();

    /**
     * program type
@@ -125,16 +125,12 @@ public class MapreduceParameters extends AbstractParameters {
    }

    @Override
-   public List<String> getResourceFilesList() {
-       if(resourceList != null ) {
-           List<String> resourceFiles = resourceList.stream()
-                   .map(ResourceInfo::getRes).collect(Collectors.toList());
-           if(mainJar != null) {
-               resourceFiles.add(mainJar.getRes());
-           }
-           return resourceFiles;
-       }
-       return Collections.emptyList();
+   public List<ResourceInfo> getResourceFilesList() {
+       if (mainJar != null && !resourceList.contains(mainJar)) {
+           resourceList.add(mainJar);
+       }
+       return resourceList;
    }

    @Override

3
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/procedure/ProcedureParameters.java

@@ -16,6 +16,7 @@
 */
package org.apache.dolphinscheduler.common.task.procedure;

+import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;

import org.apache.commons.lang.StringUtils;

@@ -74,7 +75,7 @@ public class ProcedureParameters extends AbstractParameters {
    }

    @Override
-   public List<String> getResourceFilesList() {
+   public List<ResourceInfo> getResourceFilesList() {
        return new ArrayList<>();
    }

10
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/python/PythonParameters.java

@@ -21,7 +21,6 @@ import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;

import java.util.List;
-import java.util.stream.Collectors;

public class PythonParameters extends AbstractParameters {
    /**
@@ -56,12 +55,7 @@ public class PythonParameters extends AbstractParameters {
    }

    @Override
-   public List<String> getResourceFilesList() {
-       if (resourceList != null) {
-           return resourceList.stream()
-                   .map(p -> p.getRes()).collect(Collectors.toList());
-       }
-       return null;
+   public List<ResourceInfo> getResourceFilesList() {
+       return this.resourceList;
    }
}

9
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/shell/ShellParameters.java

@@ -59,12 +59,7 @@ public class ShellParameters extends AbstractParameters {
    }

    @Override
-   public List<String> getResourceFilesList() {
-       if (resourceList != null) {
-           return resourceList.stream()
-                   .map(p -> p.getRes()).collect(Collectors.toList());
-       }
-       return null;
+   public List<ResourceInfo> getResourceFilesList() {
+       return resourceList;
    }
}

20
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/spark/SparkParameters.java

@@ -19,10 +19,10 @@ package org.apache.dolphinscheduler.common.task.spark;
import org.apache.dolphinscheduler.common.enums.ProgramType;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
+import org.apache.dolphinscheduler.common.utils.CollectionUtils;

-import java.util.Collections;
+import java.util.ArrayList;
import java.util.List;
-import java.util.stream.Collectors;

/**
 * spark parameters
@@ -78,7 +78,7 @@ public class SparkParameters extends AbstractParameters {
    /**
     * resource list
     */
-   private List<ResourceInfo> resourceList;
+   private List<ResourceInfo> resourceList = new ArrayList<>();

    /**
     * The YARN queue to submit to
@@ -219,18 +219,12 @@ public class SparkParameters extends AbstractParameters {
        return mainJar != null && programType != null && sparkVersion != null;
    }

    @Override
-   public List<String> getResourceFilesList() {
-       if(resourceList !=null ) {
-           List<String> resourceFilesList = resourceList.stream()
-                   .map(ResourceInfo::getRes).collect(Collectors.toList());
-           if(mainJar != null){
-               resourceFilesList.add(mainJar.getRes());
-           }
-           return resourceFilesList;
-       }
-       return Collections.emptyList();
+   public List<ResourceInfo> getResourceFilesList() {
+       if (mainJar != null && !resourceList.contains(mainJar)) {
+           resourceList.add(mainJar);
+       }
+       return resourceList;
    }

3
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/sql/SqlParameters.java

@@ -16,6 +16,7 @@
 */
package org.apache.dolphinscheduler.common.task.sql;

+import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;

import org.apache.commons.lang.StringUtils;

@@ -189,7 +190,7 @@ public class SqlParameters extends AbstractParameters {
    }

    @Override
-   public List<String> getResourceFilesList() {
+   public List<ResourceInfo> getResourceFilesList() {
        return new ArrayList<>();
    }

3
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/sqoop/SqoopParameters.java

@@ -16,6 +16,7 @@
 */
package org.apache.dolphinscheduler.common.task.sqoop;

+import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;

import org.apache.dolphinscheduler.common.utils.StringUtils;

@@ -111,7 +112,7 @@ public class SqoopParameters extends AbstractParameters {
    }

    @Override
-   public List<String> getResourceFilesList() {
+   public List<ResourceInfo> getResourceFilesList() {
        return new ArrayList<>();
    }
}

3
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/subprocess/SubProcessParameters.java

@@ -15,6 +15,7 @@
 * limitations under the License.
 */
package org.apache.dolphinscheduler.common.task.subprocess;

+import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;

import java.util.ArrayList;

@@ -42,7 +43,7 @@ public class SubProcessParameters extends AbstractParameters {
    }

    @Override
-   public List<String> getResourceFilesList() {
+   public List<ResourceInfo> getResourceFilesList() {
        return new ArrayList<>();
    }
}

49
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java

@@ -26,6 +26,7 @@ import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONException;
import com.alibaba.fastjson.JSONObject;
import org.apache.commons.io.IOUtils;
+import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.FileSystem;

@@ -415,6 +416,22 @@ public class HadoopUtils implements Closeable {
        }
    }

+   /**
+    * hdfs resource dir
+    *
+    * @param tenantCode tenant code
+    * @return hdfs resource dir
+    */
+   public static String getHdfsDir(ResourceType resourceType,String tenantCode) {
+       String hdfsDir = "";
+       if (resourceType.equals(ResourceType.FILE)) {
+           hdfsDir = getHdfsResDir(tenantCode);
+       } else if (resourceType.equals(ResourceType.UDF)) {
+           hdfsDir = getHdfsUdfDir(tenantCode);
+       }
+       return hdfsDir;
+   }
+
    /**
     * hdfs resource dir
     *

@@ -450,22 +467,42 @@ public class HadoopUtils implements Closeable {
    /**
-    * get absolute path and name for file on hdfs
-    *
-    * @param tenantCode tenant code
-    * @param filename file name
-    * @return get absolute path and name for file on hdfs
-    */
-   public static String getHdfsFilename(String tenantCode, String filename) {
-       return String.format("%s/%s", getHdfsResDir(tenantCode), filename);
+    * get hdfs file name
+    *
+    * @param resourceType resource type
+    * @param tenantCode tenant code
+    * @param fileName file name
+    * @return hdfs file name
+    */
+   public static String getHdfsFileName(ResourceType resourceType, String tenantCode, String fileName) {
+       return String.format("%s/%s", getHdfsDir(resourceType,tenantCode), fileName);
+   }
+
+   /**
+    * get absolute path and name for resource file on hdfs
+    *
+    * @param tenantCode tenant code
+    * @param fileName file name
+    * @return get absolute path and name for file on hdfs
+    */
+   public static String getHdfsResourceFileName(String tenantCode, String fileName) {
+       return String.format("%s/%s", getHdfsResDir(tenantCode), fileName);
    }

    /**
     * get absolute path and name for udf file on hdfs
     *
     * @param tenantCode tenant code
-    * @param filename file name
+    * @param fileName file name
     * @return get absolute path and name for udf file on hdfs
     */
-   public static String getHdfsUdfFilename(String tenantCode, String filename) {
-       return String.format("%s/%s", getHdfsUdfDir(tenantCode), filename);
+   public static String getHdfsUdfFileName(String tenantCode, String fileName) {
+       return String.format("%s/%s", getHdfsUdfDir(tenantCode), fileName);
    }

    /**
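
Note: with the type-aware helpers, callers can resolve an HDFS path from a ResourceType instead of picking the res/udf variant themselves. A short example of equivalent calls; tenant code and file names are made up, and the exact directory layout depends on configuration:

    // Both resolve under the tenant's resource dir.
    String viaTyped  = HadoopUtils.getHdfsFileName(ResourceType.FILE, "tenantA", "etl.jar");
    String viaDirect = HadoopUtils.getHdfsResourceFileName("tenantA", "etl.jar");

    // UDF jars resolve under the tenant's udf dir instead.
    String udfPath = HadoopUtils.getHdfsUdfFileName("tenantA", "my-udf.jar");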
/** /**

14
dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/task/FlinkParametersTest.java

@@ -18,6 +18,7 @@ package org.apache.dolphinscheduler.common.task;

import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.flink.FlinkParameters;
+import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.junit.Assert;
import org.junit.Test;

@@ -28,8 +29,7 @@ public class FlinkParametersTest {
    @Test
    public void getResourceFilesList() {
        FlinkParameters flinkParameters = new FlinkParameters();
-       Assert.assertNotNull(flinkParameters.getResourceFilesList());
-       Assert.assertTrue(flinkParameters.getResourceFilesList().isEmpty());
+       Assert.assertTrue(CollectionUtils.isEmpty(flinkParameters.getResourceFilesList()));

        ResourceInfo mainResource = new ResourceInfo();
        mainResource.setRes("testFlinkMain-1.0.0-SNAPSHOT.jar");

@@ -41,15 +41,17 @@ public class FlinkParametersTest {
        resourceInfos.add(resourceInfo1);

        flinkParameters.setResourceList(resourceInfos);
-       Assert.assertNotNull(flinkParameters.getResourceFilesList());
-       Assert.assertEquals(2, flinkParameters.getResourceFilesList().size());
+       List<ResourceInfo> resourceFilesList = flinkParameters.getResourceFilesList();
+       Assert.assertNotNull(resourceFilesList);
+       Assert.assertEquals(2, resourceFilesList.size());

        ResourceInfo resourceInfo2 = new ResourceInfo();
        resourceInfo2.setRes("testFlinkParameters2.jar");
        resourceInfos.add(resourceInfo2);
        flinkParameters.setResourceList(resourceInfos);
-       Assert.assertNotNull(flinkParameters.getResourceFilesList());
-       Assert.assertEquals(3, flinkParameters.getResourceFilesList().size());
+       resourceFilesList = flinkParameters.getResourceFilesList();
+       Assert.assertNotNull(resourceFilesList);
+       Assert.assertEquals(3, resourceFilesList.size());
    }
}

15
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/OracleDataSource.java

@@ -17,13 +17,28 @@
package org.apache.dolphinscheduler.dao.datasource;

import org.apache.dolphinscheduler.common.Constants;
+import org.apache.dolphinscheduler.common.enums.DbConnectType;
import org.apache.dolphinscheduler.common.enums.DbType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

/**
 * data source of Oracle
 */
public class OracleDataSource extends BaseDataSource {

+   private static final Logger logger = LoggerFactory.getLogger(OracleDataSource.class);
+
+   private DbConnectType type;
+
+   public DbConnectType getType() {
+       return type;
+   }
+
+   public void setType(DbConnectType type) {
+       this.type = type;
+   }
+
    /**
     * @return driver class
     */

15
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/ProcessDefinition.java

@@ -163,6 +163,11 @@ public class ProcessDefinition {
     */
    private String modifyBy;

+   /**
+    * resource ids
+    */
+   private String resourceIds;
+
    public String getName() {
        return name;

@@ -334,6 +339,14 @@ public class ProcessDefinition {
        this.scheduleReleaseState = scheduleReleaseState;
    }

+   public String getResourceIds() {
+       return resourceIds;
+   }
+
+   public void setResourceIds(String resourceIds) {
+       this.resourceIds = resourceIds;
+   }
+
    public int getTimeout() {
        return timeout;
    }

@@ -393,6 +406,8 @@ public class ProcessDefinition {
                ", timeout=" + timeout +
                ", tenantId=" + tenantId +
                ", modifyBy='" + modifyBy + '\'' +
+               ", resourceIds='" + resourceIds + '\'' +
                '}';
    }
}
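
Note: resourceIds is stored as a comma-separated string (see the listResources query added to ProcessDefinitionMapper.xml below). A minimal sketch of turning it back into ids; this helper is illustrative, not part of the commit:

    import java.util.HashSet;
    import java.util.Set;

    // Illustrative: split "1,2,3" into resource ids, skipping blanks.
    public static Set<Integer> parseResourceIds(String resourceIds) {
        Set<Integer> ids = new HashSet<>();
        if (resourceIds == null || resourceIds.trim().isEmpty()) {
            return ids;
        }
        for (String part : resourceIds.split(",")) {
            ids.add(Integer.parseInt(part.trim()));
        }
        return ids;
    }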

68
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/Resource.java

@@ -32,11 +32,26 @@ public class Resource {
    @TableId(value="id", type=IdType.AUTO)
    private int id;

+   /**
+    * parent id
+    */
+   private int pid;
+
    /**
     * resource alias
     */
    private String alias;

+   /**
+    * full name
+    */
+   private String fullName;
+
+   /**
+    * is directory
+    */
+   private boolean isDirectory=false;
+
    /**
     * description
     */

@@ -89,7 +104,15 @@ public class Resource {
        this.updateTime = updateTime;
    }

-   public Resource(String alias, String fileName, String description, int userId, ResourceType type, long size, Date createTime, Date updateTime) {
+   public Resource(int id, int pid, String alias, String fullName, boolean isDirectory) {
+       this.id = id;
+       this.pid = pid;
+       this.alias = alias;
+       this.fullName = fullName;
+       this.isDirectory = isDirectory;
+   }
+
+   /*public Resource(String alias, String fileName, String description, int userId, ResourceType type, long size, Date createTime, Date updateTime) {
        this.alias = alias;
        this.fileName = fileName;
        this.description = description;

@@ -98,6 +121,20 @@ public class Resource {
        this.size = size;
        this.createTime = createTime;
        this.updateTime = updateTime;
+   }*/
+
+   public Resource(int pid, String alias, String fullName, boolean isDirectory, String description, String fileName, int userId, ResourceType type, long size, Date createTime, Date updateTime) {
+       this.pid = pid;
+       this.alias = alias;
+       this.fullName = fullName;
+       this.isDirectory = isDirectory;
+       this.description = description;
+       this.fileName = fileName;
+       this.userId = userId;
+       this.type = type;
+       this.size = size;
+       this.createTime = createTime;
+       this.updateTime = updateTime;
    }

    public int getId() {

@@ -116,6 +153,30 @@ public class Resource {
        this.alias = alias;
    }

+   public int getPid() {
+       return pid;
+   }
+
+   public void setPid(int pid) {
+       this.pid = pid;
+   }
+
+   public String getFullName() {
+       return fullName;
+   }
+
+   public void setFullName(String fullName) {
+       this.fullName = fullName;
+   }
+
+   public boolean isDirectory() {
+       return isDirectory;
+   }
+
+   public void setDirectory(boolean directory) {
+       isDirectory = directory;
+   }
+
    public String getFileName() {
        return fileName;
    }

@@ -177,9 +238,12 @@ public class Resource {
    public String toString() {
        return "Resource{" +
                "id=" + id +
+               ", pid=" + pid +
                ", alias='" + alias + '\'' +
-               ", fileName='" + fileName + '\'' +
+               ", fullName='" + fullName + '\'' +
+               ", isDirectory=" + isDirectory +
                ", description='" + description + '\'' +
+               ", fileName='" + fileName + '\'' +
                ", userId=" + userId +
                ", type=" + type +
                ", size=" + size +

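Note: the new five-argument constructor carries exactly the fields the resource tree needs (id, pid, alias, fullName, isDirectory), which is what ResourceTreeVisitor consumes in the tests earlier in this diff. A small usage sketch with made-up values:

    // A directory node and a child file node, as the tree visitor sees them.
    Resource dir  = new Resource(1, -1, "etl", "/etl", true);
    Resource file = new Resource(2, 1, "cleanup.sh", "/etl/cleanup.sh", false);
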
11
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/ProcessDefinitionMapper.java

@@ -20,9 +20,11 @@ import org.apache.dolphinscheduler.dao.entity.DefinitionGroupByUser;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
+import org.apache.ibatis.annotations.MapKey;
import org.apache.ibatis.annotations.Param;

import java.util.List;
+import java.util.Map;

/**
 * process definition mapper interface
@@ -83,7 +85,7 @@ public interface ProcessDefinitionMapper extends BaseMapper<ProcessDefinition> {
    List<ProcessDefinition> queryDefinitionListByTenant(@Param("tenantId") int tenantId);

    /**
     * count process definition group by user
     * @param userId userId
     * @param projectIds projectIds
     * @param isAdmin isAdmin
@@ -93,4 +95,11 @@ public interface ProcessDefinitionMapper extends BaseMapper<ProcessDefinition> {
            @Param("userId") Integer userId,
            @Param("projectIds") Integer[] projectIds,
            @Param("isAdmin") boolean isAdmin);
+
+   /**
+    * list all resource ids
+    * @return resource ids list
+    */
+   @MapKey("id")
+   List<Map<String, Object>> listResources();
}

53
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/ResourceMapper.java

@@ -30,12 +30,12 @@ public interface ResourceMapper extends BaseMapper<Resource> {
    /**
     * query resource list
-    * @param alias alias
+    * @param fullName full name
     * @param userId userId
     * @param type type
     * @return resource list
     */
-   List<Resource> queryResourceList(@Param("alias") String alias,
+   List<Resource> queryResourceList(@Param("fullName") String fullName,
                                     @Param("userId") int userId,
                                     @Param("type") int type);

@@ -59,6 +59,7 @@ public interface ResourceMapper extends BaseMapper<Resource> {
     */
    IPage<Resource> queryResourcePaging(IPage<Resource> page,
                                        @Param("userId") int userId,
+                                       @Param("id") int id,
                                        @Param("type") int type,
                                        @Param("searchVal") String searchVal);

@@ -76,13 +77,13 @@ public interface ResourceMapper extends BaseMapper<Resource> {
     */
    List<Resource> queryResourceExceptUserId(@Param("userId") int userId);

    /**
     * query tenant code by name
     * @param resName resource name
+    * @param resType resource type
     * @return tenant code
     */
-   String queryTenantCodeByResourceName(@Param("resName") String resName);
+   String queryTenantCodeByResourceName(@Param("resName") String resName,@Param("resType") int resType);

    /**
     * list authorized resource
@@ -91,4 +92,48 @@ public interface ResourceMapper extends BaseMapper<Resource> {
     * @return resource list
     */
    <T> List<Resource> listAuthorizedResource(@Param("userId") int userId,@Param("resNames")T[] resNames);
+
+   /**
+    * list authorized resource
+    * @param userId userId
+    * @param resIds resource ids
+    * @return resource list
+    */
+   <T> List<Resource> listAuthorizedResourceById(@Param("userId") int userId,@Param("resIds")T[] resIds);
+
+   /**
+    * delete resource by id array
+    * @param resIds resource id array
+    * @return delete num
+    */
+   int deleteIds(@Param("resIds")Integer[] resIds);
+
+   /**
+    * list children
+    * @param direcotyId directory id
+    * @return resource id array
+    */
+   List<Integer> listChildren(@Param("direcotyId") int direcotyId);
+
+   /**
+    * query resource by full name or pid
+    * @param fullName full name
+    * @param type resource type
+    * @return resource
+    */
+   List<Resource> queryResource(@Param("fullName") String fullName,@Param("type") int type);
+
+   /**
+    * list resource by id array
+    * @param resIds resource id array
+    * @return resource list
+    */
+   List<Resource> listResourceByIds(@Param("resIds")Integer[] resIds);
+
+   /**
+    * update resource
+    * @param resourceList resource list
+    * @return update num
+    */
+   int batchUpdateResource(@Param("resourceList") List<Resource> resourceList);
}

15
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/UdfFuncMapper.java

@@ -86,4 +86,19 @@ public interface UdfFuncMapper extends BaseMapper<UdfFunc> {
     */
    <T> List<UdfFunc> listAuthorizedUdfFunc (@Param("userId") int userId,@Param("udfIds")T[] udfIds);
+
+   /**
+    * list UDF by resource id
+    * @param resourceIds resource id array
+    * @return UDF function list
+    */
+   List<UdfFunc> listUdfByResourceId(@Param("resourceIds") int[] resourceIds);
+
+   /**
+    * list authorized UDF by resource id
+    * @param resourceIds resource id array
+    * @return UDF function list
+    */
+   List<UdfFunc> listAuthorizedUdfByResourceId(@Param("userId") int userId,@Param("resourceIds") int[] resourceIds);
}

11
dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProcessDefinitionMapper.xml

@@ -29,7 +29,9 @@
        and pd.name = #{processDefinitionName}
    </select>
    <select id="queryDefineListPaging" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition">
-       SELECT td.*,sc.schedule_release_state,tu.user_name
+       SELECT td.id, td.name, td.version, td.release_state, td.project_id, td.user_id, td.description, td.global_params,
+       td.flag, td.receivers, td.receivers_cc, td.timeout, td.tenant_id, td.modify_by, td.update_time, td.create_time,
+       sc.schedule_release_state, tu.user_name
        FROM t_ds_process_definition td
        left join (select process_definition_id,release_state as schedule_release_state from t_ds_schedules group by process_definition_id,release_state) sc on sc.process_definition_id = td.id
        left join t_ds_user tu on td.user_id = tu.id
@@ -87,4 +89,11 @@
        pd.user_id = u.id AND pd.project_id = p.id
        AND pd.id = #{processDefineId}
    </select>
+   <select id="listResources" resultType="java.util.HashMap">
+       SELECT id,resource_ids
+       FROM t_ds_process_definition
+       WHERE release_state = 1 and resource_ids is not null and resource_ids != ''
+   </select>
</mapper>
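
Note: because listResources is annotated with @MapKey("id"), each row arrives as a map keyed by column name. A hedged sketch of how the API side might build a resource-id to process-definition index from it; the variable names are illustrative, and the exact value types depend on the JDBC driver:

    // Illustrative: index online process definitions by the resource ids they use.
    Map<Integer, Set<Integer>> resourceProcessMap = new HashMap<>();
    for (Map<String, Object> row : processDefinitionMapper.listResources()) {
        Integer definitionId = (Integer) row.get("id");
        String resourceIds = (String) row.get("resource_ids");  // non-empty by the WHERE clause
        for (String resId : resourceIds.split(",")) {
            resourceProcessMap
                .computeIfAbsent(Integer.valueOf(resId.trim()), k -> new HashSet<>())
                .add(definitionId);
        }
    }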

7
dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProcessInstanceMapper.xml

@@ -66,7 +66,12 @@
    </select>
    <select id="queryProcessInstanceListPaging" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
-       select instance.*
+       select instance.id, instance.name, instance.process_definition_id, instance.state, instance.recovery, instance.start_time,
+       instance.end_time, instance.run_times, instance.host, instance.command_type, instance.command_param, instance.task_depend_type,
+       instance.max_try_times, instance.failure_strategy, instance.warning_type, instance.warning_group_id, instance.schedule_time,
+       instance.command_start_time, instance.global_params, instance.flag, instance.is_sub_process, instance.executor_id,
+       instance.history_cmd, instance.dependence_schedule_times, instance.process_instance_priority, instance.worker_group_id,
+       instance.timeout, instance.tenant_id, instance.update_time
        from t_ds_process_instance instance
        join t_ds_process_definition define ON instance.process_definition_id = define.id
        where 1=1

79
dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ResourceMapper.xml

@@ -22,8 +22,8 @@
select *
from t_ds_resources
where 1= 1
- <if test="alias != null and alias != ''">
- and alias = #{alias}
+ <if test="fullName != null and fullName != ''">
+ and full_name = #{fullName}
</if>
<if test="type != -1">
and type = #{type}
@@ -47,8 +47,8 @@
<select id="queryResourcePaging" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
select *
from t_ds_resources
- where type=#{type}
- <if test="userId != 0">
+ where type=#{type} and pid=#{id}
+ <if test="userId != 0 and id == -1">
and id in (select resources_id from t_ds_relation_resources_user where user_id=#{userId}
union select id as resources_id from t_ds_resources where user_id=#{userId})
</if>
@@ -70,7 +70,74 @@
<select id="queryTenantCodeByResourceName" resultType="java.lang.String">
select tenant_code
from t_ds_tenant t, t_ds_user u, t_ds_resources res
- where t.id = u.tenant_id and u.id = res.user_id and res.type=0
- and res.alias= #{resName}
+ where t.id = u.tenant_id and u.id = res.user_id and res.type=#{resType}
+ and res.full_name= #{resName}
</select>
<select id="listAuthorizedResource" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
select *
from t_ds_resources
where type=0
and id in (select resources_id from t_ds_relation_resources_user where user_id=#{userId}
union select id as resources_id from t_ds_resources where user_id=#{userId})
<if test="resNames != null and resNames != ''">
and full_name in
<foreach collection="resNames" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
</if>
</select>
<select id="listAuthorizedResourceById" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
select *
from t_ds_resources
where id in (select resources_id from t_ds_relation_resources_user where user_id=#{userId}
union select id as resources_id from t_ds_resources where user_id=#{userId})
<if test="resIds != null and resIds != ''">
and id in
<foreach collection="resIds" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
</if>
</select>
<delete id="deleteIds" parameterType="java.lang.Integer">
delete from t_ds_resources where id in
<foreach collection="resIds" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
</delete>
<select id="listChildren" resultType="java.lang.Integer">
select id
from t_ds_resources
where pid = #{direcotyId}
</select>
<select id="queryResource" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
select *
from t_ds_resources
where type = #{type}
and full_name = #{fullName}
</select>
<update id="batchUpdateResource" parameterType="java.util.List">
<foreach collection="resourceList" item="resource" index="index" open="" close="" separator =";">
update t_ds_resources
<set>
full_name=#{resource.fullName},
update_time=#{resource.updateTime}
</set>
<where>
id=#{resource.id}
</where>
</foreach>
</update>
<select id="listResourceByIds" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
select *
from t_ds_resources
where id in
<foreach collection="resIds" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
</select>
</mapper>
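Note that `batchUpdateResource` renders one `UPDATE` statement per list element, joined by the `;` separator of the `<foreach>`. On MySQL the driver rejects multi-statement calls unless the connection explicitly allows them, so the datasource URL would need `allowMultiQueries=true`; a hedged illustration (host and database name are placeholders):

    public class BatchUpdateJdbcUrlSketch {
        // allowMultiQueries=true lets MySQL Connector/J execute the
        // semicolon-separated UPDATE statements that <foreach separator=";"> produces.
        public static final String JDBC_URL =
                "jdbc:mysql://127.0.0.1:3306/dolphinscheduler"
                        + "?useUnicode=true&characterEncoding=UTF-8&allowMultiQueries=true";
    }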

24
dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/UdfFuncMapper.xml

@@ -87,4 +87,28 @@
</foreach>
</if>
</select>
<select id="listUdfByResourceId" resultType="org.apache.dolphinscheduler.dao.entity.UdfFunc">
select *
from t_ds_udfs
where 1=1
<if test="resourceIds != null and resourceIds != ''">
and resource_id in
<foreach collection="resourceIds" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
</if>
</select>
<select id="listAuthorizedUdfByResourceId" resultType="org.apache.dolphinscheduler.dao.entity.UdfFunc">
select *
from t_ds_udfs
where
id in (select udf_id from t_ds_relation_udfs_user where user_id=#{userId}
union select id as udf_id from t_ds_udfs where user_id=#{userId})
<if test="resourceIds != null and resourceIds != ''">
and resource_id in
<foreach collection="resourceIds" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
</if>
</select>
</mapper>

57
dolphinscheduler-dao/src/test/java/org/apache/dolphinscheduler/dao/mapper/ResourceMapperTest.java

@@ -34,6 +34,7 @@ import org.springframework.test.annotation.Rollback;
import org.springframework.test.context.junit4.SpringRunner;
import org.springframework.transaction.annotation.Transactional;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
@@ -68,7 +69,10 @@ public class ResourceMapperTest {
private Resource insertOne(){
//insertOne
Resource resource = new Resource();
- resource.setAlias("ut resource");
+ resource.setAlias("ut-resource");
resource.setFullName("/ut-resource");
resource.setPid(-1);
resource.setDirectory(false);
resource.setType(ResourceType.FILE);
resource.setUserId(111);
resourceMapper.insert(resource);
@@ -80,16 +84,32 @@
* @param user user
* @return Resource
*/
- private Resource createResource(User user){
+ private Resource createResource(User user,boolean isDirectory,ResourceType resourceType,int pid,String alias,String fullName){
//insertOne
Resource resource = new Resource();
- resource.setAlias(String.format("ut resource %s",user.getUserName()));
- resource.setType(ResourceType.FILE);
+ resource.setDirectory(isDirectory);
+ resource.setType(resourceType);
+ resource.setAlias(alias);
+ resource.setFullName(fullName);
resource.setUserId(user.getId());
resourceMapper.insert(resource);
return resource;
}
/**
* create resource by user
* @param user user
* @return Resource
*/
private Resource createResource(User user){
//insertOne
String alias = String.format("ut-resource-%s",user.getUserName());
String fullName = String.format("/%s",alias);
Resource resource = createResource(user, false, ResourceType.FILE, -1, alias, fullName);
return resource;
}
/**
* create user
* @return User
@@ -200,13 +220,15 @@
IPage<Resource> resourceIPage = resourceMapper.queryResourcePaging(
page,
- resource.getUserId(),
+ 0,
+ -1,
resource.getType().ordinal(),
""
);
IPage<Resource> resourceIPage1 = resourceMapper.queryResourcePaging(
page,
1110,
+ -1,
resource.getType().ordinal(),
""
);
@@ -289,7 +311,7 @@
resourceMapper.updateById(resource);
String resource1 = resourceMapper.queryTenantCodeByResourceName(
- resource.getAlias()
+ resource.getFullName(),ResourceType.FILE.ordinal()
);
@@ -305,22 +327,37 @@
User generalUser2 = createGeneralUser("user2");
// create one resource
Resource resource = createResource(generalUser2);
- Resource unauthorizedResource = createResource(generalUser2);
+ Resource unauthorizedResource = createResource(generalUser1);
// need download resources
- String[] resNames = new String[]{resource.getAlias(), unauthorizedResource.getAlias()};
+ String[] resNames = new String[]{resource.getFullName(), unauthorizedResource.getFullName()};
List<Resource> resources = resourceMapper.listAuthorizedResource(generalUser2.getId(), resNames);
Assert.assertEquals(generalUser2.getId(),resource.getUserId());
- Assert.assertFalse(resources.stream().map(t -> t.getAlias()).collect(toList()).containsAll(Arrays.asList(resNames)));
+ Assert.assertFalse(resources.stream().map(t -> t.getFullName()).collect(toList()).containsAll(Arrays.asList(resNames)));
// authorize object unauthorizedResource to generalUser
createResourcesUser(unauthorizedResource,generalUser2);
List<Resource> authorizedResources = resourceMapper.listAuthorizedResource(generalUser2.getId(), resNames);
- Assert.assertTrue(authorizedResources.stream().map(t -> t.getAlias()).collect(toList()).containsAll(Arrays.asList(resNames)));
+ Assert.assertTrue(authorizedResources.stream().map(t -> t.getFullName()).collect(toList()).containsAll(Arrays.asList(resNames)));
}
@Test
public void deleteIdsTest(){
// create a general user
User generalUser1 = createGeneralUser("user1");
Resource resource = createResource(generalUser1);
Resource resource1 = createResource(generalUser1);
List<Integer> resourceList = new ArrayList<>();
resourceList.add(resource.getId());
resourceList.add(resource1.getId());
int result = resourceMapper.deleteIds(resourceList.toArray(new Integer[resourceList.size()]));
Assert.assertEquals(result,2);
}
}

50
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/runner/TaskScheduleThread.java

@@ -23,15 +23,18 @@ import com.alibaba.fastjson.JSON;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.AuthorizationType;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.enums.TaskType;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.TaskTimeoutParameter;
import org.apache.dolphinscheduler.common.utils.CommonUtils;
import org.apache.dolphinscheduler.common.utils.HadoopUtils;
import org.apache.dolphinscheduler.common.utils.TaskParametersUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.common.utils.LoggerUtils;
import org.apache.dolphinscheduler.common.log.TaskLogDiscriminator;
@@ -96,7 +99,7 @@
TaskNode taskNode = JSON.parseObject(taskInstance.getTaskJson(), TaskNode.class);
// get resource files
- List<String> resourceFiles = createProjectResFiles(taskNode);
+ List<ResourceInfo> resourceFiles = createProjectResFiles(taskNode);
// copy hdfs/minio file to local
downloadResource(
taskInstance.getExecutePath(),
@@ -165,6 +168,7 @@
new Date(),
taskInstance.getId());
}

/**
* get global paras map
* @return * @return
@@ -289,14 +293,16 @@
/**
* create project resource files
*/
- private List<String> createProjectResFiles(TaskNode taskNode) throws Exception{
- Set<String> projectFiles = new HashSet<>();
+ private List<ResourceInfo> createProjectResFiles(TaskNode taskNode) throws Exception{
+ Set<ResourceInfo> projectFiles = new HashSet<>();
AbstractParameters baseParam = TaskParametersUtils.getParameters(taskNode.getType(), taskNode.getParams());
if (baseParam != null) {
- List<String> projectResourceFiles = baseParam.getResourceFilesList();
- projectFiles.addAll(projectResourceFiles);
+ List<ResourceInfo> projectResourceFiles = baseParam.getResourceFilesList();
+ if (projectResourceFiles != null) {
+ projectFiles.addAll(projectResourceFiles);
+ }
}
return new ArrayList<>(projectFiles);
@@ -309,18 +315,25 @@
* @param projectRes
* @param logger
*/
- private void downloadResource(String execLocalPath, List<String> projectRes, Logger logger) throws Exception {
+ private void downloadResource(String execLocalPath, List<ResourceInfo> projectRes, Logger logger) throws Exception {
checkDownloadPermission(projectRes);
- for (String res : projectRes) {
- File resFile = new File(execLocalPath, res);
+ String resourceName;
+ for (ResourceInfo res : projectRes) {
+ if (res.getId() != 0) {
+ Resource resource = processService.getResourceById(res.getId());
+ resourceName = resource.getFullName();
+ } else {
+ resourceName = res.getRes();
+ }
+ File resFile = new File(execLocalPath, resourceName);
if (!resFile.exists()) {
try {
// query the tenant code of the resource according to the name of the resource
- String tenantCode = processService.queryTenantCodeByResName(res);
- String resHdfsPath = HadoopUtils.getHdfsFilename(tenantCode, res);
+ String tenantCode = processService.queryTenantCodeByResName(resourceName, ResourceType.FILE);
+ String resHdfsPath = HadoopUtils.getHdfsResourceFileName(tenantCode, resourceName);
logger.info("get resource file from hdfs :{}", resHdfsPath);
- HadoopUtils.getInstance().copyHdfsToLocal(resHdfsPath, execLocalPath + File.separator + res, false, true);
+ HadoopUtils.getInstance().copyHdfsToLocal(resHdfsPath, execLocalPath + File.separator + resourceName, false, true);
}catch (Exception e){
logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage());
@@ -336,10 +349,17 @@
* @param projectRes resource name list
* @throws Exception exception
*/
- private void checkDownloadPermission(List<String> projectRes) throws Exception {
+ private void checkDownloadPermission(List<ResourceInfo> projectRes) throws Exception {
int userId = taskInstance.getProcessInstance().getExecutorId();
- String[] resNames = projectRes.toArray(new String[projectRes.size()]);
- PermissionCheck<String> permissionCheck = new PermissionCheck<>(AuthorizationType.RESOURCE_FILE, processService,resNames,userId,logger);
- permissionCheck.checkPermission();
+ if (projectRes.stream().allMatch(t -> t.getId() == 0)) {
+ String[] resNames = projectRes.stream().map(t -> t.getRes()).collect(Collectors.toList()).toArray(new String[projectRes.size()]);
+ PermissionCheck<String> permissionCheck = new PermissionCheck<>(AuthorizationType.RESOURCE_FILE_NAME, processService, resNames, userId, logger);
+ permissionCheck.checkPermission();
+ } else {
+ Integer[] resIds = projectRes.stream().map(t -> t.getId()).collect(Collectors.toList()).toArray(new Integer[projectRes.size()]);
+ PermissionCheck<Integer> permissionCheck = new PermissionCheck<>(AuthorizationType.RESOURCE_FILE_ID, processService, resIds, userId, logger);
+ permissionCheck.checkPermission();
+ }
}
}
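The id-versus-name branch above is the crux of the change: pre-1.2.2 task definitions only stored a resource name (`id == 0`), while new definitions store the id and resolve the current full name at run time, so renames and moves in the resource tree are picked up. A condensed sketch of that rule as a standalone helper (the class itself is illustrative; `ProcessService#getResourceById` is the lookup added in this patch):

    import org.apache.dolphinscheduler.common.process.ResourceInfo;
    import org.apache.dolphinscheduler.dao.entity.Resource;
    import org.apache.dolphinscheduler.service.process.ProcessService;

    public class ResourceNameResolverSketch {

        private final ProcessService processService;

        public ResourceNameResolverSketch(ProcessService processService) {
            this.processService = processService;
        }

        /** Resolves a task's ResourceInfo to the file name that should exist locally. */
        public String resolveName(ResourceInfo res) {
            if (res.getId() == 0) {
                // legacy definition: only the name was stored
                return res.getRes();
            }
            // new definition: look the resource up so a rename/move is reflected
            Resource resource = processService.getResourceById(res.getId());
            if (resource == null) {
                throw new RuntimeException(String.format("resource id: %d not exist", res.getId()));
            }
            return resource.getFullName();
        }
    }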

5
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractYarnTask.java

@@ -94,4 +94,9 @@ public abstract class AbstractYarnTask extends AbstractTask {
* @throws Exception exception
*/
protected abstract String buildCommand() throws Exception;

/**
* set main jar name
*/
protected abstract void setMainJarName();
}

25
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/flink/FlinkTask.java

@@ -17,12 +17,14 @@
package org.apache.dolphinscheduler.server.worker.task.flink;

import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.flink.FlinkParameters;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.server.utils.FlinkArgsUtils;
import org.apache.dolphinscheduler.server.utils.ParamUtils;
import org.apache.dolphinscheduler.server.worker.task.AbstractYarnTask;
@@ -63,6 +65,7 @@ public class FlinkTask extends AbstractYarnTask {
if (!flinkParameters.checkParameters()) {
throw new RuntimeException("flink task params is not valid");
}
setMainJarName();
flinkParameters.setQueue(taskProps.getQueue());
if (StringUtils.isNotEmpty(flinkParameters.getMainArgs())) {
@@ -111,6 +114,28 @@
return command;
}
@Override
protected void setMainJarName() {
// main jar
ResourceInfo mainJar = flinkParameters.getMainJar();
if (mainJar != null) {
int resourceId = mainJar.getId();
String resourceName;
if (resourceId == 0) {
resourceName = mainJar.getRes();
} else {
Resource resource = processService.getResourceById(flinkParameters.getMainJar().getId());
if (resource == null) {
logger.error("resource id: {} not exist", resourceId);
throw new RuntimeException(String.format("resource id: %d not exist", resourceId));
}
resourceName = resource.getFullName().replaceFirst("/", "");
}
mainJar.setRes(resourceName);
flinkParameters.setMainJar(mainJar);
}
}
@Override
public AbstractParameters getParameters() {
return flinkParameters;

26
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/mr/MapReduceTask.java

@@ -19,11 +19,13 @@ package org.apache.dolphinscheduler.server.worker.task.mr;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ProgramType;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.mr.MapreduceParameters;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.server.utils.ParamUtils;
import org.apache.dolphinscheduler.server.worker.task.AbstractYarnTask;
import org.apache.dolphinscheduler.server.worker.task.TaskProps;
@@ -64,7 +66,7 @@
if (!mapreduceParameters.checkParameters()) {
throw new RuntimeException("mapreduce task params is not valid");
}
setMainJarName();
mapreduceParameters.setQueue(taskProps.getQueue());

// replace placeholder
@@ -99,6 +101,28 @@
return command;
}
@Override
protected void setMainJarName() {
// main jar
ResourceInfo mainJar = mapreduceParameters.getMainJar();
if (mainJar != null) {
int resourceId = mainJar.getId();
String resourceName;
if (resourceId == 0) {
resourceName = mainJar.getRes();
} else {
Resource resource = processService.getResourceById(mapreduceParameters.getMainJar().getId());
if (resource == null) {
logger.error("resource id: {} not exist", resourceId);
throw new RuntimeException(String.format("resource id: %d not exist", resourceId));
}
resourceName = resource.getFullName().replaceFirst("/", "");
}
mainJar.setRes(resourceName);
mapreduceParameters.setMainJar(mainJar);
}
}
@Override
public AbstractParameters getParameters() {
return mapreduceParameters;

26
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/spark/SparkTask.java

@@ -18,11 +18,13 @@ package org.apache.dolphinscheduler.server.worker.task.spark;
import org.apache.dolphinscheduler.common.enums.SparkVersion;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.spark.SparkParameters;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.server.utils.ParamUtils;
import org.apache.dolphinscheduler.server.utils.SparkArgsUtils;
import org.apache.dolphinscheduler.server.worker.task.AbstractYarnTask;
@@ -67,8 +69,8 @@
if (!sparkParameters.checkParameters()) {
throw new RuntimeException("spark task params is not valid");
}
setMainJarName();
sparkParameters.setQueue(taskProps.getQueue());

if (StringUtils.isNotEmpty(sparkParameters.getMainArgs())) {
String args = sparkParameters.getMainArgs();
@@ -115,6 +117,28 @@
return command;
}
@Override
protected void setMainJarName() {
// main jar
ResourceInfo mainJar = sparkParameters.getMainJar();
if (mainJar != null) {
int resourceId = mainJar.getId();
String resourceName;
if (resourceId == 0) {
resourceName = mainJar.getRes();
} else {
Resource resource = processService.getResourceById(sparkParameters.getMainJar().getId());
if (resource == null) {
logger.error("resource id: {} not exist", resourceId);
throw new RuntimeException(String.format("resource id: %d not exist", resourceId));
}
resourceName = resource.getFullName().replaceFirst("/", "");
}
mainJar.setRes(resourceName);
sparkParameters.setMainJar(mainJar);
}
}
@Override
public AbstractParameters getParameters() {
return sparkParameters;
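`setMainJarName()` is now implemented with identical bodies in FlinkTask, MapReduceTask and SparkTask. A hedged sketch of how that duplication could instead live once in `AbstractYarnTask` (a possible refactor, not what this patch does; field names are assumed to match the existing task classes):

    import org.apache.dolphinscheduler.common.process.ResourceInfo;
    import org.apache.dolphinscheduler.dao.entity.Resource;
    import org.apache.dolphinscheduler.service.process.ProcessService;
    import org.slf4j.Logger;

    public abstract class AbstractYarnTaskSketch {

        protected ProcessService processService;
        protected Logger logger;

        /**
         * Shared main-jar resolution: id == 0 means a legacy definition that only
         * stored the name; otherwise look the resource up and strip the leading '/'.
         */
        protected void resolveMainJar(ResourceInfo mainJar) {
            if (mainJar == null) {
                return;
            }
            int resourceId = mainJar.getId();
            String resourceName;
            if (resourceId == 0) {
                resourceName = mainJar.getRes();
            } else {
                Resource resource = processService.getResourceById(resourceId);
                if (resource == null) {
                    logger.error("resource id: {} not exist", resourceId);
                    throw new RuntimeException(String.format("resource id: %d not exist", resourceId));
                }
                resourceName = resource.getFullName().replaceFirst("/", "");
            }
            mainJar.setRes(resourceName);
        }
    }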

8
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java

@@ -25,6 +25,7 @@ import org.apache.commons.lang.StringUtils;
import org.apache.dolphinscheduler.alert.utils.MailUtils;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.AuthorizationType;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.enums.ShowType;
import org.apache.dolphinscheduler.common.enums.TaskTimeoutStrategy;
import org.apache.dolphinscheduler.common.enums.UdfType;
@@ -247,11 +248,12 @@
List<String> createFuncs){
Connection connection = null;
try {
- // if upload resource is HDFS and kerberos startup
- CommonUtils.loadKerberosConf();
// if hive , load connection params if exists
- if (HIVE == dataSource.getType()) {
+ if (DbType.HIVE == dataSource.getType() || DbType.SPARK == dataSource.getType()) {
+ // if upload resource is HDFS and kerberos startup
+ CommonUtils.loadKerberosConf();
Properties paramProp = new Properties();
paramProp.setProperty(USER, baseDataSource.getUser());
paramProp.setProperty(PASSWORD, baseDataSource.getPassword());

4
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sqoop/SqoopTask.java

@@ -71,6 +71,10 @@ public class SqoopTask extends AbstractYarnTask {
return null;
}

@Override
protected void setMainJarName() {
}

@Override
public AbstractParameters getParameters() {
return sqoopParameters;

31
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/permission/PermissionCheck.java

@@ -18,6 +18,7 @@ package org.apache.dolphinscheduler.service.permission;
import org.apache.dolphinscheduler.common.enums.AuthorizationType;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.service.process.ProcessService;
@@ -45,6 +46,11 @@
*/
private T[] needChecks;

/**
* resource info
*/
private List<ResourceInfo> resourceList;

/**
* user id
*/
@@ -90,6 +96,22 @@
this.logger = logger;
}
/**
* permission check
* @param authorizationType authorization type
* @param processService process service
* @param resourceList resource list
* @param userId user id
* @param logger logger
*/
public PermissionCheck(AuthorizationType authorizationType, ProcessService processService, List<ResourceInfo> resourceList, int userId,Logger logger) {
this.authorizationType = authorizationType;
this.processService = processService;
this.resourceList = resourceList;
this.userId = userId;
this.logger = logger;
}
public AuthorizationType getAuthorizationType() {
return authorizationType;
}
@@ -122,6 +144,14 @@
this.userId = userId;
}
public List<ResourceInfo> getResourceList() {
return resourceList;
}
public void setResourceList(List<ResourceInfo> resourceList) {
this.resourceList = resourceList;
}
/**
* has permission
* @return true if has permission
@@ -141,6 +171,7 @@
*/
public void checkPermission() throws Exception{
if(this.needChecks.length > 0){
// get user type in order to judge whether the user is admin
User user = processService.getUserById(userId);
if (user.getUserType() != UserType.ADMIN_USER){
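For reference, the generic parameter of `PermissionCheck` matches the element type of `needChecks`, as in the worker code earlier in this diff. A minimal usage sketch for the id-based check (the wrapper method is hypothetical; the constructor is the five-argument one shown above):

    import org.apache.dolphinscheduler.common.enums.AuthorizationType;
    import org.apache.dolphinscheduler.service.permission.PermissionCheck;
    import org.apache.dolphinscheduler.service.process.ProcessService;
    import org.slf4j.Logger;

    public class PermissionCheckSketch {

        /** Throws if the user lacks access to any of the given resource ids. */
        public static void checkResourceIds(ProcessService processService, Logger logger,
                                            int userId, Integer[] resourceIds) throws Exception {
            PermissionCheck<Integer> check = new PermissionCheck<>(
                    AuthorizationType.RESOURCE_FILE_ID, processService, resourceIds, userId, logger);
            check.checkPermission();
        }
    }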

26
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java

@@ -1556,10 +1556,11 @@ public class ProcessService {
/**
* find tenant code by resource name
* @param resName resource name
* @param resourceType resource type
* @return tenant code
*/
- public String queryTenantCodeByResName(String resName){
- return resourceMapper.queryTenantCodeByResourceName(resName);
+ public String queryTenantCodeByResName(String resName, ResourceType resourceType){
+ return resourceMapper.queryTenantCodeByResourceName(resName, resourceType.ordinal());
}

/**
@@ -1791,10 +1792,18 @@
Set<T> originResSet = new HashSet<T>(Arrays.asList(needChecks));
switch (authorizationType){
- case RESOURCE_FILE:
- Set<String> authorizedResources = resourceMapper.listAuthorizedResource(userId, needChecks).stream().map(t -> t.getAlias()).collect(toSet());
+ case RESOURCE_FILE_ID:
+ Set<Integer> authorizedResourceFiles = resourceMapper.listAuthorizedResourceById(userId, needChecks).stream().map(t -> t.getId()).collect(toSet());
+ originResSet.removeAll(authorizedResourceFiles);
+ break;
+ case RESOURCE_FILE_NAME:
+ Set<String> authorizedResources = resourceMapper.listAuthorizedResource(userId, needChecks).stream().map(t -> t.getFullName()).collect(toSet());
originResSet.removeAll(authorizedResources);
break;
+ case UDF_FILE:
+ Set<Integer> authorizedUdfFiles = resourceMapper.listAuthorizedResourceById(userId, needChecks).stream().map(t -> t.getId()).collect(toSet());
+ originResSet.removeAll(authorizedUdfFiles);
+ break;
case DATASOURCE:
Set<Integer> authorizedDatasources = dataSourceMapper.listAuthorizedDataSource(userId,needChecks).stream().map(t -> t.getId()).collect(toSet());
originResSet.removeAll(authorizedDatasources);
@@ -1820,5 +1829,14 @@
return userMapper.queryDetailsById(userId);
}

/**
* get resource by resource id
* @param resourceId resource id
* @return Resource
*/
public Resource getResourceById(int resourceId){
return resourceMapper.selectById(resourceId);
}
}

24
dolphinscheduler-ui/src/js/conf/home/pages/datasource/pages/list/_source/createDataSource.vue

@@ -128,6 +128,15 @@
</x-input>
</template>
</m-list-box-f>
<m-list-box-f v-if="showConnectType">
<template slot="name"><strong>*</strong>{{$t('Oracle Connect Type')}}</template>
<template slot="content">
<x-radio-group v-model="connectType" size="small">
<x-radio :label="'ORACLE_SERVICE_NAME'">{{$t('Oracle Service Name')}}</x-radio>
<x-radio :label="'ORACLE_SID'">{{$t('Oracle SID')}}</x-radio>
</x-radio-group>
</template>
</m-list-box-f>
<m-list-box-f>
<template slot="name">{{$t('jdbc connect parameters')}}</template>
<template slot="content">
@@ -152,7 +161,7 @@
<script>
import i18n from '@/module/i18n'
import store from '@/conf/home/store'
- import { isJson } from '@/module/util/util'
+ import {isJson} from '@/module/util/util'
import mPopup from '@/module/components/popup/popup'
import mListBoxF from '@/module/components/listBoxF/listBoxF'
@@ -181,12 +190,15 @@
userName: '',
// Database password
password: '',
// Database connect type
connectType: '',
// Jdbc connection parameter
other: '',
// btn test loading
testLoading: false,
showPrincipal: true,
showdDatabase: false,
showConnectType: false,
isShowPrincipal:true,
prePortMapper:{}
}
@@ -229,6 +241,7 @@
principal: this.principal,
userName: this.userName,
password: this.password,
connectType: this.connectType,
other: this.other
}
},
@@ -339,6 +352,7 @@
this.database = res.database
this.userName = res.userName
this.password = res.password
this.connectType = res.connectType
this.other = JSON.stringify(res.other) === '{}' ? '' : JSON.stringify(res.other)
}).catch(e => {
this.$message.error(e.msg || '')
@@ -415,6 +429,14 @@
this.showdDatabase = false;
}

if (value == 'ORACLE') {
this.showConnectType = true;
this.connectType = 'ORACLE_SERVICE_NAME'
} else {
this.showConnectType = false;
this.connectType = ''
}

//Set default port for each type datasource
this._setDefaultValues(value)
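The radio group only records which style the user picked; it is the backend that maps `connectType` onto the two conventional Oracle thin-driver URL shapes. For orientation, a hedged illustration of those shapes (host, port, service name and SID are placeholders):

    public class OracleUrlSketch {
        // ORACLE_SERVICE_NAME: "//host:port/serviceName"
        public static final String SERVICE_NAME_URL = "jdbc:oracle:thin:@//db-host:1521/orcl_service";
        // ORACLE_SID: "host:port:sid"
        public static final String SID_URL = "jdbc:oracle:thin:@db-host:1521:orclsid";
    }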

0
dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/_source/common.js

0
dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/list/_source/list.vue

0
dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/list/index.vue

0
dolphinscheduler-ui/src/js/conf/home/store/resource/actions.js

0
dolphinscheduler-ui/src/js/module/components/fileUpdate/fileUpdate.vue

3
dolphinscheduler-ui/src/js/module/i18n/locale/en_US.js

@@ -135,6 +135,9 @@ export default {
'Please enter port': 'Please enter port',
'Database Name': 'Database Name',
'Please enter database name': 'Please enter database name',
'Oracle Connect Type': 'ServiceName or SID',
'Oracle Service Name': 'ServiceName',
'Oracle SID': 'SID',
'jdbc connect parameters': 'jdbc connect parameters',
'Test Connect': 'Test Connect',
'Please enter resource name': 'Please enter resource name',

3
dolphinscheduler-ui/src/js/module/i18n/locale/zh_CN.js

@@ -139,6 +139,9 @@ export default {
'Please enter port': '请输入端口',
'Database Name': '数据库名',
'Please enter database name': '请输入数据库名',
'Oracle Connect Type': '服务名或SID',
'Oracle Service Name': '服务名',
'Oracle SID': 'SID',
'jdbc connect parameters': 'jdbc连接参数',
'Test Connect': '测试连接',
'Please enter resource name': '请输入数据源名称',

1
pom.xml

@@ -699,6 +699,7 @@
<include>**/api/utils/exportprocess/DataSourceParamTest.java</include>
<include>**/api/utils/exportprocess/DependentParamTest.java</include>
<include>**/api/enums/*.java</include>
<include>**/api/controller/DataSourceControllerTest.java</include>
<include>**/api/service/AccessTokenServiceTest.java</include>
<include>**/api/service/QueueServiceTest.java</include>
<include>**/api/service/MonitorServiceTest.java</include>

2
sql/soft_version

@@ -1 +1 @@
- 1.2.0
+ 1.2.2

80
sql/upgrade/1.2.2_schema/mysql/dolphinscheduler_ddl.sql

@@ -75,3 +75,83 @@ d//
delimiter ;
CALL uc_dolphin_T_t_ds_task_instance_C_app_link;
DROP PROCEDURE uc_dolphin_T_t_ds_task_instance_C_app_link;
-- ac_dolphin_T_t_ds_resources_A_pid
drop PROCEDURE if EXISTS ac_dolphin_T_t_ds_resources_A_pid;
delimiter d//
CREATE PROCEDURE ac_dolphin_T_t_ds_resources_A_pid()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_resources'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='pid')
THEN
ALTER TABLE t_ds_resources ADD `pid` int(11) DEFAULT -1 COMMENT 'parent id';
END IF;
END;
d//
delimiter ;
CALL ac_dolphin_T_t_ds_resources_A_pid;
DROP PROCEDURE ac_dolphin_T_t_ds_resources_A_pid;
-- ac_dolphin_T_t_ds_resources_A_full_name
drop PROCEDURE if EXISTS ac_dolphin_T_t_ds_resources_A_full_name;
delimiter d//
CREATE PROCEDURE ac_dolphin_T_t_ds_resources_A_full_name()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_resources'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='full_name')
THEN
ALTER TABLE t_ds_resources ADD `full_name` varchar(255) DEFAULT NULL COMMENT 'full name';
END IF;
END;
d//
delimiter ;
CALL ac_dolphin_T_t_ds_resources_A_full_name;
DROP PROCEDURE ac_dolphin_T_t_ds_resources_A_full_name;
-- ac_dolphin_T_t_ds_resources_is_directory
drop PROCEDURE if EXISTS ac_dolphin_T_t_ds_resources_is_directory;
delimiter d//
CREATE PROCEDURE ac_dolphin_T_t_ds_resources_is_directory()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_resources'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='is_directory')
THEN
ALTER TABLE t_ds_resources ADD `is_directory` tinyint(1) DEFAULT 0 COMMENT 'is directory';
END IF;
END;
d//
delimiter ;
CALL ac_dolphin_T_t_ds_resources_is_directory;
DROP PROCEDURE ac_dolphin_T_t_ds_resources_is_directory;
-- ac_dolphin_T_t_ds_process_definition_A_resource_ids
drop PROCEDURE if EXISTS ac_dolphin_T_t_ds_process_definition_A_resource_ids;
delimiter d//
CREATE PROCEDURE ac_dolphin_T_t_ds_process_definition_A_resource_ids()
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_process_definition'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='resource_ids')
THEN
ALTER TABLE t_ds_process_definition ADD `resource_ids` varchar(255) DEFAULT NULL COMMENT 'resource ids';
END IF;
END;
d//
delimiter ;
CALL ac_dolphin_T_t_ds_process_definition_A_resource_ids;
DROP PROCEDURE ac_dolphin_T_t_ds_process_definition_A_resource_ids;

83
sql/upgrade/1.2.2_schema/postgresql/dolphinscheduler_ddl.sql

@@ -67,3 +67,86 @@ d//
delimiter ;
SELECT uc_dolphin_T_t_ds_task_instance_C_app_link();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_task_instance_C_app_link();
-- ac_dolphin_T_t_ds_resources_A_pid
delimiter d//
CREATE FUNCTION ac_dolphin_T_t_ds_resources_A_pid() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_CATALOG=current_database()
AND TABLE_SCHEMA=current_schema()
AND TABLE_NAME='t_ds_resources'
AND COLUMN_NAME ='pid')
THEN
ALTER TABLE t_ds_resources ADD COLUMN pid int DEFAULT -1;
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
select ac_dolphin_T_t_ds_resources_A_pid();
DROP FUNCTION ac_dolphin_T_t_ds_resources_A_pid();
-- ac_dolphin_T_t_ds_resources_A_full_name
delimiter ;
DROP FUNCTION IF EXISTS ac_dolphin_T_t_ds_resources_A_full_name();
delimiter d//
CREATE FUNCTION ac_dolphin_T_t_ds_resources_A_full_name() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_CATALOG=current_database()
AND TABLE_SCHEMA=current_schema()
AND TABLE_NAME='t_ds_resources'
AND COLUMN_NAME ='full_name')
THEN
ALTER TABLE t_ds_resources ADD COLUMN full_name varchar(255) DEFAULT null;
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
select ac_dolphin_T_t_ds_resources_A_full_name();
DROP FUNCTION ac_dolphin_T_t_ds_resources_A_full_name();
-- ac_dolphin_T_t_ds_resources_A_is_directory
delimiter ;
DROP FUNCTION IF EXISTS ac_dolphin_T_t_ds_resources_A_is_directory();
delimiter d//
CREATE FUNCTION ac_dolphin_T_t_ds_resources_A_is_directory() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_CATALOG=current_database()
AND TABLE_SCHEMA=current_schema()
AND TABLE_NAME='t_ds_resources'
AND COLUMN_NAME ='is_directory')
THEN
ALTER TABLE t_ds_resources ADD COLUMN is_directory boolean DEFAULT false;
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
select ac_dolphin_T_t_ds_resources_A_is_directory();
DROP FUNCTION ac_dolphin_T_t_ds_resources_A_is_directory();
-- ac_dolphin_T_t_ds_process_definition_A_resource_ids
delimiter ;
DROP FUNCTION IF EXISTS ac_dolphin_T_t_ds_process_definition_A_resource_ids();
delimiter d//
CREATE FUNCTION ac_dolphin_T_t_ds_process_definition_A_resource_ids() RETURNS void AS $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_CATALOG=current_database()
AND TABLE_SCHEMA=current_schema()
AND TABLE_NAME='t_ds_process_definition'
AND COLUMN_NAME ='resource_ids')
THEN
ALTER TABLE t_ds_process_definition ADD COLUMN resource_ids varchar(255) DEFAULT null;
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
select ac_dolphin_T_t_ds_process_definition_A_resource_ids();
DROP FUNCTION ac_dolphin_T_t_ds_process_definition_A_resource_ids();

2
sql/upgrade/1.2.2_schema/postgresql/dolphinscheduler_dml.sql

@@ -14,3 +14,5 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
UPDATE t_ds_resources SET pid=-1,is_directory=false WHERE pid IS NULL;
UPDATE t_ds_resources SET full_name = concat('/',alias) WHERE pid=-1 and full_name IS NULL;