diff --git a/charts/dolphinscheduler/README.md b/charts/dolphinscheduler/README.md deleted file mode 100644 index 6f0317b9e2..0000000000 --- a/charts/dolphinscheduler/README.md +++ /dev/null @@ -1,226 +0,0 @@ -# Dolphin Scheduler - -[Dolphin Scheduler](https://dolphinscheduler.apache.org) is a distributed and easy-to-expand visual DAG workflow scheduling system, dedicated to solving the complex dependencies in data processing, making the scheduling system out of the box for data processing. - -## Introduction -This chart bootstraps a [Dolphin Scheduler](https://dolphinscheduler.apache.org) distributed deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. - -## Prerequisites - -- Kubernetes 1.10+ -- PV provisioner support in the underlying infrastructure - -## Installing the Chart - -To install the chart with the release name `my-release`: - -```bash -$ git clone https://github.com/apache/incubator-dolphinscheduler.git -$ cd incubator-dolphinscheduler -$ helm install --name dolphinscheduler . -``` -These commands deploy Dolphin Scheduler on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. - -> **Tip**: List all releases using `helm list` - -## Uninstalling the Chart - -To uninstall/delete the `dolphinscheduler` deployment: - -```bash -$ helm delete --purge dolphinscheduler -``` - -The command removes all the Kubernetes components associated with the chart and deletes the release. - -## Configuration - -The following tables lists the configurable parameters of the Dolphins Scheduler chart and their default values. 
- -| Parameter | Description | Default | -| --------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------- | -| `timezone` | World time and date for cities in all time zones | `Asia/Shanghai` | -| `image.registry` | Docker image registry for the Dolphins Scheduler | `docker.io` | -| `image.repository` | Docker image repository for the Dolphins Scheduler | `dolphinscheduler` | -| `image.tag` | Docker image version for the Dolphins Scheduler | `1.2.1` | -| `image.imagePullPolicy` | Image pull policy. One of Always, Never, IfNotPresent | `IfNotPresent` | -| `imagePullSecrets` | ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images | `[]` | -| | | | -| `postgresql.enabled` | If not exists external PostgreSQL, by default, the Dolphins Scheduler will use a internal PostgreSQL | `true` | -| `postgresql.postgresqlUsername` | The username for internal PostgreSQL | `root` | -| `postgresql.postgresqlPassword` | The password for internal PostgreSQL | `root` | -| `postgresql.postgresqlDatabase` | The database for internal PostgreSQL | `dolphinscheduler` | -| `postgresql.persistence.enabled` | Set `postgresql.persistence.enabled` to `true` to mount a new volume for internal PostgreSQL | `false` | -| `postgresql.persistence.size` | `PersistentVolumeClaim` Size | `20Gi` | -| `postgresql.persistence.storageClass` | PostgreSQL data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` | -| `externalDatabase.host` | If exists external PostgreSQL, and set `postgresql.enable` value to false. Dolphins Scheduler's database host will use it. | `localhost` | -| `externalDatabase.port` | If exists external PostgreSQL, and set `postgresql.enable` value to false. 
Dolphins Scheduler's database port will use it. | `5432` | -| `externalDatabase.username` | If exists external PostgreSQL, and set `postgresql.enable` value to false. Dolphins Scheduler's database username will use it. | `root` | -| `externalDatabase.password` | If exists external PostgreSQL, and set `postgresql.enable` value to false. Dolphins Scheduler's database password will use it. | `root` | -| `externalDatabase.database` | If exists external PostgreSQL, and set `postgresql.enable` value to false. Dolphins Scheduler's database database will use it. | `dolphinscheduler` | -| | | | -| `zookeeper.enabled` | If not exists external Zookeeper, by default, the Dolphin Scheduler will use a internal Zookeeper | `true` | -| `zookeeper.taskQueue` | Specify task queue for `master` and `worker` | `zookeeper` | -| `zookeeper.persistence.enabled` | Set `zookeeper.persistence.enabled` to `true` to mount a new volume for internal Zookeeper | `false` | -| `zookeeper.persistence.size` | `PersistentVolumeClaim` Size | `20Gi` | -| `zookeeper.persistence.storageClass` | Zookeeper data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` | -| `externalZookeeper.taskQueue` | If exists external Zookeeper, and set `zookeeper.enable` value to false. Specify task queue for `master` and `worker` | `zookeeper` | -| `externalZookeeper.zookeeperQuorum` | If exists external Zookeeper, and set `zookeeper.enable` value to false. 
Specify Zookeeper quorum | `127.0.0.1:2181` | -| | | | -| `master.podManagementPolicy` | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down | `Parallel` | -| `master.replicas` | Replicas is the desired number of replicas of the given Template | `3` | -| `master.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` | -| `master.tolerations` | If specified, the pod's tolerations | `{}` | -| `master.affinity` | If specified, the pod's scheduling constraints | `{}` | -| `master.configmap.MASTER_EXEC_THREADS` | Master execute thread num | `100` | -| `master.configmap.MASTER_EXEC_TASK_NUM` | Master execute task number in parallel | `20` | -| `master.configmap.MASTER_HEARTBEAT_INTERVAL` | Master heartbeat interval | `10` | -| `master.configmap.MASTER_TASK_COMMIT_RETRYTIMES` | Master commit task retry times | `5` | -| `master.configmap.MASTER_TASK_COMMIT_INTERVAL` | Master commit task interval | `1000` | -| `master.configmap.MASTER_MAX_CPULOAD_AVG` | Only less than cpu avg load, master server can work. default value : the number of cpu cores * 2 | `100` | -| `master.configmap.MASTER_RESERVED_MEMORY` | Only larger than reserved memory, master server can work. 
default value : physical memory * 1/10, unit is G | `0.1` | -| `master.livenessProbe.enabled` | Turn on and off liveness probe | `true` | -| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | -| `master.livenessProbe.periodSeconds` | How often to perform the probe | `30` | -| `master.livenessProbe.timeoutSeconds` | When the probe times out | `5` | -| `master.livenessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` | -| `master.livenessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` | -| `master.readinessProbe.enabled` | Turn on and off readiness probe | `true` | -| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` | -| `master.readinessProbe.periodSeconds` | How often to perform the probe | `30` | -| `master.readinessProbe.timeoutSeconds` | When the probe times out | `5` | -| `master.readinessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` | -| `master.readinessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` | -| `master.persistentVolumeClaim.enabled` | Set `master.persistentVolumeClaim.enabled` to `true` to mount a new volume for `master` | `false` | -| `master.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` | -| `master.persistentVolumeClaim.storageClassName` | `Master` logs data Persistent Volume Storage Class. 
If set to "-", storageClassName: "", which disables dynamic provisioning | `-` | -| `master.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` | -| | | | -| `worker.podManagementPolicy` | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down | `Parallel` | -| `worker.replicas` | Replicas is the desired number of replicas of the given Template | `3` | -| `worker.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` | -| `worker.tolerations` | If specified, the pod's tolerations | `{}` | -| `worker.affinity` | If specified, the pod's scheduling constraints | `{}` | -| `worker.configmap.WORKER_EXEC_THREADS` | Worker execute thread num | `100` | -| `worker.configmap.WORKER_HEARTBEAT_INTERVAL` | Worker heartbeat interval | `10` | -| `worker.configmap.WORKER_FETCH_TASK_NUM` | Submit the number of tasks at a time | `3` | -| `worker.configmap.WORKER_MAX_CPULOAD_AVG` | Only less than cpu avg load, worker server can work. default value : the number of cpu cores * 2 | `100` | -| `worker.configmap.WORKER_RESERVED_MEMORY` | Only larger than reserved memory, worker server can work. 
default value : physical memory * 1/10, unit is G | `0.1` | -| `worker.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH` | User data directory path, self configuration, please make sure the directory exists and have read write permissions | `/tmp/dolphinscheduler` | -| `worker.configmap.DOLPHINSCHEDULER_ENV` | System env path, self configuration, please read `values.yaml` | `[]` | -| `worker.livenessProbe.enabled` | Turn on and off liveness probe | `true` | -| `worker.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | -| `worker.livenessProbe.periodSeconds` | How often to perform the probe | `30` | -| `worker.livenessProbe.timeoutSeconds` | When the probe times out | `5` | -| `worker.livenessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` | -| `worker.livenessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` | -| `worker.readinessProbe.enabled` | Turn on and off readiness probe | `true` | -| `worker.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` | -| `worker.readinessProbe.periodSeconds` | How often to perform the probe | `30` | -| `worker.readinessProbe.timeoutSeconds` | When the probe times out | `5` | -| `worker.readinessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` | -| `worker.readinessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` | -| `worker.persistentVolumeClaim.enabled` | Set `worker.persistentVolumeClaim.enabled` to `true` to enable `persistentVolumeClaim` for `worker` | `false` | -| `worker.persistentVolumeClaim.dataPersistentVolume.enabled` | Set `worker.persistentVolumeClaim.dataPersistentVolume.enabled` to `true` to mount a data volume for `worker` | `false` | -| `worker.persistentVolumeClaim.dataPersistentVolume.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` | -| `worker.persistentVolumeClaim.dataPersistentVolume.storageClassName` | `Worker` 
data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` | -| `worker.persistentVolumeClaim.dataPersistentVolume.storage` | `PersistentVolumeClaim` Size | `20Gi` | -| `worker.persistentVolumeClaim.logsPersistentVolume.enabled` | Set `worker.persistentVolumeClaim.logsPersistentVolume.enabled` to `true` to mount a logs volume for `worker` | `false` | -| `worker.persistentVolumeClaim.logsPersistentVolume.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` | -| `worker.persistentVolumeClaim.logsPersistentVolume.storageClassName` | `Worker` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` | -| `worker.persistentVolumeClaim.logsPersistentVolume.storage` | `PersistentVolumeClaim` Size | `20Gi` | -| | | | -| `alert.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` | -| `alert.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` | -| `alert.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` | -| `alert.replicas` | Replicas is the desired number of replicas of the given Template | `1` | -| `alert.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` | -| `alert.tolerations` | If specified, the pod's tolerations | `{}` | -| `alert.affinity` | If specified, the pod's scheduling constraints | `{}` | -| `alert.configmap.XLS_FILE_PATH` | XLS file path | `/tmp/xls` | -| `alert.configmap.MAIL_SERVER_HOST` | Mail `SERVER HOST ` | `nil` | -| `alert.configmap.MAIL_SERVER_PORT` | Mail `SERVER PORT` | `nil` | -| `alert.configmap.MAIL_SENDER` | Mail `SENDER` | `nil` | -| `alert.configmap.MAIL_USER` | Mail `USER` | `nil` | -| `alert.configmap.MAIL_PASSWD` | Mail `PASSWORD` | `nil` | -| 
`alert.configmap.MAIL_SMTP_STARTTLS_ENABLE` | Mail `SMTP STARTTLS` enable | `false` | -| `alert.configmap.MAIL_SMTP_SSL_ENABLE` | Mail `SMTP SSL` enable | `false` | -| `alert.configmap.MAIL_SMTP_SSL_TRUST` | Mail `SMTP SSL TRUST` | `nil` | -| `alert.configmap.ENTERPRISE_WECHAT_ENABLE` | `Enterprise Wechat` enable | `false` | -| `alert.configmap.ENTERPRISE_WECHAT_CORP_ID` | `Enterprise Wechat` corp id | `nil` | -| `alert.configmap.ENTERPRISE_WECHAT_SECRET` | `Enterprise Wechat` secret | `nil` | -| `alert.configmap.ENTERPRISE_WECHAT_AGENT_ID` | `Enterprise Wechat` agent id | `nil` | -| `alert.configmap.ENTERPRISE_WECHAT_USERS` | `Enterprise Wechat` users | `nil` | -| `alert.livenessProbe.enabled` | Turn on and off liveness probe | `true` | -| `alert.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | -| `alert.livenessProbe.periodSeconds` | How often to perform the probe | `30` | -| `alert.livenessProbe.timeoutSeconds` | When the probe times out | `5` | -| `alert.livenessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` | -| `alert.livenessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` | -| `alert.readinessProbe.enabled` | Turn on and off readiness probe | `true` | -| `alert.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` | -| `alert.readinessProbe.periodSeconds` | How often to perform the probe | `30` | -| `alert.readinessProbe.timeoutSeconds` | When the probe times out | `5` | -| `alert.readinessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` | -| `alert.readinessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` | -| `alert.persistentVolumeClaim.enabled` | Set `alert.persistentVolumeClaim.enabled` to `true` to mount a new volume for `alert` | `false` | -| `alert.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` | -| 
`alert.persistentVolumeClaim.storageClassName` | `Alert` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` | -| `alert.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` | -| | | | -| `api.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` | -| `api.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` | -| `api.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` | -| `api.replicas` | Replicas is the desired number of replicas of the given Template | `1` | -| `api.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` | -| `api.tolerations` | If specified, the pod's tolerations | `{}` | -| `api.affinity` | If specified, the pod's scheduling constraints | `{}` | -| `api.livenessProbe.enabled` | Turn on and off liveness probe | `true` | -| `api.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | -| `api.livenessProbe.periodSeconds` | How often to perform the probe | `30` | -| `api.livenessProbe.timeoutSeconds` | When the probe times out | `5` | -| `api.livenessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` | -| `api.livenessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` | -| `api.readinessProbe.enabled` | Turn on and off readiness probe | `true` | -| `api.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` | -| `api.readinessProbe.periodSeconds` | How often to perform the probe | `30` | -| `api.readinessProbe.timeoutSeconds` | When the probe times out | `5` | -| `api.readinessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` | -| `api.readinessProbe.successThreshold` | Minimum consecutive failures for the 
probe | `1` | -| `api.persistentVolumeClaim.enabled` | Set `api.persistentVolumeClaim.enabled` to `true` to mount a new volume for `api` | `false` | -| `api.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` | -| `api.persistentVolumeClaim.storageClassName` | `api` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` | -| `api.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` | -| | | | -| `frontend.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` | -| `frontend.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` | -| `frontend.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` | -| `frontend.replicas` | Replicas is the desired number of replicas of the given Template | `1` | -| `frontend.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` | -| `frontend.tolerations` | If specified, the pod's tolerations | `{}` | -| `frontend.affinity` | If specified, the pod's scheduling constraints | `{}` | -| `frontend.livenessProbe.enabled` | Turn on and off liveness probe | `true` | -| `frontend.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | -| `frontend.livenessProbe.periodSeconds` | How often to perform the probe | `30` | -| `frontend.livenessProbe.timeoutSeconds` | When the probe times out | `5` | -| `frontend.livenessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` | -| `frontend.livenessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` | -| `frontend.readinessProbe.enabled` | Turn on and off readiness probe | `true` | -| `frontend.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` | -| 
`frontend.readinessProbe.periodSeconds` | How often to perform the probe | `30` | -| `frontend.readinessProbe.timeoutSeconds` | When the probe times out | `5` | -| `frontend.readinessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` | -| `frontend.readinessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` | -| `frontend.persistentVolumeClaim.enabled` | Set `frontend.persistentVolumeClaim.enabled` to `true` to mount a new volume for `frontend` | `false` | -| `frontend.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` | -| `frontend.persistentVolumeClaim.storageClassName` | `frontend` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` | -| `frontend.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` | -| | | | -| `ingress.enabled` | Enable ingress | `false` | -| `ingress.host` | Ingress host | `dolphinscheduler.org` | -| `ingress.path` | Ingress path | `/` | -| `ingress.tls.enabled` | Enable ingress tls | `false` | -| `ingress.tls.hosts` | Ingress tls hosts | `dolphinscheduler.org` | -| `ingress.tls.secretName` | Ingress tls secret name | `dolphinscheduler-tls` | - -For more information please refer to the [chart](https://github.com/apache/incubator-dolphinscheduler.git) documentation. 
diff --git a/charts/dolphinscheduler/Chart.yaml b/kubernetes/dolphinscheduler/Chart.yaml similarity index 86% rename from charts/dolphinscheduler/Chart.yaml rename to kubernetes/dolphinscheduler/Chart.yaml index 2c40f94d3c..ac989d571f 100644 --- a/charts/dolphinscheduler/Chart.yaml +++ b/kubernetes/dolphinscheduler/Chart.yaml @@ -21,8 +21,8 @@ description: Dolphin Scheduler is a distributed and easy-to-expand visual DAG wo home: https://dolphinscheduler.apache.org icon: https://dolphinscheduler.apache.org/img/hlogo_colorful.svg keywords: - - dolphinscheduler - - Scheduler +- dolphinscheduler +- Scheduler # A chart can be either an 'application' or a 'library' chart. # # Application charts are a collection of templates that can be packaged into versioned archives @@ -35,18 +35,18 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 0.1.0 +version: 1.0.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 1.2.1 +appVersion: 1.3.0 dependencies: - - name: postgresql - version: 8.x.x - repository: https://charts.bitnami.com/bitnami - condition: postgresql.enabled - - name: zookeeper - version: 5.x.x - repository: https://charts.bitnami.com/bitnami - condition: redis.enabled +- name: postgresql + version: 8.x.x + repository: https://charts.bitnami.com/bitnami + condition: postgresql.enabled +- name: zookeeper + version: 5.x.x + repository: https://charts.bitnami.com/bitnami + condition: zookeeper.enabled diff --git a/charts/README.md b/kubernetes/dolphinscheduler/README.md similarity index 99% rename from charts/README.md rename to kubernetes/dolphinscheduler/README.md index 6f0317b9e2..9e6d1c6448 100644 --- a/charts/README.md +++ b/kubernetes/dolphinscheduler/README.md @@ -16,7 +16,9 @@ To install the chart with the release name `my-release`: ```bash $ git clone https://github.com/apache/incubator-dolphinscheduler.git -$ cd incubator-dolphinscheduler +$ cd incubator-dolphinscheduler/kubernetes/dolphinscheduler +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm dependency update . $ helm install --name dolphinscheduler . ``` These commands deploy Dolphin Scheduler on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. 
diff --git a/kubernetes/dolphinscheduler/requirements.yaml b/kubernetes/dolphinscheduler/requirements.yaml new file mode 100644 index 0000000000..a2fde1b40c --- /dev/null +++ b/kubernetes/dolphinscheduler/requirements.yaml @@ -0,0 +1,9 @@ +dependencies: +- name: postgresql + version: 8.x.x + repository: https://charts.bitnami.com/bitnami + condition: postgresql.enabled +- name: zookeeper + version: 5.x.x + repository: https://charts.bitnami.com/bitnami + condition: zookeeper.enabled \ No newline at end of file diff --git a/charts/dolphinscheduler/templates/NOTES.txt b/kubernetes/dolphinscheduler/templates/NOTES.txt similarity index 100% rename from charts/dolphinscheduler/templates/NOTES.txt rename to kubernetes/dolphinscheduler/templates/NOTES.txt diff --git a/charts/dolphinscheduler/templates/_helpers.tpl b/kubernetes/dolphinscheduler/templates/_helpers.tpl similarity index 88% rename from charts/dolphinscheduler/templates/_helpers.tpl rename to kubernetes/dolphinscheduler/templates/_helpers.tpl index 37fb034128..9ba290b771 100644 --- a/charts/dolphinscheduler/templates/_helpers.tpl +++ b/kubernetes/dolphinscheduler/templates/_helpers.tpl @@ -130,20 +130,4 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this {{- define "dolphinscheduler.worker.base.dir" -}} {{- $name := default "/tmp/dolphinscheduler" .Values.worker.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH -}} {{- printf "%s" $name | trunc 63 | trimSuffix "/" -}} -{{- end -}} - -{{/* -Create a default dolphinscheduler worker data download dir. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -*/}} -{{- define "dolphinscheduler.worker.data.download.dir" -}} -{{- printf "%s%s" (include "dolphinscheduler.worker.base.dir" .) "/download" -}} -{{- end -}} - -{{/* -Create a default dolphinscheduler worker process exec dir. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
-*/}} -{{- define "dolphinscheduler.worker.process.exec.dir" -}} -{{- printf "%s%s" (include "dolphinscheduler.worker.base.dir" .) "/exec" -}} {{- end -}} \ No newline at end of file diff --git a/charts/dolphinscheduler/templates/configmap-dolphinscheduler-alert.yaml b/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-alert.yaml similarity index 100% rename from charts/dolphinscheduler/templates/configmap-dolphinscheduler-alert.yaml rename to kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-alert.yaml diff --git a/charts/dolphinscheduler/templates/configmap-dolphinscheduler-master.yaml b/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-master.yaml similarity index 90% rename from charts/dolphinscheduler/templates/configmap-dolphinscheduler-master.yaml rename to kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-master.yaml index 8cce068276..da82d639cb 100644 --- a/charts/dolphinscheduler/templates/configmap-dolphinscheduler-master.yaml +++ b/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-master.yaml @@ -31,4 +31,6 @@ data: MASTER_TASK_COMMIT_INTERVAL: {{ .Values.master.configmap.MASTER_TASK_COMMIT_INTERVAL | quote }} MASTER_MAX_CPULOAD_AVG: {{ .Values.master.configmap.MASTER_MAX_CPULOAD_AVG | quote }} MASTER_RESERVED_MEMORY: {{ .Values.master.configmap.MASTER_RESERVED_MEMORY | quote }} + MASTER_LISTEN_PORT: {{ .Values.master.configmap.MASTER_LISTEN_PORT | quote }} + DOLPHINSCHEDULER_DATA_BASEDIR_PATH: {{ include "dolphinscheduler.worker.base.dir" . 
| quote }} {{- end }} \ No newline at end of file diff --git a/charts/dolphinscheduler/templates/configmap-dolphinscheduler-worker.yaml b/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-worker.yaml similarity index 88% rename from charts/dolphinscheduler/templates/configmap-dolphinscheduler-worker.yaml rename to kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-worker.yaml index be7391fb32..1e08b67b53 100644 --- a/charts/dolphinscheduler/templates/configmap-dolphinscheduler-worker.yaml +++ b/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-worker.yaml @@ -29,9 +29,9 @@ data: WORKER_FETCH_TASK_NUM: {{ .Values.worker.configmap.WORKER_FETCH_TASK_NUM | quote }} WORKER_MAX_CPULOAD_AVG: {{ .Values.worker.configmap.WORKER_MAX_CPULOAD_AVG | quote }} WORKER_RESERVED_MEMORY: {{ .Values.worker.configmap.WORKER_RESERVED_MEMORY | quote }} + WORKER_LISTEN_PORT: {{ .Values.worker.configmap.WORKER_LISTEN_PORT | quote }} + WORKER_GROUP: {{ .Values.worker.configmap.WORKER_GROUP | quote }} DOLPHINSCHEDULER_DATA_BASEDIR_PATH: {{ include "dolphinscheduler.worker.base.dir" . | quote }} - DOLPHINSCHEDULER_DATA_DOWNLOAD_BASEDIR_PATH: {{ include "dolphinscheduler.worker.data.download.dir" . | quote }} - DOLPHINSCHEDULER_PROCESS_EXEC_BASEPATH: {{ include "dolphinscheduler.worker.process.exec.dir" . | quote }} dolphinscheduler_env.sh: |- {{- range .Values.worker.configmap.DOLPHINSCHEDULER_ENV }} {{ . 
}} diff --git a/charts/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml b/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml similarity index 99% rename from charts/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml rename to kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml index 26026f74b3..f3bb1dd880 100644 --- a/charts/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml +++ b/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml @@ -195,7 +195,7 @@ spec: command: - sh - /root/checkpoint.sh - - worker-server + - AlertServer initialDelaySeconds: {{ .Values.alert.livenessProbe.initialDelaySeconds }} periodSeconds: {{ .Values.alert.livenessProbe.periodSeconds }} timeoutSeconds: {{ .Values.alert.livenessProbe.timeoutSeconds }} @@ -208,7 +208,7 @@ spec: command: - sh - /root/checkpoint.sh - - worker-server + - AlertServer initialDelaySeconds: {{ .Values.alert.readinessProbe.initialDelaySeconds }} periodSeconds: {{ .Values.alert.readinessProbe.periodSeconds }} timeoutSeconds: {{ .Values.alert.readinessProbe.timeoutSeconds }} diff --git a/charts/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml b/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml similarity index 100% rename from charts/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml rename to kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml diff --git a/charts/dolphinscheduler/templates/deployment-dolphinscheduler-frontend.yaml b/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-frontend.yaml similarity index 100% rename from charts/dolphinscheduler/templates/deployment-dolphinscheduler-frontend.yaml rename to kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-frontend.yaml diff --git a/charts/dolphinscheduler/templates/ingress.yaml b/kubernetes/dolphinscheduler/templates/ingress.yaml 
similarity index 100% rename from charts/dolphinscheduler/templates/ingress.yaml rename to kubernetes/dolphinscheduler/templates/ingress.yaml diff --git a/charts/dolphinscheduler/templates/pvc-dolphinscheduler-alert.yaml b/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-alert.yaml similarity index 100% rename from charts/dolphinscheduler/templates/pvc-dolphinscheduler-alert.yaml rename to kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-alert.yaml diff --git a/charts/dolphinscheduler/templates/pvc-dolphinscheduler-api.yaml b/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-api.yaml similarity index 100% rename from charts/dolphinscheduler/templates/pvc-dolphinscheduler-api.yaml rename to kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-api.yaml diff --git a/charts/dolphinscheduler/templates/pvc-dolphinscheduler-frontend.yaml b/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-frontend.yaml similarity index 100% rename from charts/dolphinscheduler/templates/pvc-dolphinscheduler-frontend.yaml rename to kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-frontend.yaml diff --git a/charts/dolphinscheduler/templates/secret-external-postgresql.yaml b/kubernetes/dolphinscheduler/templates/secret-external-postgresql.yaml similarity index 100% rename from charts/dolphinscheduler/templates/secret-external-postgresql.yaml rename to kubernetes/dolphinscheduler/templates/secret-external-postgresql.yaml diff --git a/charts/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml b/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml similarity index 94% rename from charts/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml rename to kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml index ac974128b7..adc35ce804 100644 --- a/charts/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml +++ 
b/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml @@ -109,8 +109,8 @@ spec: args: - "master-server" ports: - - containerPort: 8888 - name: unused-tcp-port + - containerPort: {{ .Values.master.configmap.MASTER_LISTEN_PORT }} + name: "master-port" imagePullPolicy: {{ .Values.image.pullPolicy }} env: - name: TZ @@ -150,6 +150,16 @@ spec: configMapKeyRef: name: {{ include "dolphinscheduler.fullname" . }}-master key: MASTER_RESERVED_MEMORY + - name: MASTER_LISTEN_PORT + valueFrom: + configMapKeyRef: + name: {{ include "dolphinscheduler.fullname" . }}-master + key: MASTER_LISTEN_PORT + - name: DOLPHINSCHEDULER_DATA_BASEDIR_PATH + valueFrom: + configMapKeyRef: + name: {{ include "dolphinscheduler.fullname" . }}-master + key: DOLPHINSCHEDULER_DATA_BASEDIR_PATH - name: POSTGRESQL_HOST {{- if .Values.postgresql.enabled }} value: {{ template "dolphinscheduler.postgresql.fullname" . }} @@ -178,12 +188,6 @@ spec: name: {{ printf "%s-%s" .Release.Name "externaldb" }} key: db-password {{- end }} - - name: TASK_QUEUE - {{- if .Values.zookeeper.enabled }} - value: {{ .Values.zookeeper.taskQueue }} - {{- else }} - value: {{ .Values.externalZookeeper.taskQueue }} - {{- end }} - name: ZOOKEEPER_QUORUM {{- if .Values.zookeeper.enabled }} value: {{ template "dolphinscheduler.zookeeper.quorum" . 
}} @@ -196,7 +200,7 @@ spec: command: - sh - /root/checkpoint.sh - - master-server + - MasterServer initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }} periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }} timeoutSeconds: {{ .Values.master.livenessProbe.timeoutSeconds }} @@ -209,7 +213,7 @@ spec: command: - sh - /root/checkpoint.sh - - master-server + - MasterServer initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }} periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }} timeoutSeconds: {{ .Values.master.readinessProbe.timeoutSeconds }} diff --git a/charts/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml b/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml similarity index 92% rename from charts/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml rename to kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml index a2407978b4..5a03c52775 100644 --- a/charts/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml +++ b/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml @@ -109,6 +109,8 @@ spec: args: - "worker-server" ports: + - containerPort: {{ .Values.worker.configmap.WORKER_LISTEN_PORT }} + name: "worker-port" - containerPort: 50051 name: "logs-port" imagePullPolicy: {{ .Values.image.pullPolicy }} @@ -140,6 +142,21 @@ spec: configMapKeyRef: name: {{ include "dolphinscheduler.fullname" . }}-worker key: WORKER_RESERVED_MEMORY + - name: WORKER_LISTEN_PORT + valueFrom: + configMapKeyRef: + name: {{ include "dolphinscheduler.fullname" . }}-worker + key: WORKER_LISTEN_PORT + - name: WORKER_GROUP + valueFrom: + configMapKeyRef: + name: {{ include "dolphinscheduler.fullname" . }}-worker + key: WORKER_GROUP + - name: DOLPHINSCHEDULER_DATA_BASEDIR_PATH + valueFrom: + configMapKeyRef: + name: {{ include "dolphinscheduler.fullname" . 
}}-worker + key: DOLPHINSCHEDULER_DATA_BASEDIR_PATH - name: POSTGRESQL_HOST {{- if .Values.postgresql.enabled }} value: {{ template "dolphinscheduler.postgresql.fullname" . }} @@ -168,12 +185,6 @@ spec: name: {{ printf "%s-%s" .Release.Name "externaldb" }} key: db-password {{- end }} - - name: TASK_QUEUE - {{- if .Values.zookeeper.enabled }} - value: {{ .Values.zookeeper.taskQueue }} - {{- else }} - value: {{ .Values.externalZookeeper.taskQueue }} - {{- end }} - name: ZOOKEEPER_QUORUM {{- if .Values.zookeeper.enabled }} value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}" @@ -186,7 +197,7 @@ spec: command: - sh - /root/checkpoint.sh - - worker-server + - WorkerServer initialDelaySeconds: {{ .Values.worker.livenessProbe.initialDelaySeconds }} periodSeconds: {{ .Values.worker.livenessProbe.periodSeconds }} timeoutSeconds: {{ .Values.worker.livenessProbe.timeoutSeconds }} @@ -199,7 +210,7 @@ spec: command: - sh - /root/checkpoint.sh - - worker-server + - WorkerServer initialDelaySeconds: {{ .Values.worker.readinessProbe.initialDelaySeconds }} periodSeconds: {{ .Values.worker.readinessProbe.periodSeconds }} timeoutSeconds: {{ .Values.worker.readinessProbe.timeoutSeconds }} @@ -247,7 +258,7 @@ spec: app.kubernetes.io/managed-by: {{ .Release.Service }} spec: accessModes: - {{- range .Values.worker.persistentVolumeClaim.dataPersistentVolume.accessModes }} + {{- range .Values.worker.persistentVolumeClaim.dataPersistentVolume.accessModes }} - {{ . | quote }} {{- end }} storageClassName: {{ .Values.worker.persistentVolumeClaim.dataPersistentVolume.storageClassName | quote }} @@ -264,7 +275,7 @@ spec: app.kubernetes.io/managed-by: {{ .Release.Service }} spec: accessModes: - {{- range .Values.worker.persistentVolumeClaim.logsPersistentVolume.accessModes }} + {{- range .Values.worker.persistentVolumeClaim.logsPersistentVolume.accessModes }} - {{ . 
| quote }} {{- end }} storageClassName: {{ .Values.worker.persistentVolumeClaim.logsPersistentVolume.storageClassName | quote }} diff --git a/charts/dolphinscheduler/templates/svc-dolphinscheduler-api.yaml b/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-api.yaml similarity index 100% rename from charts/dolphinscheduler/templates/svc-dolphinscheduler-api.yaml rename to kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-api.yaml diff --git a/charts/dolphinscheduler/templates/svc-dolphinscheduler-frontend.yaml b/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-frontend.yaml similarity index 100% rename from charts/dolphinscheduler/templates/svc-dolphinscheduler-frontend.yaml rename to kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-frontend.yaml diff --git a/charts/dolphinscheduler/templates/svc-dolphinscheduler-master-headless.yaml b/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-master-headless.yaml similarity index 89% rename from charts/dolphinscheduler/templates/svc-dolphinscheduler-master-headless.yaml rename to kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-master-headless.yaml index 7aaf0b4353..7f82cff31e 100644 --- a/charts/dolphinscheduler/templates/svc-dolphinscheduler-master-headless.yaml +++ b/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-master-headless.yaml @@ -25,10 +25,10 @@ metadata: spec: clusterIP: "None" ports: - - port: 8888 - targetPort: tcp-port + - port: {{ .Values.master.configmap.MASTER_LISTEN_PORT }} + targetPort: master-port protocol: TCP - name: unused-tcp-port + name: master-port selector: app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . 
}}-master app.kubernetes.io/instance: {{ .Release.Name }} diff --git a/charts/dolphinscheduler/templates/svc-dolphinscheduler-worker-headless.yaml b/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-worker-headless.yaml similarity index 89% rename from charts/dolphinscheduler/templates/svc-dolphinscheduler-worker-headless.yaml rename to kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-worker-headless.yaml index 3e92a349d4..fb3b85b5c3 100644 --- a/charts/dolphinscheduler/templates/svc-dolphinscheduler-worker-headless.yaml +++ b/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-worker-headless.yaml @@ -25,6 +25,10 @@ metadata: spec: clusterIP: "None" ports: + - port: {{ .Values.worker.configmap.WORKER_LISTEN_PORT }} + targetPort: worker-port + protocol: TCP + name: worker-port - port: 50051 targetPort: logs-port protocol: TCP diff --git a/charts/dolphinscheduler/values.yaml b/kubernetes/dolphinscheduler/values.yaml similarity index 98% rename from charts/dolphinscheduler/values.yaml rename to kubernetes/dolphinscheduler/values.yaml index 962a031a0c..4f70afade5 100644 --- a/charts/dolphinscheduler/values.yaml +++ b/kubernetes/dolphinscheduler/values.yaml @@ -27,7 +27,7 @@ timezone: "Asia/Shanghai" image: registry: "docker.io" repository: "dolphinscheduler" - tag: "1.2.1" + tag: "1.3.0" pullPolicy: "IfNotPresent" imagePullSecrets: [] @@ -56,6 +56,8 @@ externalDatabase: zookeeper: enabled: true taskQueue: "zookeeper" + service: + port: "2181" persistence: enabled: false size: "20Gi" @@ -91,6 +93,7 @@ master: MASTER_TASK_COMMIT_INTERVAL: "1000" MASTER_MAX_CPULOAD_AVG: "100" MASTER_RESERVED_MEMORY: "0.1" + MASTER_LISTEN_PORT: "5678" livenessProbe: enabled: true initialDelaySeconds: "30" @@ -156,6 +159,8 @@ worker: WORKER_FETCH_TASK_NUM: "3" WORKER_MAX_CPULOAD_AVG: "100" WORKER_RESERVED_MEMORY: "0.1" + WORKER_LISTEN_PORT: "1234" + WORKER_GROUP: "default" DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler" DOLPHINSCHEDULER_ENV: - "export 
HADOOP_HOME=/opt/soft/hadoop"