
merge from dev

pull/3/MERGE
lenboo 4 years ago
commit 9f809de693
  1. 4
      .gitignore
  2. 57
      docker/build/Dockerfile
  3. 30
      docker/build/README.md
  4. 31
      docker/build/README_zh_CN.md
  5. 0
      docker/build/checkpoint.sh
  6. 12
      docker/build/conf/dolphinscheduler/env/dolphinscheduler_env.sh
  7. 9
      docker/build/conf/dolphinscheduler/logback/logback-alert.xml
  8. 9
      docker/build/conf/dolphinscheduler/logback/logback-api.xml
  9. 9
      docker/build/conf/dolphinscheduler/logback/logback-master.xml
  10. 9
      docker/build/conf/dolphinscheduler/logback/logback-worker.xml
  11. 51
      docker/build/conf/nginx/dolphinscheduler.conf
  12. 47
      docker/build/conf/zookeeper/zoo.cfg
  13. 9
      docker/build/hooks/build
  14. 0
      docker/build/hooks/push
  15. 18
      docker/build/startup-init-conf.sh
  16. 55
      docker/build/startup.sh
  17. 2
      docker/docker-swarm/check
  18. 78
      docker/docker-swarm/docker-compose.yml
  19. 74
      docker/docker-swarm/docker-stack.yml
  20. 12
      docker/docker-swarm/dolphinscheduler_env.sh
  21. 10
      docker/kubernetes/dolphinscheduler/Chart.yaml
  22. 37
      docker/kubernetes/dolphinscheduler/README.md
  23. 25
      docker/kubernetes/dolphinscheduler/requirements.yaml
  24. 6
      docker/kubernetes/dolphinscheduler/templates/NOTES.txt
  25. 4
      docker/kubernetes/dolphinscheduler/templates/_helpers.tpl
  26. 1
      docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-alert.yaml
  27. 21
      docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-api.yaml
  28. 8
      docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-common.yaml
  29. 2
      docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-master.yaml
  30. 7
      docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-worker.yaml
  31. 68
      docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml
  32. 48
      docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml
  33. 119
      docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-frontend.yaml
  34. 2
      docker/kubernetes/dolphinscheduler/templates/ingress.yaml
  35. 2
      docker/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-alert.yaml
  36. 2
      docker/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-api.yaml
  37. 20
      docker/kubernetes/dolphinscheduler/templates/secret-external-fs-s3a.yaml
  38. 96
      docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml
  39. 184
      docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml
  40. 4
      docker/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-worker-headless.yaml
  41. 310
      docker/kubernetes/dolphinscheduler/values.yaml
  42. 8
      dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
  43. 26
      dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/NetUtils.java
  44. 9
      dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/NetUtilsTest.java
  45. 6
      dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/OSUtilsTest.java
  46. 1
      dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/builder/TaskExecutionContextBuilder.java
  47. 14
      dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumer.java
  48. 2
      dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/registry/MasterRegistry.java
  49. 2
      dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/ConditionsTaskExecThread.java
  50. 2
      dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/DependentTaskExecThread.java
  51. 4
      dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterSchedulerService.java
  52. 3
      dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskExecuteProcessor.java
  53. 2
      dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/registry/WorkerRegistry.java
  54. 4
      dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/master/dispatch/executor/NettyExecutorManagerTest.java
  55. 4
      dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/registry/ZookeeperNodeManagerTest.java
  56. 2
      dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/utils/ExecutionContextTestUtils.java
  57. 2
      dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/registry/WorkerRegistryTest.java
  58. 0
      dolphinscheduler-ui/install-dolphinscheduler-ui.sh
  59. 6
      dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/udp/udp.vue
  60. 22
      dolphinscheduler-ui/src/js/conf/home/pages/monitor/pages/servers/master.vue
  61. 2
      dolphinscheduler-ui/src/js/conf/home/pages/monitor/pages/servers/worker.vue
  62. 2
      dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/taskInstance/_source/list.vue
  63. 4
      dolphinscheduler-ui/src/js/conf/home/pages/security/pages/users/_source/createUser.vue
  64. 2
      e2e/src/test/resources/config/config.properties
  65. 1
      pom.xml
  66. 0
      script/create-dolphinscheduler.sh
  67. 6
      script/dolphinscheduler-daemon.sh
  68. 0
      script/env/dolphinscheduler_env.sh
  69. 0
      script/monitor-server.sh
  70. 0
      script/scp-hosts.sh
  71. 0
      script/start-all.sh
  72. 0
      script/status-all.sh
  73. 0
      script/stop-all.sh
  74. 0
      script/upgrade-dolphinscheduler.sh
  75. 7
      sql/dolphinscheduler-postgre.sql
  76. 4
      sql/dolphinscheduler_mysql.sql
  77. 58
      sql/upgrade/1.3.5_schema/mysql/dolphinscheduler_ddl.sql
  78. 16
      sql/upgrade/1.3.5_schema/mysql/dolphinscheduler_dml.sql
  79. 52
      sql/upgrade/1.3.5_schema/postgresql/dolphinscheduler_ddl.sql
  80. 16
      sql/upgrade/1.3.5_schema/postgresql/dolphinscheduler_dml.sql

4
.gitignore

@@ -7,6 +7,10 @@
 .target
 .idea/
 target/
+dist/
+all-dependencies.txt
+self-modules.txt
+third-party-dependencies.txt
 .settings
 .nbproject
 .classpath

57
docker/build/Dockerfile

@@ -15,55 +15,37 @@
 # limitations under the License.
 #
-FROM nginx:alpine
+FROM openjdk:8-jdk-alpine
 ARG VERSION
 ENV TZ Asia/Shanghai
 ENV LANG C.UTF-8
-ENV DEBIAN_FRONTEND noninteractive
-#1. install dos2unix shadow bash openrc python sudo vim wget iputils net-tools ssh pip tini kazoo.
-#If install slowly, you can replcae alpine's mirror with aliyun's mirror, Example:
-#RUN sed -i "s/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g" /etc/apk/repositories
+ENV DOCKER true
+# 1. install command/library/software
+# If install slowly, you can replcae alpine's mirror with aliyun's mirror, Example:
+# RUN sed -i "s/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g" /etc/apk/repositories
+# RUN sed -i 's/dl-cdn.alpinelinux.org/mirror.tuna.tsinghua.edu.cn/g' /etc/apk/repositories
 RUN apk update && \
-apk add --update --no-cache dos2unix shadow bash openrc python2 python3 sudo vim wget iputils net-tools openssh-server py-pip tini && \
-apk add --update --no-cache procps && \
-openrc boot && \
-pip install kazoo
-#2. install jdk
-RUN apk add --update --no-cache openjdk8
-ENV JAVA_HOME /usr/lib/jvm/java-1.8-openjdk
-ENV PATH $JAVA_HOME/bin:$PATH
-#3. add dolphinscheduler
+apk add --no-cache tzdata dos2unix bash python2 python3 procps sudo shadow tini postgresql-client && \
+cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
+apk del tzdata && \
+rm -rf /var/cache/apk/*
+# 2. add dolphinscheduler
 ADD ./apache-dolphinscheduler-incubating-${VERSION}-dolphinscheduler-bin.tar.gz /opt/
-RUN mv /opt/apache-dolphinscheduler-incubating-${VERSION}-dolphinscheduler-bin/ /opt/dolphinscheduler/
+RUN ln -s /opt/apache-dolphinscheduler-incubating-${VERSION}-dolphinscheduler-bin /opt/dolphinscheduler
 ENV DOLPHINSCHEDULER_HOME /opt/dolphinscheduler
-#4. install database, if use mysql as your backend database, the `mysql-client` package should be installed
-RUN apk add --update --no-cache postgresql postgresql-contrib
-#5. modify nginx
-RUN echo "daemon off;" >> /etc/nginx/nginx.conf && \
-rm -rf /etc/nginx/conf.d/*
-COPY ./conf/nginx/dolphinscheduler.conf /etc/nginx/conf.d
-#6. add configuration and modify permissions and set soft links
+# 3. add configuration and modify permissions and set soft links
 COPY ./checkpoint.sh /root/checkpoint.sh
 COPY ./startup-init-conf.sh /root/startup-init-conf.sh
 COPY ./startup.sh /root/startup.sh
 COPY ./conf/dolphinscheduler/*.tpl /opt/dolphinscheduler/conf/
 COPY ./conf/dolphinscheduler/logback/* /opt/dolphinscheduler/conf/
-COPY conf/dolphinscheduler/env/dolphinscheduler_env.sh /opt/dolphinscheduler/conf/env/
+COPY ./conf/dolphinscheduler/env/dolphinscheduler_env.sh /opt/dolphinscheduler/conf/env/
-RUN chmod +x /root/checkpoint.sh && \
-chmod +x /root/startup-init-conf.sh && \
-chmod +x /root/startup.sh && \
-chmod +x /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh && \
-chmod +x /opt/dolphinscheduler/script/*.sh && \
-chmod +x /opt/dolphinscheduler/bin/*.sh && \
-dos2unix /root/checkpoint.sh && \
+RUN dos2unix /root/checkpoint.sh && \
 dos2unix /root/startup-init-conf.sh && \
 dos2unix /root/startup.sh && \
 dos2unix /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh && \
@@ -71,13 +53,10 @@ RUN chmod +x /root/checkpoint.sh && \
 dos2unix /opt/dolphinscheduler/bin/*.sh && \
 rm -rf /bin/sh && \
 ln -s /bin/bash /bin/sh && \
-mkdir -p /tmp/xls && \
-#7. remove apk index cache and disable coredup for sudo
-rm -rf /var/cache/apk/* && \
+mkdir -p /var/mail /tmp/xls && \
 echo "Set disable_coredump false" >> /etc/sudo.conf
-#8. expose port
-EXPOSE 2181 2888 3888 5432 5678 1234 12345 50051 8888
+# 4. expose port
+EXPOSE 5678 1234 12345 50051
 ENTRYPOINT ["/sbin/tini", "--", "/root/startup.sh"]
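For orientation, building this image locally follows the same pattern as `docker/build/hooks/build`; a minimal manual invocation might look like the sketch below (the version number and image tag are placeholders, and the dist tarball is assumed to have already been built and copied into `docker/build/`):

```
# assumption: apache-dolphinscheduler-incubating-<VERSION>-dolphinscheduler-bin.tar.gz is already in docker/build/
VERSION=1.3.0
docker build --build-arg VERSION=${VERSION} -t dolphinscheduler:${VERSION} ./docker/build/
```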

30
docker/build/README.md

@@ -17,7 +17,7 @@ Official Website: https://dolphinscheduler.apache.org
 ```
 $ docker run -dit --name dolphinscheduler \
 -e DATABASE_USERNAME=test -e DATABASE_PASSWORD=test -e DATABASE_DATABASE=dolphinscheduler \
--p 8888:8888 \
+-p 12345:12345 \
 dolphinscheduler all
 ```
@@ -33,7 +33,7 @@ You can specify **existing postgres service**. Example:
 $ docker run -dit --name dolphinscheduler \
 -e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
 -e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
--p 8888:8888 \
+-p 12345:12345 \
 dolphinscheduler all
 ```
@@ -43,7 +43,7 @@ You can specify **existing zookeeper service**. Example:
 $ docker run -dit --name dolphinscheduler \
 -e ZOOKEEPER_QUORUM="l92.168.x.x:2181"
 -e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" -e DATABASE_DATABASE="dolphinscheduler" \
--p 8888:8888 \
+-p 12345:12345 \
 dolphinscheduler all
 ```
@@ -90,14 +90,6 @@ $ docker run -dit --name dolphinscheduler \
 dolphinscheduler alert-server
 ```
-* Start a **frontend**, For example:
-```
-$ docker run -dit --name dolphinscheduler \
--e FRONTEND_API_SERVER_HOST="192.168.x.x" -e FRONTEND_API_SERVER_PORT="12345" \
--p 8888:8888 \
-dolphinscheduler frontend
-```
 **Note**: You must be specify `DATABASE_HOST` `DATABASE_PORT` `DATABASE_DATABASE` `DATABASE_USERNAME` `DATABASE_PASSWORD` `ZOOKEEPER_QUORUM` when start a standalone dolphinscheduler server.
@@ -306,18 +298,6 @@ This environment variable sets enterprise wechat agent id for `alert-server`. Th
 This environment variable sets enterprise wechat users for `alert-server`. The default value is empty.
-**`FRONTEND_API_SERVER_HOST`**
-This environment variable sets api server host for `frontend`. The default value is `127.0.0.1`.
-**Note**: You must be specify it when start a standalone dolphinscheduler server. Like `api-server`.
-**`FRONTEND_API_SERVER_PORT`**
-This environment variable sets api server port for `frontend`. The default value is `123451`.
-**Note**: You must be specify it when start a standalone dolphinscheduler server. Like `api-server`.
 ## Initialization scripts
 If you would like to do additional initialization in an image derived from this one, add one or more environment variable under `/root/start-init-conf.sh`, and modify template files in `/opt/dolphinscheduler/conf/*.tpl`.
@@ -343,8 +323,4 @@ $(cat ${DOLPHINSCHEDULER_HOME}/conf/${line})
 EOF
 " > ${DOLPHINSCHEDULER_HOME}/conf/${line%.*}
 done
-echo "generate nginx config"
-sed -i "s/FRONTEND_API_SERVER_HOST/${FRONTEND_API_SERVER_HOST}/g" /etc/nginx/conf.d/dolphinscheduler.conf
-sed -i "s/FRONTEND_API_SERVER_PORT/${FRONTEND_API_SERVER_PORT}/g" /etc/nginx/conf.d/dolphinscheduler.conf
 ```

31
docker/build/README_zh_CN.md

@@ -17,7 +17,7 @@ Official Website: https://dolphinscheduler.apache.org
 ```
 $ docker run -dit --name dolphinscheduler \
 -e DATABASE_USERNAME=test -e DATABASE_PASSWORD=test -e DATABASE_DATABASE=dolphinscheduler \
--p 8888:8888 \
+-p 12345:12345 \
 dolphinscheduler all
 ```
@@ -33,7 +33,7 @@ dolphinscheduler all
 $ docker run -dit --name dolphinscheduler \
 -e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
 -e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
--p 8888:8888 \
+-p 12345:12345 \
 dolphinscheduler all
 ```
@@ -43,7 +43,7 @@ dolphinscheduler all
 $ docker run -dit --name dolphinscheduler \
 -e ZOOKEEPER_QUORUM="l92.168.x.x:2181"
 -e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" -e DATABASE_DATABASE="dolphinscheduler" \
--p 8888:8888 \
+-p 12345:12345 \
 dolphinscheduler all
 ```
@@ -90,15 +90,6 @@ $ docker run -dit --name dolphinscheduler \
 dolphinscheduler alert-server
 ```
-* 启动一个 **frontend**, 如下:
-```
-$ docker run -dit --name dolphinscheduler \
--e FRONTEND_API_SERVER_HOST="192.168.x.x" -e FRONTEND_API_SERVER_PORT="12345" \
--p 8888:8888 \
-dolphinscheduler frontend
-```
 **注意**: 当你运行dolphinscheduler中的部分服务时,你必须指定这些环境变量 `DATABASE_HOST` `DATABASE_PORT` `DATABASE_DATABASE` `DATABASE_USERNAME` `DATABASE_PASSWORD` `ZOOKEEPER_QUORUM`
 ## 如何构建一个docker镜像
@@ -306,18 +297,6 @@ Dolphin Scheduler映像使用了几个容易遗漏的环境变量。虽然这些
 配置`alert-server`的邮件服务企业微信`USERS`,默认值 `空`
-**`FRONTEND_API_SERVER_HOST`**
-配置`frontend`的连接`api-server`的地址,默认值 `127.0.0.1`
-**Note**: 当单独运行`api-server`时,你应该指定`api-server`这个值。
-**`FRONTEND_API_SERVER_PORT`**
-配置`frontend`的连接`api-server`的端口,默认值 `12345`
-**Note**: 当单独运行`api-server`时,你应该指定`api-server`这个值。
 ## 初始化脚本
 如果你想在编译的时候或者运行的时候附加一些其它的操作及新增一些环境变量,你可以在`/root/start-init-conf.sh`文件中进行修改,同时如果涉及到配置文件的修改,请在`/opt/dolphinscheduler/conf/*.tpl`中修改相应的配置文件
@@ -343,8 +322,4 @@ $(cat ${DOLPHINSCHEDULER_HOME}/conf/${line})
 EOF
 " > ${DOLPHINSCHEDULER_HOME}/conf/${line%.*}
 done
-echo "generate nginx config"
-sed -i "s/FRONTEND_API_SERVER_HOST/${FRONTEND_API_SERVER_HOST}/g" /etc/nginx/conf.d/dolphinscheduler.conf
-sed -i "s/FRONTEND_API_SERVER_PORT/${FRONTEND_API_SERVER_PORT}/g" /etc/nginx/conf.d/dolphinscheduler.conf
 ```

0
docker/build/checkpoint.sh

12
docker/build/conf/dolphinscheduler/env/dolphinscheduler_env.sh

@@ -15,6 +15,14 @@
 # limitations under the License.
 #
-export PYTHON_HOME=/usr/bin/python2
+export HADOOP_HOME=/opt/soft/hadoop
+export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop
+export SPARK_HOME1=/opt/soft/spark1
+export SPARK_HOME2=/opt/soft/spark2
+export PYTHON_HOME=/usr/bin/python
 export JAVA_HOME=/usr/lib/jvm/java-1.8-openjdk
-export PATH=$PYTHON_HOME/bin:$JAVA_HOME/bin:$PATH
+export HIVE_HOME=/opt/soft/hive
+export FLINK_HOME=/opt/soft/flink
+export DATAX_HOME=/opt/soft/datax/bin/datax.py
+export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$PATH:$FLINK_HOME/bin:$DATAX_HOME:$PATH

9
docker/build/conf/dolphinscheduler/logback/logback-alert.xml

@@ -20,14 +20,6 @@
 <configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
 <property name="log.base" value="logs"/>
-<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
-<encoder>
-<pattern>
-[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
-</pattern>
-<charset>UTF-8</charset>
-</encoder>
-</appender>
 <appender name="ALERTLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
 <file>${log.base}/dolphinscheduler-alert.log</file>
@@ -45,7 +37,6 @@
 </appender>
 <root level="INFO">
-<appender-ref ref="STDOUT"/>
 <appender-ref ref="ALERTLOGFILE"/>
 </root>

9
docker/build/conf/dolphinscheduler/logback/logback-api.xml

@@ -20,14 +20,6 @@
 <configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
 <property name="log.base" value="logs"/>
-<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
-<encoder>
-<pattern>
-[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
-</pattern>
-<charset>UTF-8</charset>
-</encoder>
-</appender>
 <!-- api server logback config start -->
 <appender name="APILOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
@@ -55,7 +47,6 @@
 <root level="INFO">
-<appender-ref ref="STDOUT"/>
 <appender-ref ref="APILOGFILE"/>
 </root>

9
docker/build/conf/dolphinscheduler/logback/logback-master.xml

@@ -20,14 +20,6 @@
 <configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
 <property name="log.base" value="logs"/>
-<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
-<encoder>
-<pattern>
-[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
-</pattern>
-<charset>UTF-8</charset>
-</encoder>
-</appender>
 <conversionRule conversionWord="messsage"
 converterClass="org.apache.dolphinscheduler.server.log.SensitiveDataConverter"/>
@@ -74,7 +66,6 @@
 <!-- master server logback config end -->
 <root level="INFO">
-<appender-ref ref="STDOUT"/>
 <appender-ref ref="TASKLOGFILE"/>
 <appender-ref ref="MASTERLOGFILE"/>
 </root>

9
docker/build/conf/dolphinscheduler/logback/logback-worker.xml

@@ -20,14 +20,6 @@
 <configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
 <property name="log.base" value="logs"/>
-<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
-<encoder>
-<pattern>
-[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
-</pattern>
-<charset>UTF-8</charset>
-</encoder>
-</appender>
 <!-- worker server logback config start -->
 <conversionRule conversionWord="messsage"
@@ -75,7 +67,6 @@
 <!-- worker server logback config end -->
 <root level="INFO">
-<appender-ref ref="STDOUT"/>
 <appender-ref ref="TASKLOGFILE"/>
 <appender-ref ref="WORKERLOGFILE"/>
 </root>

51
docker/build/conf/nginx/dolphinscheduler.conf

@@ -1,51 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
server {
listen 8888;
server_name localhost;
#charset koi8-r;
#access_log /var/log/nginx/host.access.log main;
location / {
root /opt/dolphinscheduler/ui;
index index.html index.html;
}
location /dolphinscheduler/ui{
alias /opt/dolphinscheduler/ui;
}
location /dolphinscheduler {
proxy_pass http://FRONTEND_API_SERVER_HOST:FRONTEND_API_SERVER_PORT;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header x_real_ipP $remote_addr;
proxy_set_header remote_addr $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_http_version 1.1;
proxy_connect_timeout 300s;
proxy_read_timeout 300s;
proxy_send_timeout 300s;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
#error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}

47
docker/build/conf/zookeeper/zoo.cfg

@@ -1,47 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/tmp/zookeeper
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
#Four Letter Words commands:stat,ruok,conf,isro
4lw.commands.whitelist=*

9
docker/build/hooks/build

@@ -48,7 +48,12 @@ echo -e "mv $(pwd)/dolphinscheduler-dist/target/apache-dolphinscheduler-incubati
 mv "$(pwd)"/dolphinscheduler-dist/target/apache-dolphinscheduler-incubating-"${VERSION}"-dolphinscheduler-bin.tar.gz $(pwd)/docker/build/
 # docker build
-echo -e "docker build --build-arg VERSION=${VERSION} -t $DOCKER_REPO:${VERSION} $(pwd)/docker/build/\n"
-sudo docker build --build-arg VERSION="${VERSION}" -t $DOCKER_REPO:"${VERSION}" "$(pwd)/docker/build/"
+BUILD_COMMAND="docker build --build-arg VERSION=${VERSION} -t $DOCKER_REPO:${VERSION} $(pwd)/docker/build/"
+echo -e "$BUILD_COMMAND\n"
+if (docker info 2> /dev/null | grep -i "ERROR"); then
+sudo $BUILD_COMMAND
+else
+$BUILD_COMMAND
+fi
 echo "------ dolphinscheduler end - build -------"

0
docker/build/hooks/push

18
docker/build/startup-init-conf.sh

@@ -39,9 +39,9 @@ export DATABASE_PARAMS=${DATABASE_PARAMS:-"characterEncoding=utf8"}
 export DOLPHINSCHEDULER_ENV_PATH=${DOLPHINSCHEDULER_ENV_PATH:-"/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh"}
 export DOLPHINSCHEDULER_DATA_BASEDIR_PATH=${DOLPHINSCHEDULER_DATA_BASEDIR_PATH:-"/tmp/dolphinscheduler"}
 export DOLPHINSCHEDULER_OPTS=${DOLPHINSCHEDULER_OPTS:-""}
-export RESOURCE_STORAGE_TYPE=${RESOURCE_STORAGE_TYPE:-"NONE"}
-export RESOURCE_UPLOAD_PATH=${RESOURCE_UPLOAD_PATH:-"/ds"}
-export FS_DEFAULT_FS=${FS_DEFAULT_FS:-"s3a://xxxx"}
+export RESOURCE_STORAGE_TYPE=${RESOURCE_STORAGE_TYPE:-"HDFS"}
+export RESOURCE_UPLOAD_PATH=${RESOURCE_UPLOAD_PATH:-"/dolphinscheduler"}
+export FS_DEFAULT_FS=${FS_DEFAULT_FS:-"file:///"}
 export FS_S3A_ENDPOINT=${FS_S3A_ENDPOINT:-"s3.xxx.amazonaws.com"}
 export FS_S3A_ACCESS_KEY=${FS_S3A_ACCESS_KEY:-"xxxxxxx"}
 export FS_S3A_SECRET_KEY=${FS_S3A_SECRET_KEY:-"xxxxxxx"}
@@ -81,7 +81,7 @@ export WORKER_WEIGHT=${WORKER_WEIGHT:-"100"}
 #============================================================================
 # alert plugin dir
 export ALERT_PLUGIN_DIR=${ALERT_PLUGIN_DIR:-"/opt/dolphinscheduler"}
-# XLS FILE
+# xls file
 export XLS_FILE_PATH=${XLS_FILE_PATH:-"/tmp/xls"}
 # mail
 export MAIL_SERVER_HOST=${MAIL_SERVER_HOST:-""}
@@ -99,12 +99,6 @@ export ENTERPRISE_WECHAT_SECRET=${ENTERPRISE_WECHAT_SECRET:-""}
 export ENTERPRISE_WECHAT_AGENT_ID=${ENTERPRISE_WECHAT_AGENT_ID:-""}
 export ENTERPRISE_WECHAT_USERS=${ENTERPRISE_WECHAT_USERS:-""}
-#============================================================================
-# Frontend
-#============================================================================
-export FRONTEND_API_SERVER_HOST=${FRONTEND_API_SERVER_HOST:-"127.0.0.1"}
-export FRONTEND_API_SERVER_PORT=${FRONTEND_API_SERVER_PORT:-"12345"}
 echo "generate app config"
 ls ${DOLPHINSCHEDULER_HOME}/conf/ | grep ".tpl" | while read line; do
 eval "cat << EOF
@@ -112,7 +106,3 @@ $(cat ${DOLPHINSCHEDULER_HOME}/conf/${line})
 EOF
 " > ${DOLPHINSCHEDULER_HOME}/conf/${line%.*}
 done
-echo "generate nginx config"
-sed -i "s/FRONTEND_API_SERVER_HOST/${FRONTEND_API_SERVER_HOST}/g" /etc/nginx/conf.d/dolphinscheduler.conf
-sed -i "s/FRONTEND_API_SERVER_PORT/${FRONTEND_API_SERVER_PORT}/g" /etc/nginx/conf.d/dolphinscheduler.conf

55
docker/build/startup.sh

@@ -22,8 +22,8 @@ DOLPHINSCHEDULER_BIN=${DOLPHINSCHEDULER_HOME}/bin
 DOLPHINSCHEDULER_SCRIPT=${DOLPHINSCHEDULER_HOME}/script
 DOLPHINSCHEDULER_LOGS=${DOLPHINSCHEDULER_HOME}/logs
-# start database
-initDatabase() {
+# wait database
+waitDatabase() {
 echo "test ${DATABASE_TYPE} service"
 while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do
 counter=$((counter+1))
@@ -43,19 +43,22 @@ initDatabase() {
 exit 1
 fi
 else
-v=$(sudo -u postgres PGPASSWORD=${DATABASE_PASSWORD} psql -h ${DATABASE_HOST} -p ${DATABASE_PORT} -U ${DATABASE_USERNAME} -d ${DATABASE_DATABASE} -tAc "select 1")
+v=$(PGPASSWORD=${DATABASE_PASSWORD} psql -h ${DATABASE_HOST} -p ${DATABASE_PORT} -U ${DATABASE_USERNAME} -d ${DATABASE_DATABASE} -tAc "select 1")
 if [ "$(echo ${v} | grep 'FATAL' | wc -l)" -eq 1 ]; then
 echo "Error: Can't connect to database...${v}"
 exit 1
 fi
 fi
+}
+# init database
+initDatabase() {
 echo "import sql data"
 ${DOLPHINSCHEDULER_SCRIPT}/create-dolphinscheduler.sh
 }
-# start zk
-initZK() {
+# wait zk
+waitZK() {
 echo "connect remote zookeeper"
 echo "${ZOOKEEPER_QUORUM}" | awk -F ',' 'BEGIN{ i=1 }{ while( i <= NF ){ print $i; i++ } }' | while read line; do
 while ! nc -z ${line%:*} ${line#*:}; do
@@ -70,12 +73,6 @@ initZK() {
 done
 }
-# start nginx
-initNginx() {
-echo "start nginx"
-nginx &
-}
 # start master-server
 initMasterServer() {
 echo "start master-server"
@@ -115,59 +112,54 @@ initAlertServer() {
 printUsage() {
 echo -e "Dolphin Scheduler is a distributed and easy-to-expand visual DAG workflow scheduling system,"
 echo -e "dedicated to solving the complex dependencies in data processing, making the scheduling system out of the box for data processing.\n"
-echo -e "Usage: [ all | master-server | worker-server | api-server | alert-server | frontend ]\n"
+echo -e "Usage: [ all | master-server | worker-server | api-server | alert-server ]\n"
-printf "%-13s: %s\n" "all" "Run master-server, worker-server, api-server, alert-server and frontend."
+printf "%-13s: %s\n" "all" "Run master-server, worker-server, api-server and alert-server"
 printf "%-13s: %s\n" "master-server" "MasterServer is mainly responsible for DAG task split, task submission monitoring."
-printf "%-13s: %s\n" "worker-server" "WorkerServer is mainly responsible for task execution and providing log services.."
+printf "%-13s: %s\n" "worker-server" "WorkerServer is mainly responsible for task execution and providing log services."
-printf "%-13s: %s\n" "api-server" "ApiServer is mainly responsible for processing requests from the front-end UI layer."
+printf "%-13s: %s\n" "api-server" "ApiServer is mainly responsible for processing requests and providing the front-end UI layer."
 printf "%-13s: %s\n" "alert-server" "AlertServer mainly include Alarms."
-printf "%-13s: %s\n" "frontend" "Frontend mainly provides various visual operation interfaces of the system."
 }
 # init config file
 source /root/startup-init-conf.sh
-LOGFILE=/var/log/nginx/access.log
 case "$1" in
 (all)
-initZK
+waitZK
+waitDatabase
 initDatabase
 initMasterServer
 initWorkerServer
 initApiServer
 initAlertServer
 initLoggerServer
-initNginx
-LOGFILE=/var/log/nginx/access.log
+LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-api-server.log
 ;;
 (master-server)
-initZK
-initDatabase
+waitZK
+waitDatabase
 initMasterServer
 LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-master.log
 ;;
 (worker-server)
-initZK
-initDatabase
+waitZK
+waitDatabase
 initWorkerServer
 initLoggerServer
 LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-worker.log
 ;;
 (api-server)
-initZK
+waitZK
+waitDatabase
 initDatabase
 initApiServer
 LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-api-server.log
 ;;
 (alert-server)
-initDatabase
+waitDatabase
 initAlertServer
 LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-alert.log
 ;;
-(frontend)
-initNginx
-LOGFILE=/var/log/nginx/access.log
-;;
 (help)
 printUsage
 exit 1
@@ -179,8 +171,7 @@ case "$1" in
 esac
 # init directories and log files
-mkdir -p ${DOLPHINSCHEDULER_LOGS} && mkdir -p /var/log/nginx/ && cat /dev/null >> ${LOGFILE}
+mkdir -p ${DOLPHINSCHEDULER_LOGS} && cat /dev/null >> ${LOGFILE}
 echo "tail begin"
 exec bash -c "tail -n 1 -f ${LOGFILE}"
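The `waitZK`/`waitDatabase` functions above only block until their backing services accept TCP connections. As a rough standalone illustration of that pattern (the function name and the 30-attempt limit here are assumptions, not taken from the script), the idea is:

```
# hypothetical sketch of the wait-for-port pattern used by waitDatabase/waitZK
wait_for_port() {
    local host="$1" port="$2" counter=0
    while ! nc -z "${host}" "${port}"; do
        counter=$((counter+1))
        if [ ${counter} -ge 30 ]; then
            echo "Error: ${host}:${port} still not reachable, giving up"
            return 1
        fi
        echo "waiting for ${host}:${port} (${counter}/30)"
        sleep 5
    done
}

wait_for_port "${DATABASE_HOST}" "${DATABASE_PORT}"  # roughly what waitDatabase does before initDatabase imports the schema
```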

2
docker/docker-swarm/check

@@ -25,7 +25,7 @@ else
 echo "Server start failed "$server_num
 exit 1
 fi
-ready=`curl http://127.0.0.1:8888/dolphinscheduler/login -d 'userName=admin&userPassword=dolphinscheduler123' -v | grep "login success" | wc -l`
+ready=`curl http://127.0.0.1:12345/dolphinscheduler/login -d 'userName=admin&userPassword=dolphinscheduler123' -v | grep "login success" | wc -l`
 if [ $ready -eq 1 ]
 then
 echo "Servers is ready"

78
docker/docker-swarm/docker-compose.yml

@@ -31,6 +31,7 @@ services:
 volumes:
 - dolphinscheduler-postgresql:/bitnami/postgresql
 - dolphinscheduler-postgresql-initdb:/docker-entrypoint-initdb.d
+restart: unless-stopped
 networks:
 - dolphinscheduler
@@ -45,13 +46,14 @@ services:
 ZOO_4LW_COMMANDS_WHITELIST: srvr,ruok,wchs,cons
 volumes:
 - dolphinscheduler-zookeeper:/bitnami/zookeeper
+restart: unless-stopped
 networks:
 - dolphinscheduler
 dolphinscheduler-api:
 image: apache/dolphinscheduler:latest
 container_name: dolphinscheduler-api
-command: ["api-server"]
+command: api-server
 ports:
 - 12345:12345
 environment:
@@ -62,6 +64,9 @@ services:
 DATABASE_PASSWORD: root
 DATABASE_DATABASE: dolphinscheduler
 ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
+RESOURCE_STORAGE_TYPE: HDFS
+RESOURCE_UPLOAD_PATH: /dolphinscheduler
+FS_DEFAULT_FS: file:///
 healthcheck:
 test: ["CMD", "/root/checkpoint.sh", "ApiApplicationServer"]
 interval: 30s
@@ -72,37 +77,16 @@ services:
 - dolphinscheduler-postgresql
 - dolphinscheduler-zookeeper
 volumes:
-- ./dolphinscheduler-logs:/opt/dolphinscheduler/logs
-networks:
-- dolphinscheduler
-dolphinscheduler-frontend:
-image: apache/dolphinscheduler:latest
-container_name: dolphinscheduler-frontend
-command: ["frontend"]
-ports:
-- 8888:8888
-environment:
-TZ: Asia/Shanghai
-FRONTEND_API_SERVER_HOST: dolphinscheduler-api
-FRONTEND_API_SERVER_PORT: 12345
-healthcheck:
-test: ["CMD", "nc", "-z", "localhost", "8888"]
-interval: 30s
-timeout: 5s
-retries: 3
-start_period: 30s
-depends_on:
-- dolphinscheduler-api
-volumes:
-- ./dolphinscheduler-logs:/var/log/nginx
+- dolphinscheduler-logs:/opt/dolphinscheduler/logs
+- dolphinscheduler-resource-local:/dolphinscheduler
+restart: unless-stopped
 networks:
 - dolphinscheduler
 dolphinscheduler-alert:
 image: apache/dolphinscheduler:latest
 container_name: dolphinscheduler-alert
-command: ["alert-server"]
+command: alert-server
 environment:
 TZ: Asia/Shanghai
 XLS_FILE_PATH: "/tmp/xls"
@@ -133,14 +117,15 @@ services:
 depends_on:
 - dolphinscheduler-postgresql
 volumes:
-- ./dolphinscheduler-logs:/opt/dolphinscheduler/logs
+- dolphinscheduler-logs:/opt/dolphinscheduler/logs
+restart: unless-stopped
 networks:
 - dolphinscheduler
 dolphinscheduler-master:
 image: apache/dolphinscheduler:latest
 container_name: dolphinscheduler-master
-command: ["master-server"]
+command: master-server
 ports:
 - 5678:5678
 environment:
@@ -168,14 +153,15 @@ services:
 - dolphinscheduler-postgresql
 - dolphinscheduler-zookeeper
 volumes:
-- ./dolphinscheduler-logs:/opt/dolphinscheduler/logs
+- dolphinscheduler-logs:/opt/dolphinscheduler/logs
+restart: unless-stopped
 networks:
 - dolphinscheduler
 dolphinscheduler-worker:
 image: apache/dolphinscheduler:latest
 container_name: dolphinscheduler-worker
-command: ["worker-server"]
+command: worker-server
 ports:
 - 1234:1234
 - 50051:50051
@@ -188,13 +174,25 @@ services:
 WORKER_RESERVED_MEMORY: "0.1"
 WORKER_GROUP: "default"
 WORKER_WEIGHT: "100"
-DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
+DOLPHINSCHEDULER_DATA_BASEDIR_PATH: /tmp/dolphinscheduler
+XLS_FILE_PATH: "/tmp/xls"
+MAIL_SERVER_HOST: ""
+MAIL_SERVER_PORT: ""
+MAIL_SENDER: ""
+MAIL_USER: ""
+MAIL_PASSWD: ""
+MAIL_SMTP_STARTTLS_ENABLE: "false"
+MAIL_SMTP_SSL_ENABLE: "false"
+MAIL_SMTP_SSL_TRUST: ""
 DATABASE_HOST: dolphinscheduler-postgresql
 DATABASE_PORT: 5432
 DATABASE_USERNAME: root
 DATABASE_PASSWORD: root
 DATABASE_DATABASE: dolphinscheduler
 ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
+RESOURCE_STORAGE_TYPE: HDFS
+RESOURCE_UPLOAD_PATH: /dolphinscheduler
+FS_DEFAULT_FS: file:///
 healthcheck:
 test: ["CMD", "/root/checkpoint.sh", "WorkerServer"]
 interval: 30s
@@ -205,13 +203,11 @@ services:
 - dolphinscheduler-postgresql
 - dolphinscheduler-zookeeper
 volumes:
-- type: bind
-source: ./dolphinscheduler_env.sh
-target: /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh
-- type: volume
-source: dolphinscheduler-worker-data
-target: /tmp/dolphinscheduler
-- ./dolphinscheduler-logs:/opt/dolphinscheduler/logs
+- ./dolphinscheduler_env.sh:/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh
+- dolphinscheduler-worker-data:/tmp/dolphinscheduler
+- dolphinscheduler-logs:/opt/dolphinscheduler/logs
+- dolphinscheduler-resource-local:/dolphinscheduler
+restart: unless-stopped
 networks:
 - dolphinscheduler
@@ -224,7 +220,5 @@ volumes:
 dolphinscheduler-postgresql-initdb:
 dolphinscheduler-zookeeper:
 dolphinscheduler-worker-data:
+dolphinscheduler-logs:
+dolphinscheduler-resource-local:
-configs:
-dolphinscheduler-worker-task-env:
-file: ./dolphinscheduler_env.sh
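Assuming the compose file and the sibling `dolphinscheduler_env.sh` are used unchanged, a typical local bring-up would be along these lines (service names as defined above; the UI/API is now published on 12345 instead of the old nginx 8888):

```
cd docker/docker-swarm
docker-compose up -d
docker-compose logs -f dolphinscheduler-api   # wait until the api-server reports it is up, then open port 12345
```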

74
docker/docker-swarm/docker-stack.yml

@@ -20,13 +20,13 @@ services:
 dolphinscheduler-postgresql:
 image: bitnami/postgresql:latest
-ports:
-- 5432:5432
 environment:
 TZ: Asia/Shanghai
 POSTGRESQL_USERNAME: root
 POSTGRESQL_PASSWORD: root
 POSTGRESQL_DATABASE: dolphinscheduler
+ports:
+- 5432:5432
 volumes:
 - dolphinscheduler-postgresql:/bitnami/postgresql
 networks:
@@ -37,12 +37,12 @@ services:
 dolphinscheduler-zookeeper:
 image: bitnami/zookeeper:latest
-ports:
-- 2181:2181
 environment:
 TZ: Asia/Shanghai
 ALLOW_ANONYMOUS_LOGIN: "yes"
 ZOO_4LW_COMMANDS_WHITELIST: srvr,ruok,wchs,cons
+ports:
+- 2181:2181
 volumes:
 - dolphinscheduler-zookeeper:/bitnami/zookeeper
 networks:
@@ -53,7 +53,9 @@ services:
 dolphinscheduler-api:
 image: apache/dolphinscheduler:latest
-command: ["api-server"]
+command: api-server
+ports:
+- 12345:12345
 environment:
 TZ: Asia/Shanghai
 DATABASE_HOST: dolphinscheduler-postgresql
@@ -62,11 +64,12 @@ services:
 DATABASE_PASSWORD: root
 DATABASE_DATABASE: dolphinscheduler
 ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
-ports:
-- 12345:12345
+RESOURCE_STORAGE_TYPE: HDFS
+RESOURCE_UPLOAD_PATH: /dolphinscheduler
+FS_DEFAULT_FS: file:///
 healthcheck:
 test: ["CMD", "/root/checkpoint.sh", "ApiApplicationServer"]
-interval: 30
+interval: 30s
 timeout: 5s
 retries: 3
 start_period: 30s
@@ -78,32 +81,9 @@ services:
 mode: replicated
 replicas: 1
-dolphinscheduler-frontend:
-image: apache/dolphinscheduler:latest
-command: ["frontend"]
-ports:
-- 8888:8888
-environment:
-TZ: Asia/Shanghai
-FRONTEND_API_SERVER_HOST: dolphinscheduler-api
-FRONTEND_API_SERVER_PORT: 12345
-healthcheck:
-test: ["CMD", "nc", "-z", "localhost", "8888"]
-interval: 30
-timeout: 5s
-retries: 3
-start_period: 30s
-volumes:
-- dolphinscheduler-logs:/var/log/nginx
-networks:
-- dolphinscheduler
-deploy:
-mode: replicated
-replicas: 1
 dolphinscheduler-alert:
 image: apache/dolphinscheduler:latest
-command: ["alert-server"]
+command: alert-server
 environment:
 TZ: Asia/Shanghai
 XLS_FILE_PATH: "/tmp/xls"
@@ -127,7 +107,7 @@ services:
 DATABASE_DATABASE: dolphinscheduler
 healthcheck:
 test: ["CMD", "/root/checkpoint.sh", "AlertServer"]
-interval: 30
+interval: 30s
 timeout: 5s
 retries: 3
 start_period: 30s
@@ -141,7 +121,7 @@ services:
 dolphinscheduler-master:
 image: apache/dolphinscheduler:latest
-command: ["master-server"]
+command: master-server
 ports:
 - 5678:5678
 environment:
@@ -161,7 +141,7 @@ services:
 ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
 healthcheck:
 test: ["CMD", "/root/checkpoint.sh", "MasterServer"]
-interval: 30
+interval: 30s
 timeout: 5s
 retries: 3
 start_period: 30s
@@ -175,7 +155,7 @@ services:
 dolphinscheduler-worker:
 image: apache/dolphinscheduler:latest
-command: ["worker-server"]
+command: worker-server
 ports:
 - 1234:1234
 - 50051:50051
@@ -188,25 +168,37 @@ services:
 WORKER_RESERVED_MEMORY: "0.1"
 WORKER_GROUP: "default"
 WORKER_WEIGHT: "100"
-DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
+DOLPHINSCHEDULER_DATA_BASEDIR_PATH: /tmp/dolphinscheduler
+XLS_FILE_PATH: "/tmp/xls"
+MAIL_SERVER_HOST: ""
+MAIL_SERVER_PORT: ""
+MAIL_SENDER: ""
+MAIL_USER: ""
+MAIL_PASSWD: ""
+MAIL_SMTP_STARTTLS_ENABLE: "false"
+MAIL_SMTP_SSL_ENABLE: "false"
+MAIL_SMTP_SSL_TRUST: ""
 DATABASE_HOST: dolphinscheduler-postgresql
 DATABASE_PORT: 5432
 DATABASE_USERNAME: root
 DATABASE_PASSWORD: root
 DATABASE_DATABASE: dolphinscheduler
 ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
+RESOURCE_STORAGE_TYPE: HDFS
+RESOURCE_UPLOAD_PATH: /dolphinscheduler
+FS_DEFAULT_FS: file:///
 healthcheck:
 test: ["CMD", "/root/checkpoint.sh", "WorkerServer"]
-interval: 30
+interval: 30s
 timeout: 5s
 retries: 3
 start_period: 30s
-volumes:
-- dolphinscheduler-worker-data:/tmp/dolphinscheduler
-- dolphinscheduler-logs:/opt/dolphinscheduler/logs
 configs:
 - source: dolphinscheduler-worker-task-env
 target: /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh
+volumes:
+- dolphinscheduler-worker-data:/tmp/dolphinscheduler
+- dolphinscheduler-logs:/opt/dolphinscheduler/logs
 networks:
 - dolphinscheduler
 deploy:
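For completeness, a stack file like this is normally deployed to a swarm roughly as follows (the stack name is illustrative):

```
docker swarm init                                          # once, on the manager node
docker stack deploy -c docker-stack.yml dolphinscheduler
docker stack services dolphinscheduler                    # check replicas and published ports
```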

12
docker/docker-swarm/dolphinscheduler_env.sh

@@ -15,6 +15,14 @@
 # limitations under the License.
 #
-export PYTHON_HOME=/usr/bin/python2
+export HADOOP_HOME=/opt/soft/hadoop
+export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop
+export SPARK_HOME1=/opt/soft/spark1
+export SPARK_HOME2=/opt/soft/spark2
+export PYTHON_HOME=/usr/bin/python
 export JAVA_HOME=/usr/lib/jvm/java-1.8-openjdk
-export PATH=$PYTHON_HOME:$JAVA_HOME/bin:$PATH
+export HIVE_HOME=/opt/soft/hive
+export FLINK_HOME=/opt/soft/flink
+export DATAX_HOME=/opt/soft/datax/bin/datax.py
+export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$PATH:$FLINK_HOME/bin:$DATAX_HOME:$PATH

10
docker/kubernetes/dolphinscheduler/Chart.yaml

@@ -22,7 +22,7 @@ home: https://dolphinscheduler.apache.org
 icon: https://dolphinscheduler.apache.org/img/hlogo_colorful.svg
 keywords:
 - dolphinscheduler
-- Scheduler
+- scheduler
 # A chart can be either an 'application' or a 'library' chart.
 #
 # Application charts are a collection of templates that can be packaged into versioned archives
@@ -35,18 +35,18 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
-version: 1.0.0
+version: 1.2.0
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application.
-appVersion: 1.3.0
+appVersion: 1.4.0
 dependencies:
 - name: postgresql
-version: 8.x.x
+version: 10.x.x
 repository: https://charts.bitnami.com/bitnami
 condition: postgresql.enabled
 - name: zookeeper
-version: 5.x.x
+version: 6.x.x
 repository: https://charts.bitnami.com/bitnami
 condition: zookeeper.enabled

37
docker/kubernetes/dolphinscheduler/README.md

@ -7,19 +7,20 @@ This chart bootstraps a [Dolphin Scheduler](https://dolphinscheduler.apache.org)
## Prerequisites ## Prerequisites
- Kubernetes 1.10+ - Helm 3.1.0+
- Kubernetes 1.12+
- PV provisioner support in the underlying infrastructure - PV provisioner support in the underlying infrastructure
## Installing the Chart ## Installing the Chart
To install the chart with the release name `my-release`: To install the chart with the release name `dolphinscheduler`:
```bash ```bash
$ git clone https://github.com/apache/incubator-dolphinscheduler.git $ git clone https://github.com/apache/incubator-dolphinscheduler.git
$ cd incubator-dolphinscheduler/kubernetes/dolphinscheduler $ cd incubator-dolphinscheduler/docker/kubernetes/dolphinscheduler
$ helm repo add bitnami https://charts.bitnami.com/bitnami $ helm repo add bitnami https://charts.bitnami.com/bitnami
$ helm dependency update . $ helm dependency update .
$ helm install --name dolphinscheduler . $ helm install dolphinscheduler .
``` ```
These commands deploy Dolphin Scheduler on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. These commands deploy Dolphin Scheduler on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
@ -30,7 +31,7 @@ These commands deploy Dolphin Scheduler on the Kubernetes cluster in the default
To uninstall/delete the `dolphinscheduler` deployment: To uninstall/delete the `dolphinscheduler` deployment:
```bash ```bash
$ helm delete --purge dolphinscheduler $ helm uninstall dolphinscheduler
``` ```
The command removes all the Kubernetes components associated with the chart and deletes the release. The command removes all the Kubernetes components associated with the chart and deletes the release.
@ -220,32 +221,6 @@ The following tables lists the configurable parameters of the Dolphins Scheduler
| `api.persistentVolumeClaim.storageClassName` | `api` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` | | `api.persistentVolumeClaim.storageClassName` | `api` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `api.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` | | `api.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | | | | | |
| `frontend.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
| `frontend.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
| `frontend.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
| `frontend.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
| `frontend.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `frontend.tolerations` | If specified, the pod's tolerations | `{}` |
| `frontend.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `frontend.resources` | The `resource` limit and request config for frontend server. | `{}` |
| `frontend.annotations` | The `annotations` for frontend server. | `{}` |
| `frontend.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `frontend.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `frontend.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `frontend.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `frontend.livenessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` |
| `frontend.livenessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` |
| `frontend.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `frontend.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `frontend.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `frontend.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `frontend.readinessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` |
| `frontend.readinessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` |
| `frontend.persistentVolumeClaim.enabled` | Set `frontend.persistentVolumeClaim.enabled` to `true` to mount a new volume for `frontend` | `false` |
| `frontend.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `frontend.persistentVolumeClaim.storageClassName` | `frontend` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `frontend.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `ingress.enabled` | Enable ingress | `false` |
| `ingress.host` | Ingress host | `dolphinscheduler.org` |
| `ingress.path` | Ingress path | `/` |
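As an illustration of how the parameters above are usually overridden in practice, a hedged sketch (file name and chosen values are arbitrary examples):

```bash
# Hedged sketch: collect overrides in a values file instead of many --set flags.
$ cat > values-override.yaml <<'EOF'
ingress:
  enabled: true
  host: "dolphinscheduler.example.org"
  path: "/"
api:
  persistentVolumeClaim:
    enabled: true
    storage: "50Gi"
EOF
$ helm install dolphinscheduler . -f values-override.yaml
```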

25
docker/kubernetes/dolphinscheduler/requirements.yaml

@ -1,25 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
dependencies:
- name: postgresql
version: 8.x.x
repository: https://charts.bitnami.com/bitnami
condition: postgresql.enabled
- name: zookeeper
version: 5.x.x
repository: https://charts.bitnami.com/bitnami
condition: redis.enabled
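With `requirements.yaml` removed, the same dependencies are presumably declared directly in `Chart.yaml` (Helm 3, `apiVersion: v2`). A sketch of that stanza, reusing the versions above (the old `condition: redis.enabled` looks like it was meant to be `zookeeper.enabled`):

```bash
# Hedged sketch of the Helm 3 equivalent: dependencies live in Chart.yaml.
# Versions copied from the removed requirements.yaml; adjust as needed.
$ cat >> Chart.yaml <<'EOF'
dependencies:
  - name: postgresql
    version: 8.x.x
    repository: https://charts.bitnami.com/bitnami
    condition: postgresql.enabled
  - name: zookeeper
    version: 5.x.x
    repository: https://charts.bitnami.com/bitnami
    condition: zookeeper.enabled
EOF
$ helm dependency update .
```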

6
docker/kubernetes/dolphinscheduler/templates/NOTES.txt

@ -15,9 +15,9 @@
# limitations under the License. # limitations under the License.
# #
** Please be patient while the chart is being deployed ** ** Please be patient while the chart Dolphinscheduler {{ .Chart.AppVersion }} is being deployed **
1. Get the Dolphinscheduler URL by running: Get the Dolphinscheduler URL by running:
{{- if .Values.ingress.enabled }} {{- if .Values.ingress.enabled }}
@ -26,6 +26,6 @@
{{- else }} {{- else }}
kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "dolphinscheduler.fullname" . }}-frontend 8888:8888 kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "dolphinscheduler.fullname" . }}-api 12345:12345
{{- end }} {{- end }}
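With the frontend service gone, the rendered note now points users at the api service. In practice that looks roughly like the following sketch (release name and namespace are examples):

```bash
# Hedged sketch: reach the api server locally once the chart is installed.
# Service name follows the "<fullname>-api" pattern used by the templates.
$ kubectl port-forward --namespace default svc/dolphinscheduler-api 12345:12345
# The api listens on 12345; the web UI path depends on the DolphinScheduler version.
```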

4
docker/kubernetes/dolphinscheduler/templates/_helpers.tpl

@ -135,7 +135,7 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
Create a default dolphinscheduler worker base dir. Create a default dolphinscheduler worker base dir.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}} */}}
{{- define "dolphinscheduler.worker.base.dir" -}} {{- define "dolphinscheduler.data.basedir.path" -}}
{{- $name := default "/tmp/dolphinscheduler" .Values.worker.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH -}} {{- $name := default "/tmp/dolphinscheduler" .Values.common.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH -}}
{{- printf "%s" $name | trunc 63 | trimSuffix "/" -}} {{- printf "%s" $name | trunc 63 | trimSuffix "/" -}}
{{- end -}} {{- end -}}

1
docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-alert.yaml

@ -24,6 +24,7 @@ metadata:
app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/managed-by: {{ .Release.Service }}
data: data:
DOLPHINSCHEDULER_OPTS: {{ .Values.alert.configmap.DOLPHINSCHEDULER_OPTS | quote }}
ALERT_PLUGIN_DIR: {{ .Values.alert.configmap.ALERT_PLUGIN_DIR | quote }} ALERT_PLUGIN_DIR: {{ .Values.alert.configmap.ALERT_PLUGIN_DIR | quote }}
XLS_FILE_PATH: {{ .Values.alert.configmap.XLS_FILE_PATH | quote }} XLS_FILE_PATH: {{ .Values.alert.configmap.XLS_FILE_PATH | quote }}
MAIL_SERVER_HOST: {{ .Values.alert.configmap.MAIL_SERVER_HOST | quote }} MAIL_SERVER_HOST: {{ .Values.alert.configmap.MAIL_SERVER_HOST | quote }}

21
docker/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-frontend.yaml → docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-api.yaml

@ -14,22 +14,15 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
# #
{{- if .Values.api.configmap }}
apiVersion: v1 apiVersion: v1
kind: Service kind: ConfigMap
metadata: metadata:
name: {{ include "dolphinscheduler.fullname" . }}-frontend name: {{ include "dolphinscheduler.fullname" . }}-api
labels: labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api
app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/managed-by: {{ .Release.Service }}
spec: data:
ports: DOLPHINSCHEDULER_OPTS: {{ .Values.api.configmap.DOLPHINSCHEDULER_OPTS | quote }}
- port: 8888 {{- end }}
targetPort: tcp-port
protocol: TCP
name: tcp-port
selector:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: frontend

8
docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-common.yaml

@ -24,12 +24,14 @@ metadata:
app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/managed-by: {{ .Release.Service }}
data: data:
DOLPHINSCHEDULER_ENV_PATH: {{ .Values.common.configmap.DOLPHINSCHEDULER_ENV_PATH | quote }} DOLPHINSCHEDULER_ENV: |-
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: {{ .Values.common.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH | quote }} {{- range .Values.common.configmap.DOLPHINSCHEDULER_ENV }}
{{ . }}
{{- end }}
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: {{ include "dolphinscheduler.data.basedir.path" . | quote }}
RESOURCE_STORAGE_TYPE: {{ .Values.common.configmap.RESOURCE_STORAGE_TYPE | quote }} RESOURCE_STORAGE_TYPE: {{ .Values.common.configmap.RESOURCE_STORAGE_TYPE | quote }}
RESOURCE_UPLOAD_PATH: {{ .Values.common.configmap.RESOURCE_UPLOAD_PATH | quote }} RESOURCE_UPLOAD_PATH: {{ .Values.common.configmap.RESOURCE_UPLOAD_PATH | quote }}
FS_DEFAULT_FS: {{ .Values.common.configmap.FS_DEFAULT_FS | quote }} FS_DEFAULT_FS: {{ .Values.common.configmap.FS_DEFAULT_FS | quote }}
FS_S3A_ENDPOINT: {{ .Values.common.configmap.FS_S3A_ENDPOINT | quote }} FS_S3A_ENDPOINT: {{ .Values.common.configmap.FS_S3A_ENDPOINT | quote }}
FS_S3A_ACCESS_KEY: {{ .Values.common.configmap.FS_S3A_ACCESS_KEY | quote }} FS_S3A_ACCESS_KEY: {{ .Values.common.configmap.FS_S3A_ACCESS_KEY | quote }}
FS_S3A_SECRET_KEY: {{ .Values.common.configmap.FS_S3A_SECRET_KEY | quote }}
{{- end }} {{- end }}
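A quick way to see how the `DOLPHINSCHEDULER_ENV` list and the other keys end up in the rendered ConfigMap (a sketch; `--show-only` requires Helm 3, and the ConfigMap name depends on the chart's fullname helper):

```bash
# Hedged sketch: render just this ConfigMap locally, or read it from a running cluster.
$ helm template dolphinscheduler . --show-only templates/configmap-dolphinscheduler-common.yaml
$ kubectl get configmap dolphinscheduler-common -o yaml
```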

2
docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-master.yaml

@ -24,6 +24,7 @@ metadata:
app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/managed-by: {{ .Release.Service }}
data: data:
DOLPHINSCHEDULER_OPTS: {{ .Values.master.configmap.DOLPHINSCHEDULER_OPTS | quote }}
MASTER_EXEC_THREADS: {{ .Values.master.configmap.MASTER_EXEC_THREADS | quote }} MASTER_EXEC_THREADS: {{ .Values.master.configmap.MASTER_EXEC_THREADS | quote }}
MASTER_EXEC_TASK_NUM: {{ .Values.master.configmap.MASTER_EXEC_TASK_NUM | quote }} MASTER_EXEC_TASK_NUM: {{ .Values.master.configmap.MASTER_EXEC_TASK_NUM | quote }}
MASTER_HEARTBEAT_INTERVAL: {{ .Values.master.configmap.MASTER_HEARTBEAT_INTERVAL | quote }} MASTER_HEARTBEAT_INTERVAL: {{ .Values.master.configmap.MASTER_HEARTBEAT_INTERVAL | quote }}
@ -32,5 +33,4 @@ data:
MASTER_MAX_CPULOAD_AVG: {{ .Values.master.configmap.MASTER_MAX_CPULOAD_AVG | quote }} MASTER_MAX_CPULOAD_AVG: {{ .Values.master.configmap.MASTER_MAX_CPULOAD_AVG | quote }}
MASTER_RESERVED_MEMORY: {{ .Values.master.configmap.MASTER_RESERVED_MEMORY | quote }} MASTER_RESERVED_MEMORY: {{ .Values.master.configmap.MASTER_RESERVED_MEMORY | quote }}
MASTER_LISTEN_PORT: {{ .Values.master.configmap.MASTER_LISTEN_PORT | quote }} MASTER_LISTEN_PORT: {{ .Values.master.configmap.MASTER_LISTEN_PORT | quote }}
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: {{ include "dolphinscheduler.worker.base.dir" . | quote }}
{{- end }} {{- end }}

7
docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-worker.yaml

@ -24,17 +24,12 @@ metadata:
app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/managed-by: {{ .Release.Service }}
data: data:
DOLPHINSCHEDULER_OPTS: {{ .Values.worker.configmap.DOLPHINSCHEDULER_OPTS | quote }}
WORKER_EXEC_THREADS: {{ .Values.worker.configmap.WORKER_EXEC_THREADS | quote }} WORKER_EXEC_THREADS: {{ .Values.worker.configmap.WORKER_EXEC_THREADS | quote }}
WORKER_HEARTBEAT_INTERVAL: {{ .Values.worker.configmap.WORKER_HEARTBEAT_INTERVAL | quote }} WORKER_HEARTBEAT_INTERVAL: {{ .Values.worker.configmap.WORKER_HEARTBEAT_INTERVAL | quote }}
WORKER_FETCH_TASK_NUM: {{ .Values.worker.configmap.WORKER_FETCH_TASK_NUM | quote }}
WORKER_MAX_CPULOAD_AVG: {{ .Values.worker.configmap.WORKER_MAX_CPULOAD_AVG | quote }} WORKER_MAX_CPULOAD_AVG: {{ .Values.worker.configmap.WORKER_MAX_CPULOAD_AVG | quote }}
WORKER_RESERVED_MEMORY: {{ .Values.worker.configmap.WORKER_RESERVED_MEMORY | quote }} WORKER_RESERVED_MEMORY: {{ .Values.worker.configmap.WORKER_RESERVED_MEMORY | quote }}
WORKER_LISTEN_PORT: {{ .Values.worker.configmap.WORKER_LISTEN_PORT | quote }} WORKER_LISTEN_PORT: {{ .Values.worker.configmap.WORKER_LISTEN_PORT | quote }}
WORKER_GROUP: {{ .Values.worker.configmap.WORKER_GROUP | quote }} WORKER_GROUP: {{ .Values.worker.configmap.WORKER_GROUP | quote }}
WORKER_WEIGHT: {{ .Values.worker.configmap.WORKER_WEIGHT | quote }} WORKER_WEIGHT: {{ .Values.worker.configmap.WORKER_WEIGHT | quote }}
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: {{ include "dolphinscheduler.worker.base.dir" . | quote }}
dolphinscheduler_env.sh: |-
{{- range .Values.worker.configmap.DOLPHINSCHEDULER_ENV }}
{{ . }}
{{- end }}
{{- end }} {{- end }}

68
docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml

@ -57,35 +57,6 @@ spec:
{{- if .Values.alert.tolerations }} {{- if .Values.alert.tolerations }}
tolerations: {{- toYaml . | nindent 8 }} tolerations: {{- toYaml . | nindent 8 }}
{{- end }} {{- end }}
initContainers:
- name: init-database
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do
counter=$((counter+1))
if [ $counter == 5 ]; then
echo "Error: Couldn't connect to database."
exit 1
fi
echo "Trying to connect to database at ${DATABASE_HOST}:${DATABASE_PORT}. Attempt $counter."
sleep 60
done
env:
- name: DATABASE_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: DATABASE_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
{{- if .Values.image.pullSecrets }} {{- if .Values.image.pullSecrets }}
imagePullSecrets: imagePullSecrets:
- name: {{ include "dolphinscheduler.image.pullSecrets" . }} - name: {{ include "dolphinscheduler.image.pullSecrets" . }}
@ -93,14 +64,17 @@ spec:
containers: containers:
- name: {{ include "dolphinscheduler.fullname" . }}-alert - name: {{ include "dolphinscheduler.fullname" . }}-alert
image: {{ include "dolphinscheduler.image.repository" . | quote }} image: {{ include "dolphinscheduler.image.repository" . | quote }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
args: args:
- "alert-server" - "alert-server"
imagePullPolicy: {{ .Values.image.pullPolicy }}
env: env:
- name: DOLPHINSCHEDULER_OPTS
value: {{ default "" .Values.alert.jvmOptions }}
- name: TZ - name: TZ
value: {{ .Values.timezone }} value: {{ .Values.timezone }}
- name: DOLPHINSCHEDULER_OPTS
valueFrom:
configMapKeyRef:
key: DOLPHINSCHEDULER_OPTS
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: ALERT_PLUGIN_DIR - name: ALERT_PLUGIN_DIR
valueFrom: valueFrom:
configMapKeyRef: configMapKeyRef:
@ -228,36 +202,6 @@ spec:
{{- else }} {{- else }}
value: {{ .Values.externalDatabase.params | quote }} value: {{ .Values.externalDatabase.params | quote }}
{{- end }} {{- end }}
- name: RESOURCE_STORAGE_TYPE
valueFrom:
configMapKeyRef:
key: RESOURCE_STORAGE_TYPE
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: RESOURCE_UPLOAD_PATH
valueFrom:
configMapKeyRef:
key: RESOURCE_UPLOAD_PATH
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_DEFAULT_FS
valueFrom:
configMapKeyRef:
key: FS_DEFAULT_FS
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_ENDPOINT
valueFrom:
configMapKeyRef:
key: FS_S3A_ENDPOINT
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_ACCESS_KEY
valueFrom:
configMapKeyRef:
key: FS_S3A_ACCESS_KEY
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_SECRET_KEY
valueFrom:
configMapKeyRef:
key: FS_S3A_SECRET_KEY
name: {{ include "dolphinscheduler.fullname" . }}-common
{{- if .Values.alert.resources }} {{- if .Values.alert.resources }}
resources: resources:
limits: limits:

48
docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml

@ -57,35 +57,6 @@ spec:
{{- if .Values.api.tolerations }} {{- if .Values.api.tolerations }}
tolerations: {{- toYaml . | nindent 8 }} tolerations: {{- toYaml . | nindent 8 }}
{{- end }} {{- end }}
initContainers:
- name: init-database
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do
counter=$((counter+1))
if [ $counter == 5 ]; then
echo "Error: Couldn't connect to database."
exit 1
fi
echo "Trying to connect to database at ${DATABASE_HOST}:${DATABASE_PORT}. Attempt $counter."
sleep 60
done
env:
- name: DATABASE_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: DATABASE_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
{{- if .Values.image.pullSecrets }} {{- if .Values.image.pullSecrets }}
imagePullSecrets: imagePullSecrets:
- name: {{ include "dolphinscheduler.image.pullSecrets" . }} - name: {{ include "dolphinscheduler.image.pullSecrets" . }}
@ -93,17 +64,20 @@ spec:
containers: containers:
- name: {{ include "dolphinscheduler.fullname" . }}-api - name: {{ include "dolphinscheduler.fullname" . }}-api
image: {{ include "dolphinscheduler.image.repository" . | quote }} image: {{ include "dolphinscheduler.image.repository" . | quote }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
args: args:
- "api-server" - "api-server"
ports: ports:
- containerPort: 12345 - containerPort: 12345
name: tcp-port name: tcp-port
imagePullPolicy: {{ .Values.image.pullPolicy }}
env: env:
- name: DOLPHINSCHEDULER_OPTS
value: {{ default "" .Values.api.jvmOptions }}
- name: TZ - name: TZ
value: {{ .Values.timezone }} value: {{ .Values.timezone }}
- name: DOLPHINSCHEDULER_OPTS
valueFrom:
configMapKeyRef:
key: DOLPHINSCHEDULER_OPTS
name: {{ include "dolphinscheduler.fullname" . }}-api
- name: DATABASE_TYPE - name: DATABASE_TYPE
{{- if .Values.postgresql.enabled }} {{- if .Values.postgresql.enabled }}
value: "postgresql" value: "postgresql"
@ -164,7 +138,7 @@ spec:
{{- end }} {{- end }}
- name: ZOOKEEPER_ROOT - name: ZOOKEEPER_ROOT
{{- if .Values.zookeeper.enabled }} {{- if .Values.zookeeper.enabled }}
value: "/dolphinscheduler" value: {{ .Values.zookeeper.zookeeperRoot }}
{{- else }} {{- else }}
value: {{ .Values.externalZookeeper.zookeeperRoot }} value: {{ .Values.externalZookeeper.zookeeperRoot }}
{{- end }} {{- end }}
@ -183,6 +157,7 @@ spec:
configMapKeyRef: configMapKeyRef:
key: FS_DEFAULT_FS key: FS_DEFAULT_FS
name: {{ include "dolphinscheduler.fullname" . }}-common name: {{ include "dolphinscheduler.fullname" . }}-common
{{- if eq .Values.common.configmap.RESOURCE_STORAGE_TYPE "S3" }}
- name: FS_S3A_ENDPOINT - name: FS_S3A_ENDPOINT
valueFrom: valueFrom:
configMapKeyRef: configMapKeyRef:
@ -195,9 +170,10 @@ spec:
name: {{ include "dolphinscheduler.fullname" . }}-common name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_SECRET_KEY - name: FS_S3A_SECRET_KEY
valueFrom: valueFrom:
configMapKeyRef: secretKeyRef:
key: FS_S3A_SECRET_KEY key: fs-s3a-secret-key
name: {{ include "dolphinscheduler.fullname" . }}-common name: {{ printf "%s-%s" .Release.Name "fs-s3a" }}
{{- end }}
{{- if .Values.api.resources }} {{- if .Values.api.resources }}
resources: resources:
limits: limits:

119
docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-frontend.yaml

@ -1,119 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-frontend
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: frontend
spec:
replicas: {{ .Values.frontend.replicas }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: frontend
strategy:
type: {{ .Values.frontend.strategy.type | quote }}
rollingUpdate:
maxSurge: {{ .Values.frontend.strategy.rollingUpdate.maxSurge | quote }}
maxUnavailable: {{ .Values.frontend.strategy.rollingUpdate.maxUnavailable | quote }}
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: frontend
{{- if .Values.alert.annotations }}
annotations:
{{- toYaml .Values.alert.annotations | nindent 8 }}
{{- end }}
spec:
{{- if .Values.frontend.affinity }}
affinity: {{- toYaml .Values.frontend.affinity | nindent 8 }}
{{- end }}
{{- if .Values.frontend.nodeSelector }}
nodeSelector: {{- toYaml .Values.frontend.nodeSelector | nindent 8 }}
{{- end }}
{{- if .Values.frontend.tolerations }}
tolerations: {{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
- name: {{ include "dolphinscheduler.image.pullSecrets" . }}
{{- end }}
containers:
- name: {{ include "dolphinscheduler.fullname" . }}-frontend
image: {{ include "dolphinscheduler.image.repository" . | quote }}
args:
- "frontend"
ports:
- containerPort: 8888
name: tcp-port
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: TZ
value: {{ .Values.timezone }}
- name: FRONTEND_API_SERVER_HOST
value: '{{ include "dolphinscheduler.fullname" . }}-api'
- name: FRONTEND_API_SERVER_PORT
value: "12345"
{{- if .Values.frontend.resources }}
resources:
limits:
memory: {{ .Values.frontend.resources.limits.memory | quote }}
cpu: {{ .Values.frontend.resources.limits.cpu | quote }}
requests:
memory: {{ .Values.frontend.resources.requests.memory | quote }}
cpu: {{ .Values.frontend.resources.requests.cpu | quote }}
{{- end }}
{{- if .Values.frontend.livenessProbe.enabled }}
livenessProbe:
tcpSocket:
port: 8888
initialDelaySeconds: {{ .Values.frontend.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.frontend.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.frontend.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.frontend.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.frontend.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.frontend.readinessProbe.enabled }}
readinessProbe:
tcpSocket:
port: 8888
initialDelaySeconds: {{ .Values.frontend.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.frontend.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.frontend.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.frontend.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.frontend.readinessProbe.failureThreshold }}
{{- end }}
volumeMounts:
- mountPath: "/var/log/nginx"
name: {{ include "dolphinscheduler.fullname" . }}-frontend
volumes:
- name: {{ include "dolphinscheduler.fullname" . }}-frontend
{{- if .Values.frontend.persistentVolumeClaim.enabled }}
persistentVolumeClaim:
claimName: {{ include "dolphinscheduler.fullname" . }}-frontend
{{- else }}
emptyDir: {}
{{- end }}

2
docker/kubernetes/dolphinscheduler/templates/ingress.yaml

@ -30,7 +30,7 @@ spec:
paths: paths:
- path: {{ .Values.ingress.path }} - path: {{ .Values.ingress.path }}
backend: backend:
serviceName: {{ include "dolphinscheduler.fullname" . }}-frontend serviceName: {{ include "dolphinscheduler.fullname" . }}-api
servicePort: tcp-port servicePort: tcp-port
{{- if .Values.ingress.tls.enabled }} {{- if .Values.ingress.tls.enabled }}
tls: tls:

2
docker/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-alert.yaml

@ -25,7 +25,7 @@ metadata:
app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/managed-by: {{ .Release.Service }}
spec: spec:
accessModes: accessModes:
{{- range .Values.alert.persistentVolumeClaim.accessModes }} {{- range .Values.alert.persistentVolumeClaim.accessModes }}
- {{ . | quote }} - {{ . | quote }}
{{- end }} {{- end }}
storageClassName: {{ .Values.alert.persistentVolumeClaim.storageClassName | quote }} storageClassName: {{ .Values.alert.persistentVolumeClaim.storageClassName | quote }}

2
docker/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-api.yaml

@ -25,7 +25,7 @@ metadata:
app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/managed-by: {{ .Release.Service }}
spec: spec:
accessModes: accessModes:
{{- range .Values.api.persistentVolumeClaim.accessModes }} {{- range .Values.api.persistentVolumeClaim.accessModes }}
- {{ . | quote }} - {{ . | quote }}
{{- end }} {{- end }}
storageClassName: {{ .Values.api.persistentVolumeClaim.storageClassName | quote }} storageClassName: {{ .Values.api.persistentVolumeClaim.storageClassName | quote }}

20
docker/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-frontend.yaml → docker/kubernetes/dolphinscheduler/templates/secret-external-fs-s3a.yaml

@ -14,22 +14,16 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
# #
{{- if .Values.frontend.persistentVolumeClaim.enabled }} {{- if eq .Values.common.configmap.RESOURCE_STORAGE_TYPE "S3" }}
apiVersion: v1 apiVersion: v1
kind: PersistentVolumeClaim kind: Secret
metadata: metadata:
name: {{ include "dolphinscheduler.fullname" . }}-frontend name: {{ printf "%s-%s" .Release.Name "fs-s3a" }}
labels: labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-fs-s3a
app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/managed-by: {{ .Release.Service }}
spec: type: Opaque
accessModes: data:
{{- range .Values.frontend.persistentVolumeClaim.accessModes }} fs-s3a-secret-key: {{ .Values.common.configmap.FS_S3A_SECRET_KEY | b64enc | quote }}
- {{ . | quote }}
{{- end }}
storageClassName: {{ .Values.frontend.persistentVolumeClaim.storageClassName | quote }}
resources:
requests:
storage: {{ .Values.frontend.persistentVolumeClaim.storage | quote }}
{{- end }} {{- end }}
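Since the S3 secret key now lives in a generated Secret rather than the common ConfigMap, verifying it after an S3-enabled install might look like this sketch (the access/secret keys are placeholders; the Secret name follows the `<release>-fs-s3a` pattern above):

```bash
# Hedged sketch: enable S3 resource storage and confirm the generated Secret.
$ helm upgrade --install dolphinscheduler . \
    --set common.configmap.RESOURCE_STORAGE_TYPE=S3 \
    --set common.configmap.FS_S3A_ACCESS_KEY=myaccesskey \
    --set common.configmap.FS_S3A_SECRET_KEY=mysecretkey
$ kubectl get secret dolphinscheduler-fs-s3a \
    -o jsonpath="{.data['fs-s3a-secret-key']}" | base64 -d
```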

96
docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml

@ -54,59 +54,6 @@ spec:
{{- if .Values.master.tolerations }} {{- if .Values.master.tolerations }}
tolerations: {{- toYaml . | nindent 8 }} tolerations: {{- toYaml . | nindent 8 }}
{{- end }} {{- end }}
initContainers:
- name: init-zookeeper
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
echo "${ZOOKEEPER_QUORUM}" | awk -F ',' 'BEGIN{ i=1 }{ while( i <= NF ){ print $i; i++ } }' | while read line; do
while ! nc -z ${line%:*} ${line#*:}; do
counter=$((counter+1))
if [ $counter == 5 ]; then
echo "Error: Couldn't connect to zookeeper."
exit 1
fi
echo "Trying to connect to zookeeper at ${line}. Attempt $counter."
sleep 60
done
done
env:
- name: ZOOKEEPER_QUORUM
{{- if .Values.zookeeper.enabled }}
value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"
{{- else }}
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
{{- end }}
- name: init-database
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do
counter=$((counter+1))
if [ $counter == 5 ]; then
echo "Error: Couldn't connect to database."
exit 1
fi
echo "Trying to connect to database at ${DATABASE_HOST}:${DATABASE_PORT}. Attempt $counter."
sleep 60
done
env:
- name: DATABASE_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: DATABASE_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
{{- if .Values.image.pullSecrets }} {{- if .Values.image.pullSecrets }}
imagePullSecrets: imagePullSecrets:
- name: {{ include "dolphinscheduler.image.pullSecrets" . }} - name: {{ include "dolphinscheduler.image.pullSecrets" . }}
@ -114,17 +61,20 @@ spec:
containers: containers:
- name: {{ include "dolphinscheduler.fullname" . }}-master - name: {{ include "dolphinscheduler.fullname" . }}-master
image: {{ include "dolphinscheduler.image.repository" . | quote }} image: {{ include "dolphinscheduler.image.repository" . | quote }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
args: args:
- "master-server" - "master-server"
ports: ports:
- containerPort: {{ .Values.master.configmap.MASTER_LISTEN_PORT }} - containerPort: {{ .Values.master.configmap.MASTER_LISTEN_PORT }}
name: "master-port" name: "master-port"
imagePullPolicy: {{ .Values.image.pullPolicy }}
env: env:
- name: DOLPHINSCHEDULER_OPTS
value: {{ default "" .Values.master.jvmOptions }}
- name: TZ - name: TZ
value: {{ .Values.timezone }} value: {{ .Values.timezone }}
- name: DOLPHINSCHEDULER_OPTS
valueFrom:
configMapKeyRef:
key: DOLPHINSCHEDULER_OPTS
name: {{ include "dolphinscheduler.fullname" . }}-master
- name: MASTER_EXEC_THREADS - name: MASTER_EXEC_THREADS
valueFrom: valueFrom:
configMapKeyRef: configMapKeyRef:
@ -168,7 +118,7 @@ spec:
- name: DOLPHINSCHEDULER_DATA_BASEDIR_PATH - name: DOLPHINSCHEDULER_DATA_BASEDIR_PATH
valueFrom: valueFrom:
configMapKeyRef: configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-master name: {{ include "dolphinscheduler.fullname" . }}-common
key: DOLPHINSCHEDULER_DATA_BASEDIR_PATH key: DOLPHINSCHEDULER_DATA_BASEDIR_PATH
- name: DATABASE_TYPE - name: DATABASE_TYPE
{{- if .Values.postgresql.enabled }} {{- if .Values.postgresql.enabled }}
@ -230,40 +180,10 @@ spec:
{{- end }} {{- end }}
- name: ZOOKEEPER_ROOT - name: ZOOKEEPER_ROOT
{{- if .Values.zookeeper.enabled }} {{- if .Values.zookeeper.enabled }}
value: "/dolphinscheduler" value: {{ .Values.zookeeper.zookeeperRoot }}
{{- else }} {{- else }}
value: {{ .Values.externalZookeeper.zookeeperRoot }} value: {{ .Values.externalZookeeper.zookeeperRoot }}
{{- end }} {{- end }}
- name: RESOURCE_STORAGE_TYPE
valueFrom:
configMapKeyRef:
key: RESOURCE_STORAGE_TYPE
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: RESOURCE_UPLOAD_PATH
valueFrom:
configMapKeyRef:
key: RESOURCE_UPLOAD_PATH
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_DEFAULT_FS
valueFrom:
configMapKeyRef:
key: FS_DEFAULT_FS
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_ENDPOINT
valueFrom:
configMapKeyRef:
key: FS_S3A_ENDPOINT
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_ACCESS_KEY
valueFrom:
configMapKeyRef:
key: FS_S3A_ACCESS_KEY
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_SECRET_KEY
valueFrom:
configMapKeyRef:
key: FS_S3A_SECRET_KEY
name: {{ include "dolphinscheduler.fullname" . }}-common
{{- if .Values.master.resources }} {{- if .Values.master.resources }}
resources: resources:
limits: limits:
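The JVM options now come from a per-service ConfigMap key instead of a `jvmOptions` value. Overriding them for the master could look like this sketch (the heap sizes are examples only):

```bash
# Hedged sketch: tune the master JVM via the new DOLPHINSCHEDULER_OPTS ConfigMap key.
$ helm upgrade --install dolphinscheduler . \
    --set master.configmap.DOLPHINSCHEDULER_OPTS="-Xms1g -Xmx1g -Xmn512m"
```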

184
docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml

@ -54,59 +54,6 @@ spec:
{{- if .Values.worker.tolerations }} {{- if .Values.worker.tolerations }}
tolerations: {{- toYaml . | nindent 8 }} tolerations: {{- toYaml . | nindent 8 }}
{{- end }} {{- end }}
initContainers:
- name: init-zookeeper
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
echo "${ZOOKEEPER_QUORUM}" | awk -F ',' 'BEGIN{ i=1 }{ while( i <= NF ){ print $i; i++ } }' | while read line; do
while ! nc -z ${line%:*} ${line#*:}; do
counter=$((counter+1))
if [ $counter == 5 ]; then
echo "Error: Couldn't connect to zookeeper."
exit 1
fi
echo "Trying to connect to zookeeper at ${line}. Attempt $counter."
sleep 60
done
done
env:
- name: ZOOKEEPER_QUORUM
{{- if .Values.zookeeper.enabled }}
value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"
{{- else }}
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
{{- end }}
- name: init-database
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do
counter=$((counter+1))
if [ $counter == 5 ]; then
echo "Error: Couldn't connect to database."
exit 1
fi
echo "Trying to connect to database at ${DATABASE_HOST}:${DATABASE_PORT}. Attempt $counter."
sleep 60
done
env:
- name: DATABASE_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: DATABASE_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
{{- if .Values.image.pullSecrets }} {{- if .Values.image.pullSecrets }}
imagePullSecrets: imagePullSecrets:
- name: {{ include "dolphinscheduler.image.pullSecrets" . }} - name: {{ include "dolphinscheduler.image.pullSecrets" . }}
@ -114,29 +61,27 @@ spec:
containers: containers:
- name: {{ include "dolphinscheduler.fullname" . }}-worker - name: {{ include "dolphinscheduler.fullname" . }}-worker
image: {{ include "dolphinscheduler.image.repository" . | quote }} image: {{ include "dolphinscheduler.image.repository" . | quote }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
args: args:
- "worker-server" - "worker-server"
ports: ports:
- containerPort: {{ .Values.worker.configmap.WORKER_LISTEN_PORT }} - containerPort: {{ .Values.worker.configmap.WORKER_LISTEN_PORT }}
name: "worker-port" name: "worker-port"
- containerPort: 50051 - containerPort: 50051
name: "logs-port" name: "logger-port"
imagePullPolicy: {{ .Values.image.pullPolicy }}
env: env:
- name: DOLPHINSCHEDULER_OPTS
value: {{ default "" .Values.worker.jvmOptions }}
- name: TZ - name: TZ
value: {{ .Values.timezone }} value: {{ .Values.timezone }}
- name: WORKER_EXEC_THREADS - name: DOLPHINSCHEDULER_OPTS
valueFrom: valueFrom:
configMapKeyRef: configMapKeyRef:
key: DOLPHINSCHEDULER_OPTS
name: {{ include "dolphinscheduler.fullname" . }}-worker name: {{ include "dolphinscheduler.fullname" . }}-worker
key: WORKER_EXEC_THREADS - name: WORKER_EXEC_THREADS
- name: WORKER_FETCH_TASK_NUM
valueFrom: valueFrom:
configMapKeyRef: configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-worker name: {{ include "dolphinscheduler.fullname" . }}-worker
key: WORKER_FETCH_TASK_NUM key: WORKER_EXEC_THREADS
- name: WORKER_HEARTBEAT_INTERVAL - name: WORKER_HEARTBEAT_INTERVAL
valueFrom: valueFrom:
configMapKeyRef: configMapKeyRef:
@ -170,8 +115,58 @@ spec:
- name: DOLPHINSCHEDULER_DATA_BASEDIR_PATH - name: DOLPHINSCHEDULER_DATA_BASEDIR_PATH
valueFrom: valueFrom:
configMapKeyRef: configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-master name: {{ include "dolphinscheduler.fullname" . }}-common
key: DOLPHINSCHEDULER_DATA_BASEDIR_PATH key: DOLPHINSCHEDULER_DATA_BASEDIR_PATH
- name: ALERT_PLUGIN_DIR
valueFrom:
configMapKeyRef:
key: ALERT_PLUGIN_DIR
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: XLS_FILE_PATH
valueFrom:
configMapKeyRef:
key: XLS_FILE_PATH
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SERVER_HOST
valueFrom:
configMapKeyRef:
key: MAIL_SERVER_HOST
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SERVER_PORT
valueFrom:
configMapKeyRef:
key: MAIL_SERVER_PORT
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SENDER
valueFrom:
configMapKeyRef:
key: MAIL_SENDER
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_USER
valueFrom:
configMapKeyRef:
key: MAIL_USER
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_PASSWD
valueFrom:
configMapKeyRef:
key: MAIL_PASSWD
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SMTP_STARTTLS_ENABLE
valueFrom:
configMapKeyRef:
key: MAIL_SMTP_STARTTLS_ENABLE
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SMTP_SSL_ENABLE
valueFrom:
configMapKeyRef:
key: MAIL_SMTP_SSL_ENABLE
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SMTP_SSL_TRUST
valueFrom:
configMapKeyRef:
key: MAIL_SMTP_SSL_TRUST
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: DATABASE_TYPE - name: DATABASE_TYPE
{{- if .Values.postgresql.enabled }} {{- if .Values.postgresql.enabled }}
value: "postgresql" value: "postgresql"
@ -232,7 +227,7 @@ spec:
{{- end }} {{- end }}
- name: ZOOKEEPER_ROOT - name: ZOOKEEPER_ROOT
{{- if .Values.zookeeper.enabled }} {{- if .Values.zookeeper.enabled }}
value: "/dolphinscheduler" value: {{ .Values.zookeeper.zookeeperRoot }}
{{- else }} {{- else }}
value: {{ .Values.externalZookeeper.zookeeperRoot }} value: {{ .Values.externalZookeeper.zookeeperRoot }}
{{- end }} {{- end }}
@ -251,6 +246,7 @@ spec:
configMapKeyRef: configMapKeyRef:
key: FS_DEFAULT_FS key: FS_DEFAULT_FS
name: {{ include "dolphinscheduler.fullname" . }}-common name: {{ include "dolphinscheduler.fullname" . }}-common
{{- if eq .Values.common.configmap.RESOURCE_STORAGE_TYPE "S3" }}
- name: FS_S3A_ENDPOINT - name: FS_S3A_ENDPOINT
valueFrom: valueFrom:
configMapKeyRef: configMapKeyRef:
@ -263,54 +259,10 @@ spec:
name: {{ include "dolphinscheduler.fullname" . }}-common name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_SECRET_KEY - name: FS_S3A_SECRET_KEY
valueFrom: valueFrom:
configMapKeyRef: secretKeyRef:
key: FS_S3A_SECRET_KEY key: fs-s3a-secret-key
name: {{ include "dolphinscheduler.fullname" . }}-common name: {{ printf "%s-%s" .Release.Name "fs-s3a" }}
- name: XLS_FILE_PATH {{- end }}
valueFrom:
configMapKeyRef:
key: XLS_FILE_PATH
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SERVER_HOST
valueFrom:
configMapKeyRef:
key: MAIL_SERVER_HOST
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SERVER_PORT
valueFrom:
configMapKeyRef:
key: MAIL_SERVER_PORT
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SENDER
valueFrom:
configMapKeyRef:
key: MAIL_SENDER
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_USER
valueFrom:
configMapKeyRef:
key: MAIL_USER
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_PASSWD
valueFrom:
configMapKeyRef:
key: MAIL_PASSWD
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SMTP_STARTTLS_ENABLE
valueFrom:
configMapKeyRef:
key: MAIL_SMTP_STARTTLS_ENABLE
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SMTP_SSL_ENABLE
valueFrom:
configMapKeyRef:
key: MAIL_SMTP_SSL_ENABLE
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SMTP_SSL_TRUST
valueFrom:
configMapKeyRef:
key: MAIL_SMTP_SSL_TRUST
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: ENTERPRISE_WECHAT_ENABLE - name: ENTERPRISE_WECHAT_ENABLE
valueFrom: valueFrom:
configMapKeyRef: configMapKeyRef:
@ -372,13 +324,13 @@ spec:
failureThreshold: {{ .Values.worker.readinessProbe.failureThreshold }} failureThreshold: {{ .Values.worker.readinessProbe.failureThreshold }}
{{- end }} {{- end }}
volumeMounts: volumeMounts:
- mountPath: {{ include "dolphinscheduler.worker.base.dir" . | quote }} - mountPath: {{ include "dolphinscheduler.data.basedir.path" . | quote }}
name: {{ include "dolphinscheduler.fullname" . }}-worker-data name: {{ include "dolphinscheduler.fullname" . }}-worker-data
- mountPath: "/opt/dolphinscheduler/logs" - mountPath: "/opt/dolphinscheduler/logs"
name: {{ include "dolphinscheduler.fullname" . }}-worker-logs name: {{ include "dolphinscheduler.fullname" . }}-worker-logs
- mountPath: "/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh" - mountPath: "/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh"
subPath: "dolphinscheduler_env.sh" subPath: "dolphinscheduler_env.sh"
name: {{ include "dolphinscheduler.fullname" . }}-worker-configmap name: {{ include "dolphinscheduler.fullname" . }}-common-env
volumes: volumes:
- name: {{ include "dolphinscheduler.fullname" . }}-worker-data - name: {{ include "dolphinscheduler.fullname" . }}-worker-data
{{- if .Values.worker.persistentVolumeClaim.dataPersistentVolume.enabled }} {{- if .Values.worker.persistentVolumeClaim.dataPersistentVolume.enabled }}
@ -394,12 +346,12 @@ spec:
{{- else }} {{- else }}
emptyDir: {} emptyDir: {}
{{- end }} {{- end }}
- name: {{ include "dolphinscheduler.fullname" . }}-worker-configmap - name: {{ include "dolphinscheduler.fullname" . }}-common-env
configMap: configMap:
defaultMode: 0777 defaultMode: 0777
name: {{ include "dolphinscheduler.fullname" . }}-worker name: {{ include "dolphinscheduler.fullname" . }}-common
items: items:
- key: dolphinscheduler_env.sh - key: DOLPHINSCHEDULER_ENV
path: dolphinscheduler_env.sh path: dolphinscheduler_env.sh
{{- if .Values.worker.persistentVolumeClaim.enabled }} {{- if .Values.worker.persistentVolumeClaim.enabled }}
volumeClaimTemplates: volumeClaimTemplates:
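Because `dolphinscheduler_env.sh` is now projected from the common ConfigMap's `DOLPHINSCHEDULER_ENV` key, it can be checked in a running worker pod. A sketch, assuming a release called `dolphinscheduler` and the StatefulSet's `<fullname>-worker` naming:

```bash
# Hedged sketch: confirm the environment file mounted into a worker pod.
$ kubectl exec dolphinscheduler-worker-0 -- \
    cat /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh
```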

4
docker/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-worker-headless.yaml

@ -30,9 +30,9 @@ spec:
protocol: TCP protocol: TCP
name: worker-port name: worker-port
- port: 50051 - port: 50051
targetPort: logs-port targetPort: logger-port
protocol: TCP protocol: TCP
name: logs-port name: logger-port
selector: selector:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker
app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/instance: {{ .Release.Name }}

310
docker/kubernetes/dolphinscheduler/values.yaml

@ -58,62 +58,74 @@ externalDatabase:
# If not exists external zookeeper, by default, Dolphinscheduler's zookeeper will use it. # If not exists external zookeeper, by default, Dolphinscheduler's zookeeper will use it.
zookeeper: zookeeper:
enabled: true enabled: true
taskQueue: "zookeeper" fourlwCommandsWhitelist: srvr,ruok,wchs,cons
config: null
service: service:
port: "2181" port: "2181"
persistence: persistence:
enabled: false enabled: false
size: "20Gi" size: "20Gi"
storageClass: "-" storageClass: "-"
zookeeperRoot: "/dolphinscheduler"
# If exists external zookeeper, and set zookeeper.enable value to false. # If exists external zookeeper, and set zookeeper.enable value to false.
# If zookeeper.enable is false, Dolphinscheduler's zookeeper will use it. # If zookeeper.enable is false, Dolphinscheduler's zookeeper will use it.
externalZookeeper: externalZookeeper:
taskQueue: "zookeeper"
zookeeperQuorum: "127.0.0.1:2181" zookeeperQuorum: "127.0.0.1:2181"
zookeeperRoot: "/dolphinscheduler" zookeeperRoot: "/dolphinscheduler"
common: common:
## ConfigMap
configmap: configmap:
DOLPHINSCHEDULER_ENV_PATH: "/tmp/dolphinscheduler/env" DOLPHINSCHEDULER_ENV:
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler/files" - "export HADOOP_HOME=/opt/soft/hadoop"
RESOURCE_STORAGE_TYPE: "NONE" - "export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop"
RESOURCE_UPLOAD_PATH: "/ds" - "export SPARK_HOME1=/opt/soft/spark1"
FS_DEFAULT_FS: "s3a://xxxx" - "export SPARK_HOME2=/opt/soft/spark2"
- "export PYTHON_HOME=/usr/bin/python"
- "export JAVA_HOME=/usr/lib/jvm/java-1.8-openjdk"
- "export HIVE_HOME=/opt/soft/hive"
- "export FLINK_HOME=/opt/soft/flink"
- "export DATAX_HOME=/opt/soft/datax/bin/datax.py"
- "export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$FLINK_HOME/bin:$PATH"
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
RESOURCE_STORAGE_TYPE: "HDFS"
RESOURCE_UPLOAD_PATH: "/dolphinscheduler"
FS_DEFAULT_FS: "file:///"
FS_S3A_ENDPOINT: "s3.xxx.amazonaws.com" FS_S3A_ENDPOINT: "s3.xxx.amazonaws.com"
FS_S3A_ACCESS_KEY: "xxxxxxx" FS_S3A_ACCESS_KEY: "xxxxxxx"
FS_S3A_SECRET_KEY: "xxxxxxx" FS_S3A_SECRET_KEY: "xxxxxxx"
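Since `DOLPHINSCHEDULER_ENV` is a YAML list, it is easier to override with a values file than with `--set`. A sketch (the paths are examples for a site-specific image):

```bash
# Hedged sketch: replace the default environment exports with site-specific paths.
$ cat > env-override.yaml <<'EOF'
common:
  configmap:
    DOLPHINSCHEDULER_ENV:
      - "export JAVA_HOME=/usr/lib/jvm/java-1.8-openjdk"
      - "export PYTHON_HOME=/usr/bin/python"
      - "export DATAX_HOME=/opt/soft/datax/bin/datax.py"
      - "export PATH=$JAVA_HOME/bin:$PYTHON_HOME:$PATH"
EOF
$ helm upgrade --install dolphinscheduler . -f env-override.yaml
```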
master: master:
## PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down.
podManagementPolicy: "Parallel" podManagementPolicy: "Parallel"
## Replicas is the desired number of replicas of the given Template.
replicas: "3" replicas: "3"
# NodeSelector is a selector which must be true for the pod to fit on a node. ## You can use annotations to attach arbitrary non-identifying metadata to objects.
# Selector which must match a node's labels for the pod to be scheduled on that node. ## Clients such as tools and libraries can retrieve this metadata.
# More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ annotations: {}
## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
## NodeSelector is a selector which must be true for the pod to fit on a node.
## Selector which must match a node's labels for the pod to be scheduled on that node.
## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
nodeSelector: {} nodeSelector: {}
# Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
# effectively unioning the set of nodes tolerated by the pod and the RuntimeClass. ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
tolerations: [] tolerations: []
# Affinity is a group of affinity scheduling rules. ## Compute Resources required by this container. Cannot be updated.
# If specified, the pod's scheduling constraints. ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
# The jvm options for java instance startup
jvmOptions: ""
resources: {} resources: {}
# limits: # resources:
# memory: "18Gi" # limits:
# cpu: "4" # memory: "8Gi"
# requests: # cpu: "4"
# memory: "2Gi" # requests:
# cpu: "500m" # memory: "2Gi"
# You can use annotations to attach arbitrary non-identifying metadata to objects. # cpu: "500m"
# Clients such as tools and libraries can retrieve this metadata. ## ConfigMap
annotations: {}
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
configmap: configmap:
DOLPHINSCHEDULER_OPTS: ""
MASTER_EXEC_THREADS: "100" MASTER_EXEC_THREADS: "100"
MASTER_EXEC_TASK_NUM: "20" MASTER_EXEC_TASK_NUM: "20"
MASTER_HEARTBEAT_INTERVAL: "10" MASTER_HEARTBEAT_INTERVAL: "10"
@ -122,6 +134,8 @@ master:
MASTER_MAX_CPULOAD_AVG: "100" MASTER_MAX_CPULOAD_AVG: "100"
MASTER_RESERVED_MEMORY: "0.1" MASTER_RESERVED_MEMORY: "0.1"
MASTER_LISTEN_PORT: "5678" MASTER_LISTEN_PORT: "5678"
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
livenessProbe: livenessProbe:
enabled: true enabled: true
initialDelaySeconds: "30" initialDelaySeconds: "30"
@ -138,7 +152,7 @@ master:
timeoutSeconds: "5" timeoutSeconds: "5"
failureThreshold: "3" failureThreshold: "3"
successThreshold: "1" successThreshold: "1"
## volumeClaimTemplates is a list of claims that pods are allowed to reference. ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. ## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. ## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
## A claim in this list takes precedence over any volumes in the template, with the same name. ## A claim in this list takes precedence over any volumes in the template, with the same name.
@ -150,31 +164,43 @@ master:
storage: "20Gi" storage: "20Gi"
worker: worker:
## PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down.
podManagementPolicy: "Parallel" podManagementPolicy: "Parallel"
## Replicas is the desired number of replicas of the given Template.
replicas: "3" replicas: "3"
# NodeSelector is a selector which must be true for the pod to fit on a node. ## You can use annotations to attach arbitrary non-identifying metadata to objects.
# Selector which must match a node's labels for the pod to be scheduled on that node. ## Clients such as tools and libraries can retrieve this metadata.
# More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ annotations: {}
## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
## NodeSelector is a selector which must be true for the pod to fit on a node.
## Selector which must match a node's labels for the pod to be scheduled on that node.
## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
nodeSelector: {} nodeSelector: {}
# Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
# effectively unioning the set of nodes tolerated by the pod and the RuntimeClass. ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
tolerations: [] tolerations: []
# Affinity is a group of affinity scheduling rules. ## Compute Resources required by this container. Cannot be updated.
# If specified, the pod's scheduling constraints. ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
# The jvm options for java instance startup
jvmOptions: ""
resources: {} resources: {}
# limits: # resources:
# memory: "18Gi" # limits:
# cpu: "4" # memory: "8Gi"
# requests: # cpu: "4"
# memory: "2Gi" # requests:
# cpu: "500m" # memory: "2Gi"
# You can use annotations to attach arbitrary non-identifying metadata to objects. # cpu: "500m"
# Clients such as tools and libraries can retrieve this metadata. ## ConfigMap
annotations: {} configmap:
DOLPHINSCHEDULER_OPTS: ""
WORKER_EXEC_THREADS: "100"
WORKER_HEARTBEAT_INTERVAL: "10"
WORKER_MAX_CPULOAD_AVG: "100"
WORKER_RESERVED_MEMORY: "0.1"
WORKER_LISTEN_PORT: "1234"
WORKER_GROUP: "default"
WORKER_WEIGHT: "100"
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
livenessProbe: livenessProbe:
@ -193,27 +219,7 @@ worker:
timeoutSeconds: "5" timeoutSeconds: "5"
failureThreshold: "3" failureThreshold: "3"
successThreshold: "1" successThreshold: "1"
configmap: ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
WORKER_EXEC_THREADS: "100"
WORKER_HEARTBEAT_INTERVAL: "10"
WORKER_FETCH_TASK_NUM: "3"
WORKER_MAX_CPULOAD_AVG: "100"
WORKER_RESERVED_MEMORY: "0.1"
WORKER_LISTEN_PORT: "1234"
WORKER_GROUP: "default"
WORKER_WEIGHT: "100"
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
DOLPHINSCHEDULER_ENV:
- "export HADOOP_HOME=/opt/soft/hadoop"
- "export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop"
- "export SPARK_HOME1=/opt/soft/spark1"
- "export SPARK_HOME2=/opt/soft/spark2"
- "export PYTHON_HOME=/opt/soft/python"
- "export JAVA_HOME=/opt/soft/java"
- "export HIVE_HOME=/opt/soft/hive"
- "export FLINK_HOME=/opt/soft/flink"
- "export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$FLINK_HOME/bin:$PATH"
## volumeClaimTemplates is a list of claims that pods are allowed to reference.
## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. ## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. ## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
## A claim in this list takes precedence over any volumes in the template, with the same name. ## A claim in this list takes precedence over any volumes in the template, with the same name.
@ -235,38 +241,40 @@ worker:
storage: "20Gi" storage: "20Gi"
alert: alert:
## Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.
replicas: "1"
## The deployment strategy to use to replace existing pods with new ones.
strategy: strategy:
type: "RollingUpdate" type: "RollingUpdate"
rollingUpdate: rollingUpdate:
maxSurge: "25%" maxSurge: "25%"
maxUnavailable: "25%" maxUnavailable: "25%"
replicas: "1" ## You can use annotations to attach arbitrary non-identifying metadata to objects.
# NodeSelector is a selector which must be true for the pod to fit on a node. ## Clients such as tools and libraries can retrieve this metadata.
# Selector which must match a node's labels for the pod to be scheduled on that node. annotations: {}
# More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ ## NodeSelector is a selector which must be true for the pod to fit on a node.
## Selector which must match a node's labels for the pod to be scheduled on that node.
## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
affinity: {}
## Compute Resources required by this container. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
nodeSelector: {} nodeSelector: {}
# Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
# effectively unioning the set of nodes tolerated by the pod and the RuntimeClass. ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
tolerations: [] tolerations: []
# Affinity is a group of affinity scheduling rules. ## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
# If specified, the pod's scheduling constraints. ## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
# The jvm options for java instance startup
jvmOptions: ""
resources: {} resources: {}
# limits: # resources:
# memory: "4Gi" # limits:
# cpu: "1" # memory: "2Gi"
# requests: # cpu: "1"
# memory: "2Gi" # requests:
# cpu: "500m" # memory: "1Gi"
# You can use annotations to attach arbitrary non-identifying metadata to objects. # cpu: "500m"
# Clients such as tools and libraries can retrieve this metadata. ## ConfigMap
annotations: {}
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
configmap: configmap:
DOLPHINSCHEDULER_OPTS: ""
ALERT_PLUGIN_DIR: "/opt/dolphinscheduler/alert/plugin" ALERT_PLUGIN_DIR: "/opt/dolphinscheduler/alert/plugin"
XLS_FILE_PATH: "/tmp/xls" XLS_FILE_PATH: "/tmp/xls"
MAIL_SERVER_HOST: "" MAIL_SERVER_HOST: ""
@ -282,63 +290,6 @@ alert:
ENTERPRISE_WECHAT_SECRET: "" ENTERPRISE_WECHAT_SECRET: ""
ENTERPRISE_WECHAT_AGENT_ID: "" ENTERPRISE_WECHAT_AGENT_ID: ""
ENTERPRISE_WECHAT_USERS: "" ENTERPRISE_WECHAT_USERS: ""
livenessProbe:
enabled: true
initialDelaySeconds: "30"
periodSeconds: "30"
timeoutSeconds: "5"
failureThreshold: "3"
successThreshold: "1"
## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
readinessProbe:
enabled: true
initialDelaySeconds: "30"
periodSeconds: "30"
timeoutSeconds: "5"
failureThreshold: "3"
successThreshold: "1"
## volumeClaimTemplates is a list of claims that pods are allowed to reference.
## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
## A claim in this list takes precedence over any volumes in the template, with the same name.
persistentVolumeClaim:
enabled: false
accessModes:
- "ReadWriteOnce"
storageClassName: "-"
storage: "20Gi"
api:
strategy:
type: "RollingUpdate"
rollingUpdate:
maxSurge: "25%"
maxUnavailable: "25%"
replicas: "1"
# NodeSelector is a selector which must be true for the pod to fit on a node.
# Selector which must match a node's labels for the pod to be scheduled on that node.
# More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
nodeSelector: {}
# Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
# effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
tolerations: []
# Affinity is a group of affinity scheduling rules.
# If specified, the pod's scheduling constraints.
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
# The jvm options for java instance startup
jvmOptions: ""
resources: {}
# limits:
# memory: "4Gi"
# cpu: "2"
# requests:
# memory: "2Gi"
# cpu: "500m"
# You can use annotations to attach arbitrary non-identifying metadata to objects.
# Clients such as tools and libraries can retrieve this metadata.
annotations: {}
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
livenessProbe:
@ -357,10 +308,8 @@ api:
timeoutSeconds: "5" timeoutSeconds: "5"
failureThreshold: "3" failureThreshold: "3"
successThreshold: "1" successThreshold: "1"
## volumeClaimTemplates is a list of claims that pods are allowed to reference. ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. ## More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
## A claim in this list takes precedence over any volumes in the template, with the same name.
persistentVolumeClaim: persistentVolumeClaim:
enabled: false enabled: false
accessModes: accessModes:
@ -368,34 +317,41 @@ api:
storageClassName: "-" storageClassName: "-"
storage: "20Gi" storage: "20Gi"
frontend:
api:
## Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.
replicas: "1"
## The deployment strategy to use to replace existing pods with new ones.
strategy:
type: "RollingUpdate"
rollingUpdate:
maxSurge: "25%"
maxUnavailable: "25%"
## You can use annotations to attach arbitrary non-identifying metadata to objects.
## Clients such as tools and libraries can retrieve this metadata.
annotations: {}
## NodeSelector is a selector which must be true for the pod to fit on a node.
## Selector which must match a node's labels for the pod to be scheduled on that node.
## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
nodeSelector: {}
## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
tolerations: []
## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
## Compute Resources required by this container. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
resources: {}
# limits:
#   memory: "256Mi"
#   cpu: "1"
# requests:
#   memory: "256Mi"
#   cpu: "500m"
# resources:
#   limits:
#     memory: "2Gi"
#     cpu: "1"
#   requests:
#     memory: "1Gi"
#     cpu: "500m"
## ConfigMap
configmap:
DOLPHINSCHEDULER_OPTS: ""
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
livenessProbe:
@ -414,10 +370,8 @@ frontend:
timeoutSeconds: "5" timeoutSeconds: "5"
failureThreshold: "3" failureThreshold: "3"
successThreshold: "1" successThreshold: "1"
## volumeClaimTemplates is a list of claims that pods are allowed to reference. ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. ## More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
## A claim in this list takes precedence over any volumes in the template, with the same name.
persistentVolumeClaim: persistentVolumeClaim:
enabled: false enabled: false
accessModes: accessModes:

8
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java

@ -19,6 +19,7 @@ package org.apache.dolphinscheduler.common;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import java.util.regex.Pattern;
@ -1047,4 +1048,11 @@ public final class Constants {
* snow flake, data center id, this id must be greater than 0 and less than 32
*/
public static final String SNOW_FLAKE_DATA_CENTER_ID = "data.center.id";
/**
* docker & kubernetes
*/
public static final boolean DOCKER_MODE = StringUtils.isNotEmpty(System.getenv("DOCKER"));
public static final boolean KUBERNETES_MODE = StringUtils.isNotEmpty(System.getenv("KUBERNETES_SERVICE_HOST")) && StringUtils.isNotEmpty(System.getenv("KUBERNETES_SERVICE_PORT"));
}
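Note: the two new constants above are evaluated once, at class-load time, from environment variables. Below is a minimal illustrative sketch of the same detection logic, not part of the patch; the class and helper names are invented. KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT are injected by Kubernetes into every pod, and DOCKER is the variable the updated dolphinscheduler-daemon.sh further down also checks.

// Illustrative sketch only (not part of the patch).
public class RuntimeModeSketch {
    private static boolean isNotEmpty(String value) {
        return value != null && !value.isEmpty();
    }

    public static void main(String[] args) {
        // Mirrors Constants.DOCKER_MODE / Constants.KUBERNETES_MODE.
        boolean dockerMode = isNotEmpty(System.getenv("DOCKER"));
        boolean kubernetesMode = isNotEmpty(System.getenv("KUBERNETES_SERVICE_HOST"))
                && isNotEmpty(System.getenv("KUBERNETES_SERVICE_PORT"));
        System.out.println("docker=" + dockerMode + ", kubernetes=" + kubernetesMode);
    }
}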

26
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/NetUtils.java

@ -49,8 +49,6 @@ public class NetUtils {
private static final String NETWORK_PRIORITY_INNER = "inner";
private static final String NETWORK_PRIORITY_OUTER = "outer";
private static final Logger logger = LoggerFactory.getLogger(NetUtils.class);
private static final String ANY_HOST_VALUE = "0.0.0.0";
private static final String LOCAL_HOST_VALUE = "127.0.0.1";
private static InetAddress LOCAL_ADDRESS = null;
private static volatile String HOST_ADDRESS;
@ -58,6 +56,22 @@ public class NetUtils {
throw new UnsupportedOperationException("Construct NetUtils");
}
/**
* get addr like host:port
* @return addr
*/
public static String getAddr(String host, int port) {
return String.format("%s:%d", host, port);
}
/**
* get addr like host:port
* @return addr
*/
public static String getAddr(int port) {
return getAddr(getHost(), port);
}
public static String getHost() {
if (HOST_ADDRESS != null) {
return HOST_ADDRESS;
@ -65,10 +79,10 @@ public class NetUtils {
InetAddress address = getLocalAddress();
if (address != null) {
HOST_ADDRESS = address.getHostAddress();
HOST_ADDRESS = Constants.KUBERNETES_MODE ? address.getHostName() : address.getHostAddress();
return HOST_ADDRESS;
}
return LOCAL_HOST_VALUE;
return Constants.KUBERNETES_MODE ? "localhost" : "127.0.0.1";
}
private static InetAddress getLocalAddress() {
@ -153,8 +167,8 @@ public class NetUtils {
String name = address.getHostAddress();
return (name != null
&& IP_PATTERN.matcher(name).matches()
&& !ANY_HOST_VALUE.equals(name)
&& !LOCAL_HOST_VALUE.equals(name));
&& !address.isAnyLocalAddress()
&& !address.isLoopbackAddress());
}
/**

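The new NetUtils.getAddr helpers centralize the "host:port" formatting that the master/worker registries and several tests previously built by hand (see the call-site changes further down). A minimal usage sketch, assuming the dolphinscheduler-common jar is on the classpath; the port 5678 is arbitrary.

import org.apache.dolphinscheduler.common.utils.NetUtils;

public class AddrSketch {
    public static void main(String[] args) {
        // getAddr(port) resolves the local host first (pod hostname under Kubernetes,
        // otherwise the host IP) and appends the port.
        String localAddr = NetUtils.getAddr(5678);
        // getAddr(host, port) simply formats the pair as "host:port".
        String fixedAddr = NetUtils.getAddr("127.0.0.1", 5678);
        System.out.println(localAddr + " / " + fixedAddr);
    }
}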
9
dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/NetUtilsTest.java

@ -29,6 +29,13 @@ import static org.mockito.Mockito.when;
*/
public class NetUtilsTest {
@Test
public void testGetAddr() {
assertEquals(NetUtils.getHost() + ":5678", NetUtils.getAddr(5678));
assertEquals("127.0.0.1:5678", NetUtils.getAddr("127.0.0.1", 5678));
assertEquals("localhost:1234", NetUtils.getAddr("localhost", 1234));
}
@Test
public void testGetLocalHost() {
assertNotNull(NetUtils.getHost());
@ -45,9 +52,11 @@ public class NetUtilsTest {
assertFalse(NetUtils.isValidV4Address(address));
address = mock(InetAddress.class);
when(address.getHostAddress()).thenReturn("0.0.0.0");
when(address.isAnyLocalAddress()).thenReturn(true);
assertFalse(NetUtils.isValidV4Address(address));
address = mock(InetAddress.class);
when(address.getHostAddress()).thenReturn("127.0.0.1");
when(address.isLoopbackAddress()).thenReturn(true);
assertFalse(NetUtils.isValidV4Address(address));
address = mock(InetAddress.class);
when(address.getHostAddress()).thenReturn("1.2.3.4");

6
dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/OSUtilsTest.java

@ -99,12 +99,6 @@ public class OSUtilsTest {
Assert.assertNotEquals(0, processId);
}
@Test
public void getHost(){
String host = NetUtils.getHost();
Assert.assertNotNull(host);
Assert.assertNotEquals("", host);
}
@Test
public void checkResource(){
boolean resource = OSUtils.checkResource(100,0);
Assert.assertTrue(resource);

1
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/builder/TaskExecutionContextBuilder.java

@ -44,7 +44,6 @@ public class TaskExecutionContextBuilder {
taskExecutionContext.setStartTime(taskInstance.getStartTime());
taskExecutionContext.setTaskType(taskInstance.getTaskType());
taskExecutionContext.setLogPath(taskInstance.getLogPath());
taskExecutionContext.setExecutePath(taskInstance.getExecutePath());
taskExecutionContext.setTaskJson(taskInstance.getTaskJson());
taskExecutionContext.setWorkerGroup(taskInstance.getWorkerGroup());
taskExecutionContext.setHost(taskInstance.getHost());

14
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumer.java

@ -35,7 +35,6 @@ import org.apache.dolphinscheduler.common.task.sqoop.targets.TargetMysqlParamete
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.EnumUtils;
import org.apache.dolphinscheduler.common.utils.FileUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.common.utils.TaskParametersUtils;
@ -222,7 +221,6 @@ public class TaskPriorityQueueConsumer extends Thread {
String userQueue = processService.queryUserQueueByProcessInstanceId(taskInstance.getProcessInstanceId());
taskInstance.getProcessInstance().setQueue(StringUtils.isEmpty(userQueue) ? tenant.getQueue() : userQueue);
taskInstance.getProcessInstance().setTenantCode(tenant.getTenantCode());
taskInstance.setExecutePath(getExecLocalPath(taskInstance));
taskInstance.setResources(getResourceFullNames(taskNode));
SQLTaskExecutionContext sqlTaskExecutionContext = new SQLTaskExecutionContext();
@ -363,18 +361,6 @@ public class TaskPriorityQueueConsumer extends Thread {
}
}
/**
* get execute local path
*
* @return execute local path
*/
private String getExecLocalPath(TaskInstance taskInstance) {
return FileUtils.getProcessExecDir(taskInstance.getProcessDefine().getProjectId(),
taskInstance.getProcessDefine().getId(),
taskInstance.getProcessInstance().getId(),
taskInstance.getId());
}
/**
* whehter tenant is null
*

2
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/registry/MasterRegistry.java

@ -135,7 +135,7 @@ public class MasterRegistry {
*/
private String getLocalAddress() {
return NetUtils.getHost() + ":" + masterConfig.getListenPort();
return NetUtils.getAddr(masterConfig.getListenPort());
}

2
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/ConditionsTaskExecThread.java

@ -125,7 +125,7 @@ public class ConditionsTaskExecThread extends MasterBaseTaskExecThread {
private void initTaskParameters() {
this.taskInstance.setLogPath(LogUtils.getTaskLogPath(taskInstance));
this.taskInstance.setHost(NetUtils.getHost() + Constants.COLON + masterConfig.getListenPort());
this.taskInstance.setHost(NetUtils.getAddr(masterConfig.getListenPort()));
taskInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
taskInstance.setStartTime(new Date());
this.processService.saveTaskInstance(taskInstance);

2
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/DependentTaskExecThread.java

@ -185,7 +185,7 @@ public class DependentTaskExecThread extends MasterBaseTaskExecThread {
private void initTaskParameters() {
taskInstance.setLogPath(LogUtils.getTaskLogPath(taskInstance));
taskInstance.setHost(NetUtils.getHost() + Constants.COLON + masterConfig.getListenPort());
taskInstance.setHost(NetUtils.getAddr(masterConfig.getListenPort()));
taskInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
taskInstance.setStartTime(new Date());
processService.updateTaskInstance(taskInstance);

4
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterSchedulerService.java

@ -178,7 +178,7 @@ public class MasterSchedulerService extends Thread {
}
}
private String getLocalAddress() {
return NetUtils.getHost() + ":" + masterConfig.getListenPort();
return NetUtils.getAddr(masterConfig.getListenPort());
}
}

3
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskExecuteProcessor.java

@ -139,7 +139,7 @@ public class TaskExecuteProcessor implements NettyRequestProcessor {
taskExecutionContext.getProcessInstanceId(),
taskExecutionContext.getTaskInstanceId()));
taskExecutionContext.setHost(NetUtils.getHost() + ":" + workerConfig.getListenPort());
taskExecutionContext.setHost(NetUtils.getAddr(workerConfig.getListenPort()));
taskExecutionContext.setStartTime(new Date());
taskExecutionContext.setLogPath(LogUtils.getTaskLogPath(taskExecutionContext));
taskExecutionContext.setCurrentExecutionStatus(ExecutionStatus.RUNNING_EXECUTION);
@ -147,6 +147,7 @@ public class TaskExecuteProcessor implements NettyRequestProcessor {
// local execute path
String execLocalPath = getExecLocalPath(taskExecutionContext);
logger.info("task instance local execute path : {} ", execLocalPath);
taskExecutionContext.setExecutePath(execLocalPath);
FileUtils.taskLoggerThreadLocal.set(taskLogger);
try {

2
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/registry/WorkerRegistry.java

@ -169,7 +169,7 @@ public class WorkerRegistry {
* get local address
*/
private String getLocalAddress() {
return NetUtils.getHost() + COLON + workerConfig.getListenPort();
return NetUtils.getAddr(workerConfig.getListenPort());
}
/**

4
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/master/dispatch/executor/NettyExecutorManagerTest.java

@ -79,7 +79,7 @@ public class NettyExecutorManagerTest {
.buildProcessDefinitionRelatedInfo(processDefinition)
.create();
ExecutionContext executionContext = new ExecutionContext(context.toCommand(), ExecutorType.WORKER);
executionContext.setHost(Host.of(NetUtils.getHost() + ":" + serverConfig.getListenPort()));
executionContext.setHost(Host.of(NetUtils.getAddr(serverConfig.getListenPort())));
Boolean execute = nettyExecutorManager.execute(executionContext);
Assert.assertTrue(execute);
nettyRemotingServer.close();
@ -98,7 +98,7 @@ public class NettyExecutorManagerTest {
.buildProcessDefinitionRelatedInfo(processDefinition)
.create();
ExecutionContext executionContext = new ExecutionContext(context.toCommand(), ExecutorType.WORKER);
executionContext.setHost(Host.of(NetUtils.getHost() + ":4444"));
executionContext.setHost(Host.of(NetUtils.getAddr(4444)));
nettyExecutorManager.execute(executionContext);
}

4
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/registry/ZookeeperNodeManagerTest.java

@ -75,7 +75,7 @@ public class ZookeeperNodeManagerTest {
Set<String> masterNodes = zookeeperNodeManager.getMasterNodes();
Assert.assertTrue(CollectionUtils.isNotEmpty(masterNodes));
Assert.assertEquals(1, masterNodes.size());
Assert.assertEquals(NetUtils.getHost() + ":" + masterConfig.getListenPort(), masterNodes.iterator().next());
Assert.assertEquals(NetUtils.getAddr(masterConfig.getListenPort()), masterNodes.iterator().next());
workerRegistry.unRegistry();
}
@ -105,7 +105,7 @@ public class ZookeeperNodeManagerTest {
Set<String> workerNodes = zookeeperNodeManager.getWorkerGroupNodes("default");
Assert.assertTrue(CollectionUtils.isNotEmpty(workerNodes));
Assert.assertEquals(1, workerNodes.size());
Assert.assertEquals(NetUtils.getHost() + ":" + workerConfig.getListenPort(), workerNodes.iterator().next());
Assert.assertEquals(NetUtils.getAddr(workerConfig.getListenPort()), workerNodes.iterator().next());
workerRegistry.unRegistry();
}
}

2
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/utils/ExecutionContextTestUtils.java

@ -47,7 +47,7 @@ public class ExecutionContextTestUtils {
.buildProcessDefinitionRelatedInfo(processDefinition)
.create();
ExecutionContext executionContext = new ExecutionContext(context.toCommand(), ExecutorType.WORKER);
executionContext.setHost(Host.of(NetUtils.getHost() + ":" + port));
executionContext.setHost(Host.of(NetUtils.getAddr(port)));
return executionContext;
}

2
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/registry/WorkerRegistryTest.java

@ -113,7 +113,7 @@ public class WorkerRegistryTest {
int i = 0;
for (String workerGroup : workerConfig.getWorkerGroups()) {
String workerZkPath = workerPath + "/" + workerGroup.trim() + "/" + (NetUtils.getHost() + ":" + workerConfig.getListenPort());
String workerZkPath = workerPath + "/" + workerGroup.trim() + "/" + (NetUtils.getAddr(workerConfig.getListenPort()));
String heartbeat = zookeeperRegistryCenter.getZookeeperCachedOperator().get(workerZkPath);
if (0 == i) {
Assert.assertTrue(workerZkPath.startsWith("/dolphinscheduler/nodes/worker/test/"));

0
dolphinscheduler-ui/install-dolphinscheduler-ui.sh

6
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/udp/udp.vue

@ -213,10 +213,10 @@
this.timeout = dag.timeout || 0
this.checkedTimeout = this.timeout !== 0
this.$nextTick(() => {
if (dag.tenantId === -1) {
if (dag.tenantId > -1) {
this.tenantId = this.store.state.user.userInfo.tenantId
} else {
this.tenantId = dag.tenantId
} else if (this.store.state.user.userInfo.tenantId) {
this.tenantId = this.store.state.user.userInfo.tenantId
}
})
},

22
dolphinscheduler-ui/src/js/conf/home/pages/monitor/pages/servers/master.vue

@ -21,8 +21,8 @@
<div class="row-box" v-for="(item,$index) in masterList" :key="$index"> <div class="row-box" v-for="(item,$index) in masterList" :key="$index">
<div class="row-title"> <div class="row-title">
<div class="left"> <div class="left">
<span class="sp">IP: {{item.host}}</span> <span class="sp">Host: {{item.host}}</span>
<span class="sp">{{$t('Zk registration directory')}}: {{item.zkDirectory}}</span> <span>{{$t('Zk registration directory')}}: <a href="javascript:" @click="_showZkDirectories(item)" class="links">{{$t('Directory detail')}}</a></span>
</div> </div>
<div class="right"> <div class="right">
<span class="sp">{{$t('Create Time')}}: {{item.createTime | formatDate}}</span> <span class="sp">{{$t('Create Time')}}: {{item.createTime | formatDate}}</span>
@ -57,6 +57,11 @@
</div>
</div>
</div>
<el-drawer
:visible.sync="drawer"
:with-header="false">
<zookeeper-directories-popup :zkDirectories = zkDirectories></zookeeper-directories-popup>
</el-drawer>
<div v-if="!masterList.length"> <div v-if="!masterList.length">
<m-no-data></m-no-data> <m-no-data></m-no-data>
</div> </div>
@ -72,6 +77,7 @@
import mNoData from '@/module/components/noData/noData'
import themeData from '@/module/echarts/themeData.json'
import mListConstruction from '@/module/components/listConstruction/listConstruction'
import zookeeperDirectoriesPopup from './_source/zookeeperDirectories'
export default {
name: 'servers-master',
@ -79,12 +85,18 @@
return {
isLoading: false,
masterList: [],
color: themeData.color,
drawer: false,
zkDirectories: []
}
},
props: {},
methods: {
...mapActions('monitor', ['getMasterData']),
_showZkDirectories (item) {
this.zkDirectories = [{ zkDirectory: item.zkDirectory }]
this.drawer = true
}
},
watch: {},
created () {
@ -103,7 +115,7 @@
this.isLoading = false
})
},
components: { mListConstruction, mSpin, mNoData, mGauge }
components: { mListConstruction, mSpin, mNoData, mGauge, zookeeperDirectoriesPopup }
}
</script>
<style lang="scss" rel="stylesheet/scss">

2
dolphinscheduler-ui/src/js/conf/home/pages/monitor/pages/servers/worker.vue

@ -21,7 +21,7 @@
<div class="row-box" v-for="(item,$index) in workerList" :key="$index"> <div class="row-box" v-for="(item,$index) in workerList" :key="$index">
<div class="row-title"> <div class="row-title">
<div class="left"> <div class="left">
<span class="sp">IP: {{item.host}}</span> <span class="sp">Host: {{item.host}}</span>
<span>{{$t('Zk registration directory')}}: <a href="javascript:" @click="_showZkDirectories(item)" class="links">{{$t('Directory detail')}}</a></span> <span>{{$t('Zk registration directory')}}: <a href="javascript:" @click="_showZkDirectories(item)" class="links">{{$t('Directory detail')}}</a></span>
</div> </div>
<div class="right"> <div class="right">

2
dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/taskInstance/_source/list.vue

@ -76,7 +76,7 @@
:show-close="false" :show-close="false"
:visible.sync="logDialog" :visible.sync="logDialog"
width="auto"> width="auto">
<m-log :item="item" :source="source" :logId="logId" @ok="ok" @close="close"></m-log> <m-log :key="logId" :item="item" :source="source" :logId="logId" @ok="ok" @close="close"></m-log>
</el-dialog> </el-dialog>
</div> </div>
</template> </template>

4
dolphinscheduler-ui/src/js/conf/home/pages/security/pages/users/_source/createUser.vue

@ -293,7 +293,7 @@
this.email = this.item.email
this.phone = this.item.phone
this.userState = this.item.state + '' || '1'
if (this.item.tenantName) {
if (this.item.tenantId) {
this.tenantId = this.item.tenantId
}
this.$nextTick(() => {
@ -311,7 +311,7 @@
this.email = this.item.email
this.phone = this.item.phone
this.userState = this.state + '' || '1'
if (this.item.tenantName) {
if (this.item.tenantId) {
this.tenantId = this.item.tenantId
}
if (this.queueList.length > 0) {

2
e2e/src/test/resources/config/config.properties

@ -17,7 +17,7 @@
############### project ##############
# login url
LOGIN_URL=http://127.0.0.1:8888/dolphinscheduler/
LOGIN_URL=http://127.0.0.1:12345/dolphinscheduler/
#login username
USER_NAME=admin
#login password

1
pom.xml

@ -839,6 +839,7 @@
<include>**/common/utils/IpUtilsTest.java</include>
<include>**/common/utils/JSONUtilsTest.java</include>
<include>**/common/utils/LoggerUtilsTest.java</include>
<include>**/common/utils/NetUtilsTest.java</include>
<include>**/common/utils/OSUtilsTest.java</include>
<include>**/common/utils/ParameterUtilsTest.java</include>
<include>**/common/utils/TimePlaceholderUtilsTest.java</include>

0
script/create-dolphinscheduler.sh

6
script/dolphinscheduler-daemon.sh

@ -54,6 +54,12 @@ fi
log=$DOLPHINSCHEDULER_LOG_DIR/dolphinscheduler-$command-$HOSTNAME.out
pid=$DOLPHINSCHEDULER_PID_DIR/dolphinscheduler-$command.pid
# print logs to /dev/null in docker
if [ "$DOCKER" = "true" ]; then
echo "start in docker"
log=/dev/null
fi
cd $DOLPHINSCHEDULER_HOME
if [ "$command" = "api-server" ]; then

0
script/env/dolphinscheduler_env.sh vendored

0
script/monitor-server.sh

0
script/scp-hosts.sh

0
script/start-all.sh

0
script/status-all.sh

0
script/stop-all.sh

0
script/upgrade-dolphinscheduler.sh

7
sql/dolphinscheduler-postgre.sql

@ -462,7 +462,7 @@ CREATE TABLE t_ds_process_instance (
start_time timestamp DEFAULT NULL ,
end_time timestamp DEFAULT NULL ,
run_times int DEFAULT NULL ,
host varchar(45) DEFAULT NULL ,
host varchar(135) DEFAULT NULL ,
command_type int DEFAULT NULL ,
command_param text ,
task_depend_type int DEFAULT NULL ,
@ -678,7 +678,7 @@ CREATE TABLE t_ds_task_instance (
submit_time timestamp DEFAULT NULL ,
start_time timestamp DEFAULT NULL ,
end_time timestamp DEFAULT NULL ,
host varchar(45) DEFAULT NULL ,
host varchar(135) DEFAULT NULL ,
execute_path varchar(200) DEFAULT NULL ,
log_path varchar(200) DEFAULT NULL ,
alert_flag int DEFAULT NULL ,
@ -900,8 +900,7 @@ VALUES ('admin', '7ad2410b2f4c074479a8937a28a22b8f', '0', 'xxx@qq.com', '', '0',
-- Records of t_ds_alertgroup,dolphinscheduler warning group
INSERT INTO t_ds_alertgroup(id,alert_instance_ids, create_user_id, group_name, description, create_time, update_time)
VALUES (1, '1,2','dolphinscheduler warning group', 'dolphinscheduler warning group', '2018-11-29 10:20:39',
'2018-11-29 10:20:39');
VALUES (1,'1,2', 1, 'default admin warning group', 'default admin warning group', '2018-11-29 10:20:39', '2018-11-29 10:20:39');
-- Records of t_ds_queue,default queue name : default
INSERT INTO t_ds_queue(queue_name, queue, create_time, update_time)

4
sql/dolphinscheduler_mysql.sql

@ -582,7 +582,7 @@ CREATE TABLE `t_ds_process_instance` (
`start_time` datetime DEFAULT NULL COMMENT 'process instance start time',
`end_time` datetime DEFAULT NULL COMMENT 'process instance end time',
`run_times` int(11) DEFAULT NULL COMMENT 'process instance run times',
`host` varchar(45) DEFAULT NULL COMMENT 'process instance host',
`host` varchar(135) DEFAULT NULL COMMENT 'process instance host',
`command_type` tinyint(4) DEFAULT NULL COMMENT 'command type',
`command_param` text COMMENT 'json command parameters',
`task_depend_type` tinyint(4) DEFAULT NULL COMMENT 'task depend type. 0: only current node,1:before the node,2:later nodes',
@ -819,7 +819,7 @@ CREATE TABLE `t_ds_task_instance` (
`submit_time` datetime DEFAULT NULL COMMENT 'task submit time',
`start_time` datetime DEFAULT NULL COMMENT 'task start time',
`end_time` datetime DEFAULT NULL COMMENT 'task end time',
`host` varchar(45) DEFAULT NULL COMMENT 'host of task running on',
`host` varchar(135) DEFAULT NULL COMMENT 'host of task running on',
`execute_path` varchar(200) DEFAULT NULL COMMENT 'task execute path in the host',
`log_path` varchar(200) DEFAULT NULL COMMENT 'task log path',
`alert_flag` tinyint(4) DEFAULT NULL COMMENT 'whether alert',

58
sql/upgrade/1.3.5_schema/mysql/dolphinscheduler_ddl.sql

@ -0,0 +1,58 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
SET sql_mode=(SELECT REPLACE(@@sql_mode,'ONLY_FULL_GROUP_BY',''));
-- uc_dolphin_T_t_ds_process_instance_R_host
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_process_instance_R_host;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_process_instance_R_host()
BEGIN
IF EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_process_instance'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='host')
THEN
ALTER TABLE t_ds_process_instance MODIFY COLUMN `host` varchar(135);
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_process_instance_R_host;
DROP PROCEDURE uc_dolphin_T_t_ds_process_instance_R_host;
-- uc_dolphin_T_t_ds_task_instance_R_host
drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_task_instance_R_host;
delimiter d//
CREATE PROCEDURE uc_dolphin_T_t_ds_task_instance_R_host()
BEGIN
IF EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_task_instance'
AND TABLE_SCHEMA=(SELECT DATABASE())
AND COLUMN_NAME ='host')
THEN
ALTER TABLE t_ds_task_instance MODIFY COLUMN `host` varchar(135);
END IF;
END;
d//
delimiter ;
CALL uc_dolphin_T_t_ds_task_instance_R_host;
DROP PROCEDURE uc_dolphin_T_t_ds_task_instance_R_host;

16
sql/upgrade/1.3.5_schema/mysql/dolphinscheduler_dml.sql

@ -0,0 +1,16 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

52
sql/upgrade/1.3.5_schema/postgresql/dolphinscheduler_ddl.sql

@ -0,0 +1,52 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-- uc_dolphin_T_t_ds_process_instance_A_host
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_process_instance_A_host() RETURNS void AS $$
BEGIN
IF EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_process_instance'
AND COLUMN_NAME ='host')
THEN
ALTER TABLE t_ds_process_instance ALTER COLUMN host type varchar(135);
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_process_instance_A_host();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_process_instance_A_host();
-- uc_dolphin_T_t_ds_task_instance_A_host
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_task_instance_A_host() RETURNS void AS $$
BEGIN
IF EXISTS (SELECT 1 FROM information_schema.COLUMNS
WHERE TABLE_NAME='t_ds_task_instance'
AND COLUMN_NAME ='host')
THEN
ALTER TABLE t_ds_task_instance ALTER COLUMN host type varchar(135);
END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_task_instance_A_host();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_task_instance_A_host();

16
sql/upgrade/1.3.5_schema/postgresql/dolphinscheduler_dml.sql

@ -0,0 +1,16 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/