
Merge branch 'dev' into script_thread

pull/3/MERGE
CalvinKirs 3 years ago
commit 35fc7dd851
   2  .github/workflows/ci_backend.yml
   2  .github/workflows/ci_e2e.yml
   2  .github/workflows/ci_ut.yml
   4  .gitignore
  57  docker/build/Dockerfile
  36  docker/build/README.md
  35  docker/build/README_zh_CN.md
   0  docker/build/checkpoint.sh
  12  docker/build/conf/dolphinscheduler/env/dolphinscheduler_env.sh
   9  docker/build/conf/dolphinscheduler/logback/logback-alert.xml
   9  docker/build/conf/dolphinscheduler/logback/logback-api.xml
   9  docker/build/conf/dolphinscheduler/logback/logback-master.xml
   9  docker/build/conf/dolphinscheduler/logback/logback-worker.xml
  51  docker/build/conf/nginx/dolphinscheduler.conf
  47  docker/build/conf/zookeeper/zoo.cfg
   9  docker/build/hooks/build
   0  docker/build/hooks/push
  18  docker/build/startup-init-conf.sh
  55  docker/build/startup.sh
   2  docker/docker-swarm/check
  80  docker/docker-swarm/docker-compose.yml
  84  docker/docker-swarm/docker-stack.yml
  12  docker/docker-swarm/dolphinscheduler_env.sh
  10  docker/kubernetes/dolphinscheduler/Chart.yaml
  37  docker/kubernetes/dolphinscheduler/README.md
  25  docker/kubernetes/dolphinscheduler/requirements.yaml
  25  docker/kubernetes/dolphinscheduler/requirements.yaml~HEAD
  25  docker/kubernetes/dolphinscheduler/requirements.yaml~HEAD_0
  25  docker/kubernetes/dolphinscheduler/requirements.yaml~dev
  25  docker/kubernetes/dolphinscheduler/requirements.yaml~dev_0
   6  docker/kubernetes/dolphinscheduler/templates/NOTES.txt
   4  docker/kubernetes/dolphinscheduler/templates/_helpers.tpl
   1  docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-alert.yaml
  21  docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-api.yaml
   8  docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-common.yaml
   2  docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-master.yaml
   7  docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-worker.yaml
  68  docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml
  48  docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml
 119  docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-frontend.yaml
   2  docker/kubernetes/dolphinscheduler/templates/ingress.yaml
   2  docker/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-alert.yaml
   2  docker/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-api.yaml
  20  docker/kubernetes/dolphinscheduler/templates/secret-external-fs-s3a.yaml
  96  docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml
 184  docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml
   4  docker/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-worker-headless.yaml
 310  docker/kubernetes/dolphinscheduler/values.yaml
  23  dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuSender.java
  24  dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/test/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuSenderTest.java
   2  dolphinscheduler-alert/src/main/resources/alert.properties
  42  dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/DataSourceController.java
   9  dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/DataSourceService.java
   2  dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessInstanceService.java
   2  dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/TaskInstanceService.java
   2  dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java
   4  dolphinscheduler-api/src/main/resources/i18n/messages.properties
   4  dolphinscheduler-api/src/main/resources/i18n/messages_en_US.properties
   4  dolphinscheduler-api/src/main/resources/i18n/messages_zh_CN.properties
  26  dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/DataSourceServiceTest.java
  14  dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
  19  dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/CommonUtils.java
  86  dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/DateUtils.java
  29  dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/JSONUtils.java
  26  dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/NetUtils.java
  10  dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/OSUtils.java
   4  dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/StringUtils.java
   2  dolphinscheduler-common/src/main/resources/common.properties
  39  dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/DateUtilsTest.java
   9  dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/NetUtilsTest.java
  13  dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/OSUtilsTest.java
  18  dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/StringUtilsTest.java
  38  dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/BaseDataSource.java
   2  dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/HiveDataSource.java
   2  dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/SparkDataSource.java
   6  dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/ProcessInstance.java
   8  dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/TaskInstance.java
 269  dolphinscheduler-dao/src/test/java/org/apache/dolphinscheduler/dao/datasource/BaseDataSourceTest.java
   4  dolphinscheduler-dist/release-docs/LICENSE
 759  dolphinscheduler-dist/release-docs/licenses/LICENSE-javax.mail.txt
   9  dolphinscheduler-dist/src/main/assembly/dolphinscheduler-binary.xml
   8  dolphinscheduler-dist/src/main/assembly/dolphinscheduler-nginx.xml
   1  dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/builder/TaskExecutionContextBuilder.java
  14  dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumer.java
   2  dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/registry/MasterRegistry.java
   2  dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/ConditionsTaskExecThread.java
   2  dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/DependentTaskExecThread.java
   4  dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterSchedulerService.java
   4  dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterTaskExecThread.java
   4  dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/utils/FlinkArgsUtils.java
   9  dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/utils/ProcessUtils.java
   6  dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/utils/UDFUtils.java
   3  dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskExecuteProcessor.java
   4  dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskKillProcessor.java
   2  dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/registry/WorkerRegistry.java
   9  dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java
  51  dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java
  31  dolphinscheduler-server/src/main/resources/config/install_config.conf
   4  dolphinscheduler-server/src/main/resources/worker.properties
   4  dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/master/dispatch/executor/NettyExecutorManagerTest.java
Some files were not shown because too many files have changed in this diff.

2
.github/workflows/ci_backend.yml

@@ -49,7 +49,7 @@ jobs:
with:
submodule: true
- name: Check License Header
uses: apache/skywalking-eyes@9bd5feb
uses: apache/skywalking-eyes@ec88b7d850018c8983f87729ea88549e100c5c82
- name: Set up JDK 1.8
uses: actions/setup-java@v1
with:

2
.github/workflows/ci_e2e.yml

@@ -33,7 +33,7 @@ jobs:
with:
submodule: true
- name: Check License Header
uses: apache/skywalking-eyes@9bd5feb
uses: apache/skywalking-eyes@ec88b7d850018c8983f87729ea88549e100c5c82
- uses: actions/cache@v1
with:
path: ~/.m2/repository

2
.github/workflows/ci_ut.yml

@@ -36,7 +36,7 @@ jobs:
with:
submodule: true
- name: Check License Header
uses: apache/skywalking-eyes@9bd5feb
uses: apache/skywalking-eyes@ec88b7d850018c8983f87729ea88549e100c5c82
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # Only enable review / suggestion here
- uses: actions/cache@v1

4
.gitignore

@@ -7,6 +7,10 @@
.target
.idea/
target/
dist/
all-dependencies.txt
self-modules.txt
third-party-dependencies.txt
.settings
.nbproject
.classpath

57
docker/build/Dockerfile

@@ -15,55 +15,37 @@
# limitations under the License.
#
FROM nginx:alpine
FROM openjdk:8-jdk-alpine
ARG VERSION
ENV TZ Asia/Shanghai
ENV LANG C.UTF-8
ENV DEBIAN_FRONTEND noninteractive
ENV DOCKER true
#1. install dos2unix shadow bash openrc python sudo vim wget iputils net-tools ssh pip tini kazoo.
#If installation is slow, you can replace alpine's mirror with aliyun's mirror, for example:
#RUN sed -i "s/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g" /etc/apk/repositories
# 1. install command/library/software
# If installation is slow, you can replace alpine's mirror with aliyun's mirror, for example:
# RUN sed -i "s/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g" /etc/apk/repositories
# RUN sed -i 's/dl-cdn.alpinelinux.org/mirror.tuna.tsinghua.edu.cn/g' /etc/apk/repositories
RUN apk update && \
apk add --update --no-cache dos2unix shadow bash openrc python2 python3 sudo vim wget iputils net-tools openssh-server py-pip tini && \
apk add --update --no-cache procps && \
openrc boot && \
pip install kazoo
apk add --no-cache tzdata dos2unix bash python2 python3 procps sudo shadow tini postgresql-client && \
cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
apk del tzdata && \
rm -rf /var/cache/apk/*
#2. install jdk
RUN apk add --update --no-cache openjdk8
ENV JAVA_HOME /usr/lib/jvm/java-1.8-openjdk
ENV PATH $JAVA_HOME/bin:$PATH
#3. add dolphinscheduler
# 2. add dolphinscheduler
ADD ./apache-dolphinscheduler-incubating-${VERSION}-dolphinscheduler-bin.tar.gz /opt/
RUN mv /opt/apache-dolphinscheduler-incubating-${VERSION}-dolphinscheduler-bin/ /opt/dolphinscheduler/
RUN ln -s /opt/apache-dolphinscheduler-incubating-${VERSION}-dolphinscheduler-bin /opt/dolphinscheduler
ENV DOLPHINSCHEDULER_HOME /opt/dolphinscheduler
#4. install database, if use mysql as your backend database, the `mysql-client` package should be installed
RUN apk add --update --no-cache postgresql postgresql-contrib
#5. modify nginx
RUN echo "daemon off;" >> /etc/nginx/nginx.conf && \
rm -rf /etc/nginx/conf.d/*
COPY ./conf/nginx/dolphinscheduler.conf /etc/nginx/conf.d
#6. add configuration and modify permissions and set soft links
# 3. add configuration and modify permissions and set soft links
COPY ./checkpoint.sh /root/checkpoint.sh
COPY ./startup-init-conf.sh /root/startup-init-conf.sh
COPY ./startup.sh /root/startup.sh
COPY ./conf/dolphinscheduler/*.tpl /opt/dolphinscheduler/conf/
COPY ./conf/dolphinscheduler/logback/* /opt/dolphinscheduler/conf/
COPY conf/dolphinscheduler/env/dolphinscheduler_env.sh /opt/dolphinscheduler/conf/env/
RUN chmod +x /root/checkpoint.sh && \
chmod +x /root/startup-init-conf.sh && \
chmod +x /root/startup.sh && \
chmod +x /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh && \
chmod +x /opt/dolphinscheduler/script/*.sh && \
chmod +x /opt/dolphinscheduler/bin/*.sh && \
dos2unix /root/checkpoint.sh && \
COPY ./conf/dolphinscheduler/env/dolphinscheduler_env.sh /opt/dolphinscheduler/conf/env/
RUN dos2unix /root/checkpoint.sh && \
dos2unix /root/startup-init-conf.sh && \
dos2unix /root/startup.sh && \
dos2unix /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh && \
@@ -71,13 +53,10 @@ RUN chmod +x /root/checkpoint.sh && \
dos2unix /opt/dolphinscheduler/bin/*.sh && \
rm -rf /bin/sh && \
ln -s /bin/bash /bin/sh && \
mkdir -p /tmp/xls && \
#7. remove apk index cache and disable coredup for sudo
rm -rf /var/cache/apk/* && \
mkdir -p /var/mail /tmp/xls && \
echo "Set disable_coredump false" >> /etc/sudo.conf
#8. expose port
EXPOSE 2181 2888 3888 5432 5678 1234 12345 50051 8888
# 4. expose port
EXPOSE 5678 1234 12345 50051
ENTRYPOINT ["/sbin/tini", "--", "/root/startup.sh"]
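For orientation, a minimal sketch of building and launching the slimmed-down image, assuming a 1.4.0 source tarball has already been copied into `docker/build/` (the version tag, addresses and credentials below are illustrative):

```bash
# VERSION is the only build arg the Dockerfile declares.
docker build --build-arg VERSION=1.4.0 -t dolphinscheduler:1.4.0 docker/build/

# tini (the ENTRYPOINT) runs startup.sh and forwards signals to it;
# only the api port 12345 needs publishing now that nginx is gone.
docker run -dit --name dolphinscheduler \
  -e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
  -e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
  -e ZOOKEEPER_QUORUM="192.168.x.x:2181" \
  -p 12345:12345 \
  dolphinscheduler:1.4.0 all
```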

36
docker/build/README.md

@@ -15,9 +15,9 @@ Official Website: https://dolphinscheduler.apache.org
#### You can start a dolphinscheduler instance
```
$ docker run -dit --name dolphinscheduler \
-e DATABASE_USERNAME=test -e DATABASE_PASSWORD=test -e DATABASE_DATABASE=dolphinscheduler \
-p 8888:8888 \
-p 12345:12345 \
dolphinscheduler all
```
@@ -33,7 +33,7 @@ You can specify **existing postgres service**. Example:
$ docker run -dit --name dolphinscheduler \
-e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
-p 8888:8888 \
-p 12345:12345 \
dolphinscheduler all
```
@@ -43,7 +43,7 @@ You can specify **existing zookeeper service**. Example:
$ docker run -dit --name dolphinscheduler \
-e ZOOKEEPER_QUORUM="l92.168.x.x:2181"
-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" -e DATABASE_DATABASE="dolphinscheduler" \
-p 8888:8888 \
-p 12345:12345 \
dolphinscheduler all
```
@@ -90,14 +90,6 @@ $ docker run -dit --name dolphinscheduler \
dolphinscheduler alert-server
```
* Start a **frontend**, For example:
```
$ docker run -dit --name dolphinscheduler \
-e FRONTEND_API_SERVER_HOST="192.168.x.x" -e FRONTEND_API_SERVER_PORT="12345" \
-p 8888:8888 \
dolphinscheduler frontend
```
**Note**: You must specify `DATABASE_HOST` `DATABASE_PORT` `DATABASE_DATABASE` `DATABASE_USERNAME` `DATABASE_PASSWORD` `ZOOKEEPER_QUORUM` when starting a standalone dolphinscheduler server.
@@ -146,7 +138,7 @@ This environment variable sets the host for database. The default value is `127.0.0.1`.
This environment variable sets the port for database. The default value is `5432`.
**Note**: You must specify it when starting a standalone dolphinscheduler server, like `master-server`, `worker-server`, `api-server`, `alert-server`.
**`DATABASE_USERNAME`**
@@ -306,18 +298,6 @@ This environment variable sets enterprise wechat agent id for `alert-server`. The default value is empty.
This environment variable sets enterprise wechat users for `alert-server`. The default value is empty.
**`FRONTEND_API_SERVER_HOST`**
This environment variable sets api server host for `frontend`. The default value is `127.0.0.1`.
**Note**: You must specify it when starting a standalone dolphinscheduler server, like `api-server`.
**`FRONTEND_API_SERVER_PORT`**
This environment variable sets api server port for `frontend`. The default value is `12345`.
**Note**: You must specify it when starting a standalone dolphinscheduler server, like `api-server`.
## Initialization scripts
If you would like to do additional initialization in an image derived from this one, add one or more environment variables in `/root/start-init-conf.sh`, and modify template files in `/opt/dolphinscheduler/conf/*.tpl`.
@@ -326,7 +306,7 @@ For example, to add an environment variable `API_SERVER_PORT` in `/root/start-init-conf.sh`:
```
export API_SERVER_PORT=5555
```
and to modify `/opt/dolphinscheduler/conf/application-api.properties.tpl` template file, add server port:
```
@@ -343,8 +323,4 @@ $(cat ${DOLPHINSCHEDULER_HOME}/conf/${line})
EOF
" > ${DOLPHINSCHEDULER_HOME}/conf/${line%.*}
done
echo "generate nginx config"
sed -i "s/FRONTEND_API_SERVER_HOST/${FRONTEND_API_SERVER_HOST}/g" /etc/nginx/conf.d/dolphinscheduler.conf
sed -i "s/FRONTEND_API_SERVER_PORT/${FRONTEND_API_SERVER_PORT}/g" /etc/nginx/conf.d/dolphinscheduler.conf
```

35
docker/build/README_zh_CN.md

@@ -15,9 +15,9 @@ Official Website: https://dolphinscheduler.apache.org
#### 你可以运行一个dolphinscheduler实例
```
$ docker run -dit --name dolphinscheduler \
-e DATABASE_USERNAME=test -e DATABASE_PASSWORD=test -e DATABASE_DATABASE=dolphinscheduler \
-p 8888:8888 \
-p 12345:12345 \
dolphinscheduler all
```
@@ -33,7 +33,7 @@ dolphinscheduler all
$ docker run -dit --name dolphinscheduler \
-e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
-p 8888:8888 \
-p 12345:12345 \
dolphinscheduler all
```
@@ -43,7 +43,7 @@ dolphinscheduler all
$ docker run -dit --name dolphinscheduler \
-e ZOOKEEPER_QUORUM="l92.168.x.x:2181"
-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" -e DATABASE_DATABASE="dolphinscheduler" \
-p 8888:8888 \
-p 12345:12345 \
dolphinscheduler all
```
@@ -90,15 +90,6 @@ $ docker run -dit --name dolphinscheduler \
dolphinscheduler alert-server
```
* 启动一个 **frontend**, 如下:
```
$ docker run -dit --name dolphinscheduler \
-e FRONTEND_API_SERVER_HOST="192.168.x.x" -e FRONTEND_API_SERVER_PORT="12345" \
-p 8888:8888 \
dolphinscheduler frontend
```
**注意**: 当你运行dolphinscheduler中的部分服务时,你必须指定这些环境变量 `DATABASE_HOST` `DATABASE_PORT` `DATABASE_DATABASE` `DATABASE_USERNAME` `DATABASE_PASSWORD` `ZOOKEEPER_QUORUM`
## 如何构建一个docker镜像
@@ -306,18 +297,6 @@ Dolphin Scheduler映像使用了几个容易遗漏的环境变量。虽然这些
配置`alert-server`的邮件服务企业微信`USERS`,默认值 `空`
**`FRONTEND_API_SERVER_HOST`**
配置`frontend`的连接`api-server`的地址,默认值 `127.0.0.1`
**Note**: 当单独运行`api-server`时,你应该指定`api-server`这个值。
**`FRONTEND_API_SERVER_PORT`**
配置`frontend`的连接`api-server`的端口,默认值 `12345`
**Note**: 当单独运行`api-server`时,你应该指定`api-server`这个值。
## 初始化脚本
如果你想在编译的时候或者运行的时候附加一些其它的操作及新增一些环境变量,你可以在`/root/start-init-conf.sh`文件中进行修改,同时如果涉及到配置文件的修改,请在`/opt/dolphinscheduler/conf/*.tpl`中修改相应的配置文件
@@ -326,7 +305,7 @@ Dolphin Scheduler映像使用了几个容易遗漏的环境变量。虽然这些
```
export API_SERVER_PORT=5555
```
当添加以上环境变量后,你应该在相应的模板文件`/opt/dolphinscheduler/conf/application-api.properties.tpl`中添加这个环境变量配置:
```
@@ -343,8 +322,4 @@ $(cat ${DOLPHINSCHEDULER_HOME}/conf/${line})
EOF
" > ${DOLPHINSCHEDULER_HOME}/conf/${line%.*}
done
echo "generate nginx config"
sed -i "s/FRONTEND_API_SERVER_HOST/${FRONTEND_API_SERVER_HOST}/g" /etc/nginx/conf.d/dolphinscheduler.conf
sed -i "s/FRONTEND_API_SERVER_PORT/${FRONTEND_API_SERVER_PORT}/g" /etc/nginx/conf.d/dolphinscheduler.conf
```

0
docker/build/checkpoint.sh

12
docker/build/conf/dolphinscheduler/env/dolphinscheduler_env.sh

@@ -15,6 +15,14 @@
# limitations under the License.
#
export PYTHON_HOME=/usr/bin/python2
export HADOOP_HOME=/opt/soft/hadoop
export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop
export SPARK_HOME1=/opt/soft/spark1
export SPARK_HOME2=/opt/soft/spark2
export PYTHON_HOME=/usr/bin/python
export JAVA_HOME=/usr/lib/jvm/java-1.8-openjdk
export PATH=$PYTHON_HOME/bin:$JAVA_HOME/bin:$PATH
export HIVE_HOME=/opt/soft/hive
export FLINK_HOME=/opt/soft/flink
export DATAX_HOME=/opt/soft/datax/bin/datax.py
export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$PATH:$FLINK_HOME/bin:$DATAX_HOME:$PATH
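Since the worker sources this file before each task, a customized copy can simply be bind-mounted over the baked-in one, which is exactly what docker-compose.yml does further down. A hedged sketch (image name and paths are the ones used in this repo):

```bash
# Override the task environment without rebuilding the image.
docker run -dit --name dolphinscheduler-worker \
  -v "$(pwd)/dolphinscheduler_env.sh":/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh \
  apache/dolphinscheduler:latest worker-server
```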

9
docker/build/conf/dolphinscheduler/logback/logback-alert.xml

@@ -20,14 +20,6 @@
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
<property name="log.base" value="logs"/>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="ALERTLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.base}/dolphinscheduler-alert.log</file>
@@ -45,7 +37,6 @@
</appender>
<root level="INFO">
<appender-ref ref="STDOUT"/>
<appender-ref ref="ALERTLOGFILE"/>
</root>

9
docker/build/conf/dolphinscheduler/logback/logback-api.xml

@@ -20,14 +20,6 @@
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
<property name="log.base" value="logs"/>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- api server logback config start -->
<appender name="APILOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
@@ -55,7 +47,6 @@
<root level="INFO">
<appender-ref ref="STDOUT"/>
<appender-ref ref="APILOGFILE"/>
</root>

9
docker/build/conf/dolphinscheduler/logback/logback-master.xml

@@ -20,14 +20,6 @@
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
<property name="log.base" value="logs"/>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<conversionRule conversionWord="messsage"
converterClass="org.apache.dolphinscheduler.server.log.SensitiveDataConverter"/>
@@ -74,7 +66,6 @@
<!-- master server logback config end -->
<root level="INFO">
<appender-ref ref="STDOUT"/>
<appender-ref ref="TASKLOGFILE"/>
<appender-ref ref="MASTERLOGFILE"/>
</root>

9
docker/build/conf/dolphinscheduler/logback/logback-worker.xml

@@ -20,14 +20,6 @@
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
<property name="log.base" value="logs"/>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- worker server logback config start -->
<conversionRule conversionWord="messsage"
@@ -75,7 +67,6 @@
<!-- worker server logback config end -->
<root level="INFO">
<appender-ref ref="STDOUT"/>
<appender-ref ref="TASKLOGFILE"/>
<appender-ref ref="WORKERLOGFILE"/>
</root>

51
docker/build/conf/nginx/dolphinscheduler.conf

@@ -1,51 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
server {
listen 8888;
server_name localhost;
#charset koi8-r;
#access_log /var/log/nginx/host.access.log main;
location / {
root /opt/dolphinscheduler/ui;
index index.html index.html;
}
location /dolphinscheduler/ui{
alias /opt/dolphinscheduler/ui;
}
location /dolphinscheduler {
proxy_pass http://FRONTEND_API_SERVER_HOST:FRONTEND_API_SERVER_PORT;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header x_real_ipP $remote_addr;
proxy_set_header remote_addr $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_http_version 1.1;
proxy_connect_timeout 300s;
proxy_read_timeout 300s;
proxy_send_timeout 300s;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
#error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}

47
docker/build/conf/zookeeper/zoo.cfg

@@ -1,47 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/tmp/zookeeper
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
#Four Letter Words commands:stat,ruok,conf,isro
4lw.commands.whitelist=*
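The embedded ZooKeeper (and this zoo.cfg) is gone; the compose files below point at a `bitnami/zookeeper` service instead, whose `ZOO_4LW_COMMANDS_WHITELIST` still exposes `ruok`, so the old four-letter-word health check keeps working against the external server. A quick sketch (host and port are the compose defaults):

```bash
# Prints "imok" when the external ZooKeeper is healthy
# (ruok is whitelisted via ZOO_4LW_COMMANDS_WHITELIST in the compose files).
echo ruok | nc 127.0.0.1 2181
```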

9
docker/build/hooks/build

@@ -48,7 +48,12 @@ echo -e "mv $(pwd)/dolphinscheduler-dist/target/apache-dolphinscheduler-incubating-${VERSION}-dolphinscheduler-bin.tar.gz $(pwd)/docker/build/\n"
mv "$(pwd)"/dolphinscheduler-dist/target/apache-dolphinscheduler-incubating-"${VERSION}"-dolphinscheduler-bin.tar.gz $(pwd)/docker/build/
# docker build
echo -e "docker build --build-arg VERSION=${VERSION} -t $DOCKER_REPO:${VERSION} $(pwd)/docker/build/\n"
sudo docker build --build-arg VERSION="${VERSION}" -t $DOCKER_REPO:"${VERSION}" "$(pwd)/docker/build/"
BUILD_COMMAND="docker build --build-arg VERSION=${VERSION} -t $DOCKER_REPO:${VERSION} $(pwd)/docker/build/"
echo -e "$BUILD_COMMAND\n"
if (docker info 2> /dev/null | grep -i "ERROR"); then
sudo $BUILD_COMMAND
else
$BUILD_COMMAND
fi
echo "------ dolphinscheduler end - build -------"

0
docker/build/hooks/push

18
docker/build/startup-init-conf.sh

@@ -39,9 +39,9 @@ export DATABASE_PARAMS=${DATABASE_PARAMS:-"characterEncoding=utf8"}
export DOLPHINSCHEDULER_ENV_PATH=${DOLPHINSCHEDULER_ENV_PATH:-"/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh"}
export DOLPHINSCHEDULER_DATA_BASEDIR_PATH=${DOLPHINSCHEDULER_DATA_BASEDIR_PATH:-"/tmp/dolphinscheduler"}
export DOLPHINSCHEDULER_OPTS=${DOLPHINSCHEDULER_OPTS:-""}
export RESOURCE_STORAGE_TYPE=${RESOURCE_STORAGE_TYPE:-"NONE"}
export RESOURCE_UPLOAD_PATH=${RESOURCE_UPLOAD_PATH:-"/ds"}
export FS_DEFAULT_FS=${FS_DEFAULT_FS:-"s3a://xxxx"}
export RESOURCE_STORAGE_TYPE=${RESOURCE_STORAGE_TYPE:-"HDFS"}
export RESOURCE_UPLOAD_PATH=${RESOURCE_UPLOAD_PATH:-"/dolphinscheduler"}
export FS_DEFAULT_FS=${FS_DEFAULT_FS:-"file:///"}
export FS_S3A_ENDPOINT=${FS_S3A_ENDPOINT:-"s3.xxx.amazonaws.com"}
export FS_S3A_ACCESS_KEY=${FS_S3A_ACCESS_KEY:-"xxxxxxx"}
export FS_S3A_SECRET_KEY=${FS_S3A_SECRET_KEY:-"xxxxxxx"}
@@ -81,7 +81,7 @@ export WORKER_WEIGHT=${WORKER_WEIGHT:-"100"}
#============================================================================
# alert plugin dir
export ALERT_PLUGIN_DIR=${ALERT_PLUGIN_DIR:-"/opt/dolphinscheduler"}
# XLS FILE
# xls file
export XLS_FILE_PATH=${XLS_FILE_PATH:-"/tmp/xls"}
# mail
export MAIL_SERVER_HOST=${MAIL_SERVER_HOST:-""}
@@ -99,12 +99,6 @@ export ENTERPRISE_WECHAT_SECRET=${ENTERPRISE_WECHAT_SECRET:-""}
export ENTERPRISE_WECHAT_AGENT_ID=${ENTERPRISE_WECHAT_AGENT_ID:-""}
export ENTERPRISE_WECHAT_USERS=${ENTERPRISE_WECHAT_USERS:-""}
#============================================================================
# Frontend
#============================================================================
export FRONTEND_API_SERVER_HOST=${FRONTEND_API_SERVER_HOST:-"127.0.0.1"}
export FRONTEND_API_SERVER_PORT=${FRONTEND_API_SERVER_PORT:-"12345"}
echo "generate app config"
ls ${DOLPHINSCHEDULER_HOME}/conf/ | grep ".tpl" | while read line; do
eval "cat << EOF
@@ -112,7 +106,3 @@ $(cat ${DOLPHINSCHEDULER_HOME}/conf/${line})
EOF
" > ${DOLPHINSCHEDULER_HOME}/conf/${line%.*}
done
echo "generate nginx config"
sed -i "s/FRONTEND_API_SERVER_HOST/${FRONTEND_API_SERVER_HOST}/g" /etc/nginx/conf.d/dolphinscheduler.conf
sed -i "s/FRONTEND_API_SERVER_PORT/${FRONTEND_API_SERVER_PORT}/g" /etc/nginx/conf.d/dolphinscheduler.conf

55
docker/build/startup.sh

@@ -22,8 +22,8 @@ DOLPHINSCHEDULER_BIN=${DOLPHINSCHEDULER_HOME}/bin
DOLPHINSCHEDULER_SCRIPT=${DOLPHINSCHEDULER_HOME}/script
DOLPHINSCHEDULER_LOGS=${DOLPHINSCHEDULER_HOME}/logs
# start database
initDatabase() {
# wait database
waitDatabase() {
echo "test ${DATABASE_TYPE} service"
while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do
counter=$((counter+1))
@@ -43,19 +43,22 @@ initDatabase() {
exit 1
fi
else
v=$(sudo -u postgres PGPASSWORD=${DATABASE_PASSWORD} psql -h ${DATABASE_HOST} -p ${DATABASE_PORT} -U ${DATABASE_USERNAME} -d ${DATABASE_DATABASE} -tAc "select 1")
v=$(PGPASSWORD=${DATABASE_PASSWORD} psql -h ${DATABASE_HOST} -p ${DATABASE_PORT} -U ${DATABASE_USERNAME} -d ${DATABASE_DATABASE} -tAc "select 1")
if [ "$(echo ${v} | grep 'FATAL' | wc -l)" -eq 1 ]; then
echo "Error: Can't connect to database...${v}"
exit 1
fi
fi
}
# init database
initDatabase() {
echo "import sql data"
${DOLPHINSCHEDULER_SCRIPT}/create-dolphinscheduler.sh
}
# start zk
initZK() {
# wait zk
waitZK() {
echo "connect remote zookeeper"
echo "${ZOOKEEPER_QUORUM}" | awk -F ',' 'BEGIN{ i=1 }{ while( i <= NF ){ print $i; i++ } }' | while read line; do
while ! nc -z ${line%:*} ${line#*:}; do
@@ -70,12 +73,6 @@ initZK() {
done
}
# start nginx
initNginx() {
echo "start nginx"
nginx &
}
# start master-server
initMasterServer() {
echo "start master-server"
@@ -115,59 +112,54 @@ initAlertServer() {
printUsage() {
echo -e "Dolphin Scheduler is a distributed and easy-to-expand visual DAG workflow scheduling system,"
echo -e "dedicated to solving the complex dependencies in data processing, making the scheduling system out of the box for data processing.\n"
echo -e "Usage: [ all | master-server | worker-server | api-server | alert-server | frontend ]\n"
printf "%-13s: %s\n" "all" "Run master-server, worker-server, api-server, alert-server and frontend."
echo -e "Usage: [ all | master-server | worker-server | api-server | alert-server ]\n"
printf "%-13s: %s\n" "all" "Run master-server, worker-server, api-server and alert-server"
printf "%-13s: %s\n" "master-server" "MasterServer is mainly responsible for DAG task split, task submission monitoring."
printf "%-13s: %s\n" "worker-server" "WorkerServer is mainly responsible for task execution and providing log services.."
printf "%-13s: %s\n" "api-server" "ApiServer is mainly responsible for processing requests from the front-end UI layer."
printf "%-13s: %s\n" "worker-server" "WorkerServer is mainly responsible for task execution and providing log services."
printf "%-13s: %s\n" "api-server" "ApiServer is mainly responsible for processing requests and providing the front-end UI layer."
printf "%-13s: %s\n" "alert-server" "AlertServer mainly include Alarms."
printf "%-13s: %s\n" "frontend" "Frontend mainly provides various visual operation interfaces of the system."
}
# init config file
source /root/startup-init-conf.sh
LOGFILE=/var/log/nginx/access.log
case "$1" in
(all)
initZK
waitZK
waitDatabase
initDatabase
initMasterServer
initWorkerServer
initApiServer
initAlertServer
initLoggerServer
initNginx
LOGFILE=/var/log/nginx/access.log
LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-api-server.log
;;
(master-server)
initZK
initDatabase
waitZK
waitDatabase
initMasterServer
LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-master.log
;;
(worker-server)
initZK
initDatabase
waitZK
waitDatabase
initWorkerServer
initLoggerServer
LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-worker.log
;;
(api-server)
initZK
waitZK
waitDatabase
initDatabase
initApiServer
LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-api-server.log
;;
(alert-server)
initDatabase
waitDatabase
initAlertServer
LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-alert.log
;;
(frontend)
initNginx
LOGFILE=/var/log/nginx/access.log
;;
(help)
printUsage
exit 1
@@ -179,8 +171,7 @@ case "$1" in
esac
# init directories and log files
mkdir -p ${DOLPHINSCHEDULER_LOGS} && mkdir -p /var/log/nginx/ && cat /dev/null >> ${LOGFILE}
mkdir -p ${DOLPHINSCHEDULER_LOGS} && cat /dev/null >> ${LOGFILE}
echo "tail begin"
exec bash -c "tail -n 1 -f ${LOGFILE}"
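The renamed `waitDatabase`/`waitZK` helpers both reduce to the same poll-with-`nc` pattern; a generic sketch of that loop (host, port and retry limit are illustrative):

```bash
# Block until a TCP endpoint accepts connections, in the style of waitDatabase/waitZK.
wait_for() {
    local counter=0
    while ! nc -z "$1" "$2"; do
        counter=$((counter+1))
        if [ ${counter} -ge 30 ]; then
            echo "Error: ${1}:${2} is still unreachable"
            exit 1
        fi
        sleep 2
    done
}
wait_for dolphinscheduler-postgresql 5432
```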

2
docker/docker-swarm/check

@@ -25,7 +25,7 @@ else
echo "Server start failed "$server_num
exit 1
fi
ready=`curl http://127.0.0.1:8888/dolphinscheduler/login -d 'userName=admin&userPassword=dolphinscheduler123' -v | grep "login success" | wc -l`
ready=`curl http://127.0.0.1:12345/dolphinscheduler/login -d 'userName=admin&userPassword=dolphinscheduler123' -v | grep "login success" | wc -l`
if [ $ready -eq 1 ]
then
echo "Servers is ready"

80
docker/docker-swarm/docker-compose.yml

@@ -31,6 +31,7 @@ services:
volumes:
- dolphinscheduler-postgresql:/bitnami/postgresql
- dolphinscheduler-postgresql-initdb:/docker-entrypoint-initdb.d
restart: unless-stopped
networks:
- dolphinscheduler
@@ -45,13 +46,14 @@ services:
ZOO_4LW_COMMANDS_WHITELIST: srvr,ruok,wchs,cons
volumes:
- dolphinscheduler-zookeeper:/bitnami/zookeeper
restart: unless-stopped
networks:
- dolphinscheduler
dolphinscheduler-api:
image: apache/dolphinscheduler:latest
container_name: dolphinscheduler-api
command: ["api-server"]
command: api-server
ports:
- 12345:12345
environment:
@@ -62,6 +64,9 @@ services:
DATABASE_PASSWORD: root
DATABASE_DATABASE: dolphinscheduler
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
RESOURCE_STORAGE_TYPE: HDFS
RESOURCE_UPLOAD_PATH: /dolphinscheduler
FS_DEFAULT_FS: file:///
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "ApiApplicationServer"]
interval: 30s
@@ -72,37 +77,16 @@ services:
- dolphinscheduler-postgresql
- dolphinscheduler-zookeeper
volumes:
- ./dolphinscheduler-logs:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler
dolphinscheduler-frontend:
image: apache/dolphinscheduler:latest
container_name: dolphinscheduler-frontend
command: ["frontend"]
ports:
- 8888:8888
environment:
TZ: Asia/Shanghai
FRONTEND_API_SERVER_HOST: dolphinscheduler-api
FRONTEND_API_SERVER_PORT: 12345
healthcheck:
test: ["CMD", "nc", "-z", "localhost", "8888"]
interval: 30s
timeout: 5s
retries: 3
start_period: 30s
depends_on:
- dolphinscheduler-api
volumes:
- ./dolphinscheduler-logs:/var/log/nginx
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
- dolphinscheduler-resource-local:/dolphinscheduler
restart: unless-stopped
networks:
- dolphinscheduler
dolphinscheduler-alert:
image: apache/dolphinscheduler:latest
container_name: dolphinscheduler-alert
command: ["alert-server"]
command: alert-server
environment:
TZ: Asia/Shanghai
XLS_FILE_PATH: "/tmp/xls"
@@ -133,14 +117,15 @@ services:
depends_on:
- dolphinscheduler-postgresql
volumes:
- ./dolphinscheduler-logs:/opt/dolphinscheduler/logs
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
restart: unless-stopped
networks:
- dolphinscheduler
dolphinscheduler-master:
image: apache/dolphinscheduler:latest
container_name: dolphinscheduler-master
command: ["master-server"]
command: master-server
ports:
- 5678:5678
environment:
@@ -168,14 +153,15 @@ services:
- dolphinscheduler-postgresql
- dolphinscheduler-zookeeper
volumes:
- ./dolphinscheduler-logs:/opt/dolphinscheduler/logs
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
restart: unless-stopped
networks:
- dolphinscheduler
dolphinscheduler-worker:
image: apache/dolphinscheduler:latest
container_name: dolphinscheduler-worker
command: ["worker-server"]
command: worker-server
ports:
- 1234:1234
- 50051:50051
@@ -188,30 +174,40 @@ services:
WORKER_RESERVED_MEMORY: "0.1"
WORKER_GROUP: "default"
WORKER_WEIGHT: "100"
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: /tmp/dolphinscheduler
XLS_FILE_PATH: "/tmp/xls"
MAIL_SERVER_HOST: ""
MAIL_SERVER_PORT: ""
MAIL_SENDER: ""
MAIL_USER: ""
MAIL_PASSWD: ""
MAIL_SMTP_STARTTLS_ENABLE: "false"
MAIL_SMTP_SSL_ENABLE: "false"
MAIL_SMTP_SSL_TRUST: ""
DATABASE_HOST: dolphinscheduler-postgresql
DATABASE_PORT: 5432
DATABASE_USERNAME: root
DATABASE_PASSWORD: root
DATABASE_DATABASE: dolphinscheduler
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
RESOURCE_STORAGE_TYPE: HDFS
RESOURCE_UPLOAD_PATH: /dolphinscheduler
FS_DEFAULT_FS: file:///
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "WorkerServer"]
interval: 30s
timeout: 5s
retries: 3
start_period: 30s
depends_on:
- dolphinscheduler-postgresql
- dolphinscheduler-zookeeper
volumes:
- type: bind
source: ./dolphinscheduler_env.sh
target: /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh
- type: volume
source: dolphinscheduler-worker-data
target: /tmp/dolphinscheduler
- ./dolphinscheduler-logs:/opt/dolphinscheduler/logs
- ./dolphinscheduler_env.sh:/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh
- dolphinscheduler-worker-data:/tmp/dolphinscheduler
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
- dolphinscheduler-resource-local:/dolphinscheduler
restart: unless-stopped
networks:
- dolphinscheduler
@@ -224,7 +220,5 @@ volumes:
dolphinscheduler-postgresql-initdb:
dolphinscheduler-zookeeper:
dolphinscheduler-worker-data:
configs:
dolphinscheduler-worker-task-env:
file: ./dolphinscheduler_env.sh
dolphinscheduler-logs:
dolphinscheduler-resource-local:
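A hedged usage note: with logs and resources moved from bind mounts into the named volumes declared above, bringing the stack up and inspecting them looks like this:

```bash
cd docker/docker-swarm
docker-compose up -d
# The named volumes replace the old ./dolphinscheduler-logs bind mount.
docker volume ls | grep dolphinscheduler
```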

84
docker/docker-swarm/docker-stack.yml

@@ -20,13 +20,13 @@ services:
dolphinscheduler-postgresql:
image: bitnami/postgresql:latest
ports:
- 5432:5432
environment:
TZ: Asia/Shanghai
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
ports:
- 5432:5432
volumes:
- dolphinscheduler-postgresql:/bitnami/postgresql
networks:
@@ -37,12 +37,12 @@ services:
dolphinscheduler-zookeeper:
image: bitnami/zookeeper:latest
ports:
- 2181:2181
environment:
TZ: Asia/Shanghai
ALLOW_ANONYMOUS_LOGIN: "yes"
ZOO_4LW_COMMANDS_WHITELIST: srvr,ruok,wchs,cons
ports:
- 2181:2181
volumes:
- dolphinscheduler-zookeeper:/bitnami/zookeeper
networks:
@@ -53,7 +53,9 @@ services:
dolphinscheduler-api:
image: apache/dolphinscheduler:latest
command: ["api-server"]
command: api-server
ports:
- 12345:12345
environment:
TZ: Asia/Shanghai
DATABASE_HOST: dolphinscheduler-postgresql
@@ -62,39 +64,17 @@ services:
DATABASE_PASSWORD: root
DATABASE_DATABASE: dolphinscheduler
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
ports:
- 12345:12345
RESOURCE_STORAGE_TYPE: HDFS
RESOURCE_UPLOAD_PATH: /dolphinscheduler
FS_DEFAULT_FS: file:///
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "ApiApplicationServer"]
interval: 30
interval: 30s
timeout: 5s
retries: 3
start_period: 30s
volumes:
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler
deploy:
mode: replicated
replicas: 1
dolphinscheduler-frontend:
image: apache/dolphinscheduler:latest
command: ["frontend"]
ports:
- 8888:8888
environment:
TZ: Asia/Shanghai
FRONTEND_API_SERVER_HOST: dolphinscheduler-api
FRONTEND_API_SERVER_PORT: 12345
healthcheck:
test: ["CMD", "nc", "-z", "localhost", "8888"]
interval: 30
timeout: 5s
retries: 3
start_period: 30s
volumes:
- dolphinscheduler-logs:/var/log/nginx
networks:
- dolphinscheduler
deploy:
@@ -103,7 +83,7 @@ services:
dolphinscheduler-alert:
image: apache/dolphinscheduler:latest
command: ["alert-server"]
command: alert-server
environment:
TZ: Asia/Shanghai
XLS_FILE_PATH: "/tmp/xls"
@@ -127,13 +107,13 @@ services:
DATABASE_DATABASE: dolphinscheduler
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "AlertServer"]
interval: 30
interval: 30s
timeout: 5s
retries: 3
start_period: 30s
volumes:
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler
deploy:
mode: replicated
@@ -141,10 +121,10 @@ services:
dolphinscheduler-master:
image: apache/dolphinscheduler:latest
command: ["master-server"]
ports:
command: master-server
ports:
- 5678:5678
environment:
TZ: Asia/Shanghai
MASTER_EXEC_THREADS: "100"
MASTER_EXEC_TASK_NUM: "20"
@@ -161,7 +141,7 @@ services:
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "MasterServer"]
interval: 30
interval: 30s
timeout: 5s
retries: 3
start_period: 30s
@@ -175,11 +155,11 @@ services:
dolphinscheduler-worker:
image: apache/dolphinscheduler:latest
command: ["worker-server"]
ports:
command: worker-server
ports:
- 1234:1234
- 50051:50051
environment:
TZ: Asia/Shanghai
WORKER_EXEC_THREADS: "100"
WORKER_HEARTBEAT_INTERVAL: "10"
@@ -188,25 +168,37 @@ services:
WORKER_RESERVED_MEMORY: "0.1"
WORKER_GROUP: "default"
WORKER_WEIGHT: "100"
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: /tmp/dolphinscheduler
XLS_FILE_PATH: "/tmp/xls"
MAIL_SERVER_HOST: ""
MAIL_SERVER_PORT: ""
MAIL_SENDER: ""
MAIL_USER: ""
MAIL_PASSWD: ""
MAIL_SMTP_STARTTLS_ENABLE: "false"
MAIL_SMTP_SSL_ENABLE: "false"
MAIL_SMTP_SSL_TRUST: ""
DATABASE_HOST: dolphinscheduler-postgresql
DATABASE_PORT: 5432
DATABASE_USERNAME: root
DATABASE_PASSWORD: root
DATABASE_DATABASE: dolphinscheduler
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
RESOURCE_STORAGE_TYPE: HDFS
RESOURCE_UPLOAD_PATH: /dolphinscheduler
FS_DEFAULT_FS: file:///
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "WorkerServer"]
interval: 30
interval: 30s
timeout: 5s
retries: 3
start_period: 30s
volumes:
- dolphinscheduler-worker-data:/tmp/dolphinscheduler
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
configs:
- source: dolphinscheduler-worker-task-env
target: /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh
volumes:
- dolphinscheduler-worker-data:/tmp/dolphinscheduler
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler
deploy:
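Deploying this stack file requires swarm mode, since the worker's `dolphinscheduler_env.sh` is distributed as a swarm config rather than a bind mount; a sketch:

```bash
cd docker/docker-swarm
docker swarm init            # once, if the node is not already a manager
docker stack deploy -c docker-stack.yml dolphinscheduler
docker stack services dolphinscheduler   # each service should reach replicas 1/1
```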

12
docker/docker-swarm/dolphinscheduler_env.sh

@@ -15,6 +15,14 @@
# limitations under the License.
#
export PYTHON_HOME=/usr/bin/python2
export HADOOP_HOME=/opt/soft/hadoop
export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop
export SPARK_HOME1=/opt/soft/spark1
export SPARK_HOME2=/opt/soft/spark2
export PYTHON_HOME=/usr/bin/python
export JAVA_HOME=/usr/lib/jvm/java-1.8-openjdk
export PATH=$PYTHON_HOME:$JAVA_HOME/bin:$PATH
export HIVE_HOME=/opt/soft/hive
export FLINK_HOME=/opt/soft/flink
export DATAX_HOME=/opt/soft/datax/bin/datax.py
export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$PATH:$FLINK_HOME/bin:$DATAX_HOME:$PATH

10
docker/kubernetes/dolphinscheduler/Chart.yaml

@@ -22,7 +22,7 @@ home: https://dolphinscheduler.apache.org
icon: https://dolphinscheduler.apache.org/img/hlogo_colorful.svg
keywords:
- dolphinscheduler
- Scheduler
- scheduler
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
@@ -35,18 +35,18 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 1.0.0
version: 1.2.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 1.3.0
appVersion: 1.4.0
dependencies:
- name: postgresql
version: 8.x.x
version: 10.x.x
repository: https://charts.bitnami.com/bitnami
condition: postgresql.enabled
- name: zookeeper
version: 5.x.x
version: 6.x.x
repository: https://charts.bitnami.com/bitnami
condition: zookeeper.enabled

37
docker/kubernetes/dolphinscheduler/README.md

@@ -7,19 +7,20 @@ This chart bootstraps a [Dolphin Scheduler](https://dolphinscheduler.apache.org)
## Prerequisites
- Kubernetes 1.10+
- Helm 3.1.0+
- Kubernetes 1.12+
- PV provisioner support in the underlying infrastructure
## Installing the Chart
To install the chart with the release name `my-release`:
To install the chart with the release name `dolphinscheduler`:
```bash
$ git clone https://github.com/apache/incubator-dolphinscheduler.git
$ cd incubator-dolphinscheduler/kubernetes/dolphinscheduler
$ cd incubator-dolphinscheduler/docker/kubernetes/dolphinscheduler
$ helm repo add bitnami https://charts.bitnami.com/bitnami
$ helm dependency update .
$ helm install --name dolphinscheduler .
$ helm install dolphinscheduler .
```
These commands deploy Dolphin Scheduler on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
@@ -30,7 +31,7 @@ These commands deploy Dolphin Scheduler on the Kubernetes cluster in the default configuration.
To uninstall/delete the `dolphinscheduler` deployment:
```bash
$ helm delete --purge dolphinscheduler
$ helm uninstall dolphinscheduler
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
@@ -220,32 +221,6 @@ The following table lists the configurable parameters of the Dolphin Scheduler chart.
| `api.persistentVolumeClaim.storageClassName` | `api` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `api.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `frontend.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
| `frontend.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
| `frontend.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
| `frontend.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
| `frontend.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `frontend.tolerations` | If specified, the pod's tolerations | `{}` |
| `frontend.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `frontend.resources` | The `resource` limit and request config for frontend server. | `{}` |
| `frontend.annotations` | The `annotations` for frontend server. | `{}` |
| `frontend.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `frontend.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `frontend.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `frontend.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `frontend.livenessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` |
| `frontend.livenessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` |
| `frontend.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `frontend.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `frontend.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `frontend.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `frontend.readinessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` |
| `frontend.readinessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` |
| `frontend.persistentVolumeClaim.enabled` | Set `frontend.persistentVolumeClaim.enabled` to `true` to mount a new volume for `frontend` | `false` |
| `frontend.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `frontend.persistentVolumeClaim.storageClassName` | `frontend` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `frontend.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `ingress.enabled` | Enable ingress | `false` |
| `ingress.host` | Ingress host | `dolphinscheduler.org` |
| `ingress.path` | Ingress path | `/` |
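After installing, the frontend-less layout can be verified with the release labels used throughout the templates (release name `dolphinscheduler`, as in the README above):

```bash
# No frontend deployment should appear; the UI is served by the api pods.
kubectl get pods -l app.kubernetes.io/instance=dolphinscheduler
helm get values dolphinscheduler   # overrides relative to the table above
```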

25
docker/kubernetes/dolphinscheduler/requirements.yaml

@@ -1,25 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
dependencies:
- name: postgresql
version: 8.x.x
repository: https://charts.bitnami.com/bitnami
condition: postgresql.enabled
- name: zookeeper
version: 5.x.x
repository: https://charts.bitnami.com/bitnami
condition: redis.enabled

25
docker/kubernetes/dolphinscheduler/requirements.yaml~HEAD

@@ -1,25 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
dependencies:
- name: postgresql
version: 8.x.x
repository: https://charts.bitnami.com/bitnami
condition: postgresql.enabled
- name: zookeeper
version: 5.x.x
repository: https://charts.bitnami.com/bitnami
condition: redis.enabled

25
docker/kubernetes/dolphinscheduler/requirements.yaml~HEAD_0

@@ -1,25 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
dependencies:
- name: postgresql
version: 8.x.x
repository: https://charts.bitnami.com/bitnami
condition: postgresql.enabled
- name: zookeeper
version: 5.x.x
repository: https://charts.bitnami.com/bitnami
condition: redis.enabled

25
docker/kubernetes/dolphinscheduler/requirements.yaml~dev

@@ -1,25 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
dependencies:
- name: postgresql
version: 8.x.x
repository: https://charts.bitnami.com/bitnami
condition: postgresql.enabled
- name: zookeeper
version: 5.x.x
repository: https://charts.bitnami.com/bitnami
condition: redis.enabled

25
docker/kubernetes/dolphinscheduler/requirements.yaml~dev_0

@@ -1,25 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
dependencies:
- name: postgresql
version: 8.x.x
repository: https://charts.bitnami.com/bitnami
condition: postgresql.enabled
- name: zookeeper
version: 5.x.x
repository: https://charts.bitnami.com/bitnami
condition: redis.enabled

6
docker/kubernetes/dolphinscheduler/templates/NOTES.txt

@@ -15,9 +15,9 @@
# limitations under the License.
#
** Please be patient while the chart is being deployed **
** Please be patient while the chart Dolphinscheduler {{ .Chart.AppVersion }} is being deployed **
1. Get the Dolphinscheduler URL by running:
Get the Dolphinscheduler URL by running:
{{- if .Values.ingress.enabled }}
@@ -26,6 +26,6 @@
{{- else }}
kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "dolphinscheduler.fullname" . }}-frontend 8888:8888
kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "dolphinscheduler.fullname" . }}-api 12345:12345
{{- end }}
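With the frontend service gone, the rendered notes point users at the api service instead; assuming the release is named `dolphinscheduler` in the `default` namespace, that amounts to:

```bash
kubectl port-forward --namespace default svc/dolphinscheduler-api 12345:12345
# then open http://127.0.0.1:12345/dolphinscheduler in a browser
```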

4
docker/kubernetes/dolphinscheduler/templates/_helpers.tpl

@@ -135,7 +135,7 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
Create a default dolphinscheduler worker base dir.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "dolphinscheduler.worker.base.dir" -}}
{{- $name := default "/tmp/dolphinscheduler" .Values.worker.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH -}}
{{- define "dolphinscheduler.data.basedir.path" -}}
{{- $name := default "/tmp/dolphinscheduler" .Values.common.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH -}}
{{- printf "%s" $name | trunc 63 | trimSuffix "/" -}}
{{- end -}}
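The renamed helper can be exercised without deploying anything by rendering the one template that now consumes it; a sketch (release name is illustrative):

```bash
# Shows DOLPHINSCHEDULER_DATA_BASEDIR_PATH resolved by dolphinscheduler.data.basedir.path
# (it falls back to /tmp/dolphinscheduler when the values key is unset).
helm template dolphinscheduler . \
  --show-only templates/configmap-dolphinscheduler-common.yaml
```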

1
docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-alert.yaml

@@ -24,6 +24,7 @@ metadata:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
data:
DOLPHINSCHEDULER_OPTS: {{ .Values.alert.configmap.DOLPHINSCHEDULER_OPTS | quote }}
ALERT_PLUGIN_DIR: {{ .Values.alert.configmap.ALERT_PLUGIN_DIR | quote }}
XLS_FILE_PATH: {{ .Values.alert.configmap.XLS_FILE_PATH | quote }}
MAIL_SERVER_HOST: {{ .Values.alert.configmap.MAIL_SERVER_HOST | quote }}

21
docker/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-frontend.yaml → docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-api.yaml

@ -14,22 +14,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
{{- if .Values.api.configmap }}
apiVersion: v1
kind: Service
kind: ConfigMap
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-frontend
name: {{ include "dolphinscheduler.fullname" . }}-api
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
ports:
- port: 8888
targetPort: tcp-port
protocol: TCP
name: tcp-port
selector:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: frontend
data:
DOLPHINSCHEDULER_OPTS: {{ .Values.api.configmap.DOLPHINSCHEDULER_OPTS | quote }}
{{- end }}

8
docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-common.yaml

@ -24,12 +24,14 @@ metadata:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
data:
DOLPHINSCHEDULER_ENV_PATH: {{ .Values.common.configmap.DOLPHINSCHEDULER_ENV_PATH | quote }}
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: {{ .Values.common.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH | quote }}
DOLPHINSCHEDULER_ENV: |-
{{- range .Values.common.configmap.DOLPHINSCHEDULER_ENV }}
{{ . }}
{{- end }}
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: {{ include "dolphinscheduler.data.basedir.path" . | quote }}
RESOURCE_STORAGE_TYPE: {{ .Values.common.configmap.RESOURCE_STORAGE_TYPE | quote }}
RESOURCE_UPLOAD_PATH: {{ .Values.common.configmap.RESOURCE_UPLOAD_PATH | quote }}
FS_DEFAULT_FS: {{ .Values.common.configmap.FS_DEFAULT_FS | quote }}
FS_S3A_ENDPOINT: {{ .Values.common.configmap.FS_S3A_ENDPOINT | quote }}
FS_S3A_ACCESS_KEY: {{ .Values.common.configmap.FS_S3A_ACCESS_KEY | quote }}
FS_S3A_SECRET_KEY: {{ .Values.common.configmap.FS_S3A_SECRET_KEY | quote }}
{{- end }}
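For reference, the |- block renders each DOLPHINSCHEDULER_ENV list entry as one line of the ConfigMap value; a sketch of the rendered data with the defaults from values.yaml in this merge (abridged):
data:
  DOLPHINSCHEDULER_ENV: |-
    export HADOOP_HOME=/opt/soft/hadoop
    export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop
    export SPARK_HOME1=/opt/soft/spark1
    # ...the remaining export lines render the same way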

2
docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-master.yaml

@ -24,6 +24,7 @@ metadata:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
data:
DOLPHINSCHEDULER_OPTS: {{ .Values.master.configmap.DOLPHINSCHEDULER_OPTS | quote }}
MASTER_EXEC_THREADS: {{ .Values.master.configmap.MASTER_EXEC_THREADS | quote }}
MASTER_EXEC_TASK_NUM: {{ .Values.master.configmap.MASTER_EXEC_TASK_NUM | quote }}
MASTER_HEARTBEAT_INTERVAL: {{ .Values.master.configmap.MASTER_HEARTBEAT_INTERVAL | quote }}
@ -32,5 +33,4 @@ data:
MASTER_MAX_CPULOAD_AVG: {{ .Values.master.configmap.MASTER_MAX_CPULOAD_AVG | quote }}
MASTER_RESERVED_MEMORY: {{ .Values.master.configmap.MASTER_RESERVED_MEMORY | quote }}
MASTER_LISTEN_PORT: {{ .Values.master.configmap.MASTER_LISTEN_PORT | quote }}
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: {{ include "dolphinscheduler.worker.base.dir" . | quote }}
{{- end }}

7
docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-worker.yaml

@ -24,17 +24,12 @@ metadata:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
data:
DOLPHINSCHEDULER_OPTS: {{ .Values.worker.configmap.DOLPHINSCHEDULER_OPTS | quote }}
WORKER_EXEC_THREADS: {{ .Values.worker.configmap.WORKER_EXEC_THREADS | quote }}
WORKER_HEARTBEAT_INTERVAL: {{ .Values.worker.configmap.WORKER_HEARTBEAT_INTERVAL | quote }}
WORKER_FETCH_TASK_NUM: {{ .Values.worker.configmap.WORKER_FETCH_TASK_NUM | quote }}
WORKER_MAX_CPULOAD_AVG: {{ .Values.worker.configmap.WORKER_MAX_CPULOAD_AVG | quote }}
WORKER_RESERVED_MEMORY: {{ .Values.worker.configmap.WORKER_RESERVED_MEMORY | quote }}
WORKER_LISTEN_PORT: {{ .Values.worker.configmap.WORKER_LISTEN_PORT | quote }}
WORKER_GROUP: {{ .Values.worker.configmap.WORKER_GROUP | quote }}
WORKER_WEIGHT: {{ .Values.worker.configmap.WORKER_WEIGHT | quote }}
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: {{ include "dolphinscheduler.worker.base.dir" . | quote }}
dolphinscheduler_env.sh: |-
{{- range .Values.worker.configmap.DOLPHINSCHEDULER_ENV }}
{{ . }}
{{- end }}
{{- end }}

68
docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml

@ -57,35 +57,6 @@ spec:
{{- if .Values.alert.tolerations }}
tolerations: {{- toYaml . | nindent 8 }}
{{- end }}
initContainers:
- name: init-database
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do
counter=$((counter+1))
if [ $counter == 5 ]; then
echo "Error: Couldn't connect to database."
exit 1
fi
echo "Trying to connect to database at ${DATABASE_HOST}:${DATABASE_PORT}. Attempt $counter."
sleep 60
done
env:
- name: DATABASE_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: DATABASE_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
- name: {{ include "dolphinscheduler.image.pullSecrets" . }}
@ -93,14 +64,17 @@ spec:
containers:
- name: {{ include "dolphinscheduler.fullname" . }}-alert
image: {{ include "dolphinscheduler.image.repository" . | quote }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
- "alert-server"
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: DOLPHINSCHEDULER_OPTS
value: {{ default "" .Values.alert.jvmOptions }}
- name: TZ
value: {{ .Values.timezone }}
- name: DOLPHINSCHEDULER_OPTS
valueFrom:
configMapKeyRef:
key: DOLPHINSCHEDULER_OPTS
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: ALERT_PLUGIN_DIR
valueFrom:
configMapKeyRef:
@ -228,36 +202,6 @@ spec:
{{- else }}
value: {{ .Values.externalDatabase.params | quote }}
{{- end }}
- name: RESOURCE_STORAGE_TYPE
valueFrom:
configMapKeyRef:
key: RESOURCE_STORAGE_TYPE
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: RESOURCE_UPLOAD_PATH
valueFrom:
configMapKeyRef:
key: RESOURCE_UPLOAD_PATH
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_DEFAULT_FS
valueFrom:
configMapKeyRef:
key: FS_DEFAULT_FS
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_ENDPOINT
valueFrom:
configMapKeyRef:
key: FS_S3A_ENDPOINT
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_ACCESS_KEY
valueFrom:
configMapKeyRef:
key: FS_S3A_ACCESS_KEY
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_SECRET_KEY
valueFrom:
configMapKeyRef:
key: FS_S3A_SECRET_KEY
name: {{ include "dolphinscheduler.fullname" . }}-common
{{- if .Values.alert.resources }}
resources:
limits:

48
docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml

@ -57,35 +57,6 @@ spec:
{{- if .Values.api.tolerations }}
tolerations: {{- toYaml . | nindent 8 }}
{{- end }}
initContainers:
- name: init-database
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do
counter=$((counter+1))
if [ $counter == 5 ]; then
echo "Error: Couldn't connect to database."
exit 1
fi
echo "Trying to connect to database at ${DATABASE_HOST}:${DATABASE_PORT}. Attempt $counter."
sleep 60
done
env:
- name: DATABASE_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: DATABASE_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
- name: {{ include "dolphinscheduler.image.pullSecrets" . }}
@ -93,17 +64,20 @@ spec:
containers:
- name: {{ include "dolphinscheduler.fullname" . }}-api
image: {{ include "dolphinscheduler.image.repository" . | quote }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
- "api-server"
ports:
- containerPort: 12345
name: tcp-port
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: DOLPHINSCHEDULER_OPTS
value: {{ default "" .Values.api.jvmOptions }}
- name: TZ
value: {{ .Values.timezone }}
- name: DOLPHINSCHEDULER_OPTS
valueFrom:
configMapKeyRef:
key: DOLPHINSCHEDULER_OPTS
name: {{ include "dolphinscheduler.fullname" . }}-api
- name: DATABASE_TYPE
{{- if .Values.postgresql.enabled }}
value: "postgresql"
@ -164,7 +138,7 @@ spec:
{{- end }}
- name: ZOOKEEPER_ROOT
{{- if .Values.zookeeper.enabled }}
value: "/dolphinscheduler"
value: {{ .Values.zookeeper.zookeeperRoot }}
{{- else }}
value: {{ .Values.externalZookeeper.zookeeperRoot }}
{{- end }}
@ -183,6 +157,7 @@ spec:
configMapKeyRef:
key: FS_DEFAULT_FS
name: {{ include "dolphinscheduler.fullname" . }}-common
{{- if eq .Values.common.configmap.RESOURCE_STORAGE_TYPE "S3" }}
- name: FS_S3A_ENDPOINT
valueFrom:
configMapKeyRef:
@ -195,9 +170,10 @@ spec:
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_SECRET_KEY
valueFrom:
configMapKeyRef:
key: FS_S3A_SECRET_KEY
name: {{ include "dolphinscheduler.fullname" . }}-common
secretKeyRef:
key: fs-s3a-secret-key
name: {{ printf "%s-%s" .Release.Name "fs-s3a" }}
{{- end }}
{{- if .Values.api.resources }}
resources:
limits:

119
docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-frontend.yaml

@ -1,119 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-frontend
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: frontend
spec:
replicas: {{ .Values.frontend.replicas }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: frontend
strategy:
type: {{ .Values.frontend.strategy.type | quote }}
rollingUpdate:
maxSurge: {{ .Values.frontend.strategy.rollingUpdate.maxSurge | quote }}
maxUnavailable: {{ .Values.frontend.strategy.rollingUpdate.maxUnavailable | quote }}
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: frontend
{{- if .Values.alert.annotations }}
annotations:
{{- toYaml .Values.alert.annotations | nindent 8 }}
{{- end }}
spec:
{{- if .Values.frontend.affinity }}
affinity: {{- toYaml .Values.frontend.affinity | nindent 8 }}
{{- end }}
{{- if .Values.frontend.nodeSelector }}
nodeSelector: {{- toYaml .Values.frontend.nodeSelector | nindent 8 }}
{{- end }}
{{- if .Values.frontend.tolerations }}
tolerations: {{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
- name: {{ include "dolphinscheduler.image.pullSecrets" . }}
{{- end }}
containers:
- name: {{ include "dolphinscheduler.fullname" . }}-frontend
image: {{ include "dolphinscheduler.image.repository" . | quote }}
args:
- "frontend"
ports:
- containerPort: 8888
name: tcp-port
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: TZ
value: {{ .Values.timezone }}
- name: FRONTEND_API_SERVER_HOST
value: '{{ include "dolphinscheduler.fullname" . }}-api'
- name: FRONTEND_API_SERVER_PORT
value: "12345"
{{- if .Values.frontend.resources }}
resources:
limits:
memory: {{ .Values.frontend.resources.limits.memory | quote }}
cpu: {{ .Values.frontend.resources.limits.cpu | quote }}
requests:
memory: {{ .Values.frontend.resources.requests.memory | quote }}
cpu: {{ .Values.frontend.resources.requests.cpu | quote }}
{{- end }}
{{- if .Values.frontend.livenessProbe.enabled }}
livenessProbe:
tcpSocket:
port: 8888
initialDelaySeconds: {{ .Values.frontend.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.frontend.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.frontend.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.frontend.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.frontend.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.frontend.readinessProbe.enabled }}
readinessProbe:
tcpSocket:
port: 8888
initialDelaySeconds: {{ .Values.frontend.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.frontend.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.frontend.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.frontend.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.frontend.readinessProbe.failureThreshold }}
{{- end }}
volumeMounts:
- mountPath: "/var/log/nginx"
name: {{ include "dolphinscheduler.fullname" . }}-frontend
volumes:
- name: {{ include "dolphinscheduler.fullname" . }}-frontend
{{- if .Values.frontend.persistentVolumeClaim.enabled }}
persistentVolumeClaim:
claimName: {{ include "dolphinscheduler.fullname" . }}-frontend
{{- else }}
emptyDir: {}
{{- end }}

2
docker/kubernetes/dolphinscheduler/templates/ingress.yaml

@ -30,7 +30,7 @@ spec:
paths:
- path: {{ .Values.ingress.path }}
backend:
serviceName: {{ include "dolphinscheduler.fullname" . }}-frontend
serviceName: {{ include "dolphinscheduler.fullname" . }}-api
servicePort: tcp-port
{{- if .Values.ingress.tls.enabled }}
tls:

2
docker/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-alert.yaml

@ -25,7 +25,7 @@ metadata:
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
accessModes:
{{- range .Values.alert.persistentVolumeClaim.accessModes }}
{{- range .Values.alert.persistentVolumeClaim.accessModes }}
- {{ . | quote }}
{{- end }}
storageClassName: {{ .Values.alert.persistentVolumeClaim.storageClassName | quote }}

2
docker/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-api.yaml

@ -25,7 +25,7 @@ metadata:
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
accessModes:
{{- range .Values.api.persistentVolumeClaim.accessModes }}
{{- range .Values.api.persistentVolumeClaim.accessModes }}
- {{ . | quote }}
{{- end }}
storageClassName: {{ .Values.api.persistentVolumeClaim.storageClassName | quote }}

20
docker/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-frontend.yaml → docker/kubernetes/dolphinscheduler/templates/secret-external-fs-s3a.yaml

@ -14,22 +14,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
{{- if .Values.frontend.persistentVolumeClaim.enabled }}
{{- if eq .Values.common.configmap.RESOURCE_STORAGE_TYPE "S3" }}
apiVersion: v1
kind: PersistentVolumeClaim
kind: Secret
metadata:
name: {{ include "dolphinscheduler.fullname" . }}-frontend
name: {{ printf "%s-%s" .Release.Name "fs-s3a" }}
labels:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-fs-s3a
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
accessModes:
{{- range .Values.frontend.persistentVolumeClaim.accessModes }}
- {{ . | quote }}
{{- end }}
storageClassName: {{ .Values.frontend.persistentVolumeClaim.storageClassName | quote }}
resources:
requests:
storage: {{ .Values.frontend.persistentVolumeClaim.storage | quote }}
type: Opaque
data:
fs-s3a-secret-key: {{ .Values.common.configmap.FS_S3A_SECRET_KEY | b64enc | quote }}
{{- end }}
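Since the key is stored base64-encoded under fs-s3a-secret-key, the round trip can be verified after install; a hedged one-liner, assuming a Helm 3 release named dolphinscheduler and RESOURCE_STORAGE_TYPE set to S3:
kubectl get secret dolphinscheduler-fs-s3a -o jsonpath='{.data.fs-s3a-secret-key}' | base64 -d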

96
docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml

@ -54,59 +54,6 @@ spec:
{{- if .Values.master.tolerations }}
tolerations: {{- toYaml . | nindent 8 }}
{{- end }}
initContainers:
- name: init-zookeeper
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
echo "${ZOOKEEPER_QUORUM}" | awk -F ',' 'BEGIN{ i=1 }{ while( i <= NF ){ print $i; i++ } }' | while read line; do
while ! nc -z ${line%:*} ${line#*:}; do
counter=$((counter+1))
if [ $counter == 5 ]; then
echo "Error: Couldn't connect to zookeeper."
exit 1
fi
echo "Trying to connect to zookeeper at ${line}. Attempt $counter."
sleep 60
done
done
env:
- name: ZOOKEEPER_QUORUM
{{- if .Values.zookeeper.enabled }}
value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"
{{- else }}
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
{{- end }}
- name: init-database
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do
counter=$((counter+1))
if [ $counter == 5 ]; then
echo "Error: Couldn't connect to database."
exit 1
fi
echo "Trying to connect to database at ${DATABASE_HOST}:${DATABASE_PORT}. Attempt $counter."
sleep 60
done
env:
- name: DATABASE_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: DATABASE_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
- name: {{ include "dolphinscheduler.image.pullSecrets" . }}
@ -114,17 +61,20 @@ spec:
containers:
- name: {{ include "dolphinscheduler.fullname" . }}-master
image: {{ include "dolphinscheduler.image.repository" . | quote }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
- "master-server"
ports:
- containerPort: {{ .Values.master.configmap.MASTER_LISTEN_PORT }}
name: "master-port"
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: DOLPHINSCHEDULER_OPTS
value: {{ default "" .Values.master.jvmOptions }}
- name: TZ
value: {{ .Values.timezone }}
- name: DOLPHINSCHEDULER_OPTS
valueFrom:
configMapKeyRef:
key: DOLPHINSCHEDULER_OPTS
name: {{ include "dolphinscheduler.fullname" . }}-master
- name: MASTER_EXEC_THREADS
valueFrom:
configMapKeyRef:
@ -168,7 +118,7 @@ spec:
- name: DOLPHINSCHEDULER_DATA_BASEDIR_PATH
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-master
name: {{ include "dolphinscheduler.fullname" . }}-common
key: DOLPHINSCHEDULER_DATA_BASEDIR_PATH
- name: DATABASE_TYPE
{{- if .Values.postgresql.enabled }}
@ -230,40 +180,10 @@ spec:
{{- end }}
- name: ZOOKEEPER_ROOT
{{- if .Values.zookeeper.enabled }}
value: "/dolphinscheduler"
value: {{ .Values.zookeeper.zookeeperRoot }}
{{- else }}
value: {{ .Values.externalZookeeper.zookeeperRoot }}
{{- end }}
- name: RESOURCE_STORAGE_TYPE
valueFrom:
configMapKeyRef:
key: RESOURCE_STORAGE_TYPE
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: RESOURCE_UPLOAD_PATH
valueFrom:
configMapKeyRef:
key: RESOURCE_UPLOAD_PATH
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_DEFAULT_FS
valueFrom:
configMapKeyRef:
key: FS_DEFAULT_FS
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_ENDPOINT
valueFrom:
configMapKeyRef:
key: FS_S3A_ENDPOINT
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_ACCESS_KEY
valueFrom:
configMapKeyRef:
key: FS_S3A_ACCESS_KEY
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_SECRET_KEY
valueFrom:
configMapKeyRef:
key: FS_S3A_SECRET_KEY
name: {{ include "dolphinscheduler.fullname" . }}-common
{{- if .Values.master.resources }}
resources:
limits:

184
docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml

@ -54,59 +54,6 @@ spec:
{{- if .Values.worker.tolerations }}
tolerations: {{- toYaml . | nindent 8 }}
{{- end }}
initContainers:
- name: init-zookeeper
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
echo "${ZOOKEEPER_QUORUM}" | awk -F ',' 'BEGIN{ i=1 }{ while( i <= NF ){ print $i; i++ } }' | while read line; do
while ! nc -z ${line%:*} ${line#*:}; do
counter=$((counter+1))
if [ $counter == 5 ]; then
echo "Error: Couldn't connect to zookeeper."
exit 1
fi
echo "Trying to connect to zookeeper at ${line}. Attempt $counter."
sleep 60
done
done
env:
- name: ZOOKEEPER_QUORUM
{{- if .Values.zookeeper.enabled }}
value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"
{{- else }}
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
{{- end }}
- name: init-database
image: busybox:1.31.0
command:
- /bin/sh
- -ec
- |
while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do
counter=$((counter+1))
if [ $counter == 5 ]; then
echo "Error: Couldn't connect to database."
exit 1
fi
echo "Trying to connect to database at ${DATABASE_HOST}:${DATABASE_PORT}. Attempt $counter."
sleep 60
done
env:
- name: DATABASE_HOST
{{- if .Values.postgresql.enabled }}
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
{{- else }}
value: {{ .Values.externalDatabase.host | quote }}
{{- end }}
- name: DATABASE_PORT
{{- if .Values.postgresql.enabled }}
value: "5432"
{{- else }}
value: {{ .Values.externalDatabase.port | quote }}
{{- end }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
- name: {{ include "dolphinscheduler.image.pullSecrets" . }}
@ -114,29 +61,27 @@ spec:
containers:
- name: {{ include "dolphinscheduler.fullname" . }}-worker
image: {{ include "dolphinscheduler.image.repository" . | quote }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
- "worker-server"
ports:
- containerPort: {{ .Values.worker.configmap.WORKER_LISTEN_PORT }}
name: "worker-port"
- containerPort: 50051
name: "logs-port"
imagePullPolicy: {{ .Values.image.pullPolicy }}
name: "logger-port"
env:
- name: DOLPHINSCHEDULER_OPTS
value: {{ default "" .Values.worker.jvmOptions }}
- name: TZ
value: {{ .Values.timezone }}
- name: WORKER_EXEC_THREADS
- name: DOLPHINSCHEDULER_OPTS
valueFrom:
configMapKeyRef:
key: DOLPHINSCHEDULER_OPTS
name: {{ include "dolphinscheduler.fullname" . }}-worker
key: WORKER_EXEC_THREADS
- name: WORKER_FETCH_TASK_NUM
- name: WORKER_EXEC_THREADS
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-worker
key: WORKER_FETCH_TASK_NUM
key: WORKER_EXEC_THREADS
- name: WORKER_HEARTBEAT_INTERVAL
valueFrom:
configMapKeyRef:
@ -170,8 +115,58 @@ spec:
- name: DOLPHINSCHEDULER_DATA_BASEDIR_PATH
valueFrom:
configMapKeyRef:
name: {{ include "dolphinscheduler.fullname" . }}-master
name: {{ include "dolphinscheduler.fullname" . }}-common
key: DOLPHINSCHEDULER_DATA_BASEDIR_PATH
- name: ALERT_PLUGIN_DIR
valueFrom:
configMapKeyRef:
key: ALERT_PLUGIN_DIR
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: XLS_FILE_PATH
valueFrom:
configMapKeyRef:
key: XLS_FILE_PATH
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SERVER_HOST
valueFrom:
configMapKeyRef:
key: MAIL_SERVER_HOST
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SERVER_PORT
valueFrom:
configMapKeyRef:
key: MAIL_SERVER_PORT
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SENDER
valueFrom:
configMapKeyRef:
key: MAIL_SENDER
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_USER
valueFrom:
configMapKeyRef:
key: MAIL_USER
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_PASSWD
valueFrom:
configMapKeyRef:
key: MAIL_PASSWD
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SMTP_STARTTLS_ENABLE
valueFrom:
configMapKeyRef:
key: MAIL_SMTP_STARTTLS_ENABLE
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SMTP_SSL_ENABLE
valueFrom:
configMapKeyRef:
key: MAIL_SMTP_SSL_ENABLE
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SMTP_SSL_TRUST
valueFrom:
configMapKeyRef:
key: MAIL_SMTP_SSL_TRUST
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: DATABASE_TYPE
{{- if .Values.postgresql.enabled }}
value: "postgresql"
@ -232,7 +227,7 @@ spec:
{{- end }}
- name: ZOOKEEPER_ROOT
{{- if .Values.zookeeper.enabled }}
value: "/dolphinscheduler"
value: {{ .Values.zookeeper.zookeeperRoot }}
{{- else }}
value: {{ .Values.externalZookeeper.zookeeperRoot }}
{{- end }}
@ -251,6 +246,7 @@ spec:
configMapKeyRef:
key: FS_DEFAULT_FS
name: {{ include "dolphinscheduler.fullname" . }}-common
{{- if eq .Values.common.configmap.RESOURCE_STORAGE_TYPE "S3" }}
- name: FS_S3A_ENDPOINT
valueFrom:
configMapKeyRef:
@ -263,54 +259,10 @@ spec:
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: FS_S3A_SECRET_KEY
valueFrom:
configMapKeyRef:
key: FS_S3A_SECRET_KEY
name: {{ include "dolphinscheduler.fullname" . }}-common
- name: XLS_FILE_PATH
valueFrom:
configMapKeyRef:
key: XLS_FILE_PATH
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SERVER_HOST
valueFrom:
configMapKeyRef:
key: MAIL_SERVER_HOST
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SERVER_PORT
valueFrom:
configMapKeyRef:
key: MAIL_SERVER_PORT
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SENDER
valueFrom:
configMapKeyRef:
key: MAIL_SENDER
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_USER
valueFrom:
configMapKeyRef:
key: MAIL_USER
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_PASSWD
valueFrom:
configMapKeyRef:
key: MAIL_PASSWD
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SMTP_STARTTLS_ENABLE
valueFrom:
configMapKeyRef:
key: MAIL_SMTP_STARTTLS_ENABLE
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SMTP_SSL_ENABLE
valueFrom:
configMapKeyRef:
key: MAIL_SMTP_SSL_ENABLE
name: {{ include "dolphinscheduler.fullname" . }}-alert
- name: MAIL_SMTP_SSL_TRUST
valueFrom:
configMapKeyRef:
key: MAIL_SMTP_SSL_TRUST
name: {{ include "dolphinscheduler.fullname" . }}-alert
secretKeyRef:
key: fs-s3a-secret-key
name: {{ printf "%s-%s" .Release.Name "fs-s3a" }}
{{- end }}
- name: ENTERPRISE_WECHAT_ENABLE
valueFrom:
configMapKeyRef:
@ -372,13 +324,13 @@ spec:
failureThreshold: {{ .Values.worker.readinessProbe.failureThreshold }}
{{- end }}
volumeMounts:
- mountPath: {{ include "dolphinscheduler.worker.base.dir" . | quote }}
- mountPath: {{ include "dolphinscheduler.data.basedir.path" . | quote }}
name: {{ include "dolphinscheduler.fullname" . }}-worker-data
- mountPath: "/opt/dolphinscheduler/logs"
name: {{ include "dolphinscheduler.fullname" . }}-worker-logs
- mountPath: "/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh"
subPath: "dolphinscheduler_env.sh"
name: {{ include "dolphinscheduler.fullname" . }}-worker-configmap
name: {{ include "dolphinscheduler.fullname" . }}-common-env
volumes:
- name: {{ include "dolphinscheduler.fullname" . }}-worker-data
{{- if .Values.worker.persistentVolumeClaim.dataPersistentVolume.enabled }}
@ -394,12 +346,12 @@ spec:
{{- else }}
emptyDir: {}
{{- end }}
- name: {{ include "dolphinscheduler.fullname" . }}-worker-configmap
- name: {{ include "dolphinscheduler.fullname" . }}-common-env
configMap:
defaultMode: 0777
name: {{ include "dolphinscheduler.fullname" . }}-worker
name: {{ include "dolphinscheduler.fullname" . }}-common
items:
- key: dolphinscheduler_env.sh
- key: DOLPHINSCHEDULER_ENV
path: dolphinscheduler_env.sh
{{- if .Values.worker.persistentVolumeClaim.enabled }}
volumeClaimTemplates:

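With the items: projection above, the single DOLPHINSCHEDULER_ENV key of the common ConfigMap is materialized as a file named dolphinscheduler_env.sh; combined with the volumeMounts earlier in this template, the effective mount inside the worker should be (a sketch, not literal chart output):
/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh  <-  ConfigMap <fullname>-common, key DOLPHINSCHEDULER_ENV, mode 0777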
4
docker/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-worker-headless.yaml

@ -30,9 +30,9 @@ spec:
protocol: TCP
name: worker-port
- port: 50051
targetPort: logs-port
targetPort: logger-port
protocol: TCP
name: logs-port
name: logger-port
selector:
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker
app.kubernetes.io/instance: {{ .Release.Name }}

310
docker/kubernetes/dolphinscheduler/values.yaml

@ -58,62 +58,74 @@ externalDatabase:
# If no external zookeeper exists, Dolphinscheduler will use this built-in zookeeper by default.
zookeeper:
enabled: true
taskQueue: "zookeeper"
config: null
fourlwCommandsWhitelist: srvr,ruok,wchs,cons
service:
port: "2181"
persistence:
enabled: false
size: "20Gi"
storageClass: "-"
zookeeperRoot: "/dolphinscheduler"
# If an external zookeeper already exists, set zookeeper.enabled to false.
# When zookeeper.enabled is false, Dolphinscheduler will use the external zookeeper instead.
externalZookeeper:
taskQueue: "zookeeper"
zookeeperQuorum: "127.0.0.1:2181"
zookeeperRoot: "/dolphinscheduler"
common:
## ConfigMap
configmap:
DOLPHINSCHEDULER_ENV_PATH: "/tmp/dolphinscheduler/env"
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler/files"
RESOURCE_STORAGE_TYPE: "NONE"
RESOURCE_UPLOAD_PATH: "/ds"
FS_DEFAULT_FS: "s3a://xxxx"
DOLPHINSCHEDULER_ENV:
- "export HADOOP_HOME=/opt/soft/hadoop"
- "export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop"
- "export SPARK_HOME1=/opt/soft/spark1"
- "export SPARK_HOME2=/opt/soft/spark2"
- "export PYTHON_HOME=/usr/bin/python"
- "export JAVA_HOME=/usr/lib/jvm/java-1.8-openjdk"
- "export HIVE_HOME=/opt/soft/hive"
- "export FLINK_HOME=/opt/soft/flink"
- "export DATAX_HOME=/opt/soft/datax/bin/datax.py"
- "export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$FLINK_HOME/bin:$PATH"
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
RESOURCE_STORAGE_TYPE: "HDFS"
RESOURCE_UPLOAD_PATH: "/dolphinscheduler"
FS_DEFAULT_FS: "file:///"
FS_S3A_ENDPOINT: "s3.xxx.amazonaws.com"
FS_S3A_ACCESS_KEY: "xxxxxxx"
FS_S3A_SECRET_KEY: "xxxxxxx"
master:
## PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down.
podManagementPolicy: "Parallel"
## Replicas is the desired number of replicas of the given Template.
replicas: "3"
# NodeSelector is a selector which must be true for the pod to fit on a node.
# Selector which must match a node's labels for the pod to be scheduled on that node.
# More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
## You can use annotations to attach arbitrary non-identifying metadata to objects.
## Clients such as tools and libraries can retrieve this metadata.
annotations: {}
## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
## NodeSelector is a selector which must be true for the pod to fit on a node.
## Selector which must match a node's labels for the pod to be scheduled on that node.
## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
nodeSelector: {}
# Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
# effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
tolerations: []
# Affinity is a group of affinity scheduling rules.
# If specified, the pod's scheduling constraints.
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
# The jvm options for java instance startup
jvmOptions: ""
## Compute Resources required by this container. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
resources: {}
# limits:
# memory: "18Gi"
# cpu: "4"
# requests:
# memory: "2Gi"
# cpu: "500m"
# You can use annotations to attach arbitrary non-identifying metadata to objects.
# Clients such as tools and libraries can retrieve this metadata.
annotations: {}
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
# resources:
# limits:
# memory: "8Gi"
# cpu: "4"
# requests:
# memory: "2Gi"
# cpu: "500m"
## ConfigMap
configmap:
DOLPHINSCHEDULER_OPTS: ""
MASTER_EXEC_THREADS: "100"
MASTER_EXEC_TASK_NUM: "20"
MASTER_HEARTBEAT_INTERVAL: "10"
@ -122,6 +134,8 @@ master:
MASTER_MAX_CPULOAD_AVG: "100"
MASTER_RESERVED_MEMORY: "0.1"
MASTER_LISTEN_PORT: "5678"
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
livenessProbe:
enabled: true
initialDelaySeconds: "30"
@ -138,7 +152,7 @@ master:
timeoutSeconds: "5"
failureThreshold: "3"
successThreshold: "1"
## volumeClaimTemplates is a list of claims that pods are allowed to reference.
## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
## A claim in this list takes precedence over any volumes in the template, with the same name.
@ -150,31 +164,43 @@ master:
storage: "20Gi"
worker:
## PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down.
podManagementPolicy: "Parallel"
## Replicas is the desired number of replicas of the given Template.
replicas: "3"
# NodeSelector is a selector which must be true for the pod to fit on a node.
# Selector which must match a node's labels for the pod to be scheduled on that node.
# More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
## You can use annotations to attach arbitrary non-identifying metadata to objects.
## Clients such as tools and libraries can retrieve this metadata.
annotations: {}
## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
## NodeSelector is a selector which must be true for the pod to fit on a node.
## Selector which must match a node's labels for the pod to be scheduled on that node.
## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
nodeSelector: {}
# Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
# effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
tolerations: []
# Affinity is a group of affinity scheduling rules.
# If specified, the pod's scheduling constraints.
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
# The jvm options for java instance startup
jvmOptions: ""
## Compute Resources required by this container. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
resources: {}
# limits:
# memory: "18Gi"
# cpu: "4"
# requests:
# memory: "2Gi"
# cpu: "500m"
# You can use annotations to attach arbitrary non-identifying metadata to objects.
# Clients such as tools and libraries can retrieve this metadata.
annotations: {}
# resources:
# limits:
# memory: "8Gi"
# cpu: "4"
# requests:
# memory: "2Gi"
# cpu: "500m"
## ConfigMap
configmap:
DOLPHINSCHEDULER_OPTS: ""
WORKER_EXEC_THREADS: "100"
WORKER_HEARTBEAT_INTERVAL: "10"
WORKER_MAX_CPULOAD_AVG: "100"
WORKER_RESERVED_MEMORY: "0.1"
WORKER_LISTEN_PORT: "1234"
WORKER_GROUP: "default"
WORKER_WEIGHT: "100"
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
livenessProbe:
@ -193,27 +219,7 @@ worker:
timeoutSeconds: "5"
failureThreshold: "3"
successThreshold: "1"
configmap:
WORKER_EXEC_THREADS: "100"
WORKER_HEARTBEAT_INTERVAL: "10"
WORKER_FETCH_TASK_NUM: "3"
WORKER_MAX_CPULOAD_AVG: "100"
WORKER_RESERVED_MEMORY: "0.1"
WORKER_LISTEN_PORT: "1234"
WORKER_GROUP: "default"
WORKER_WEIGHT: "100"
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
DOLPHINSCHEDULER_ENV:
- "export HADOOP_HOME=/opt/soft/hadoop"
- "export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop"
- "export SPARK_HOME1=/opt/soft/spark1"
- "export SPARK_HOME2=/opt/soft/spark2"
- "export PYTHON_HOME=/opt/soft/python"
- "export JAVA_HOME=/opt/soft/java"
- "export HIVE_HOME=/opt/soft/hive"
- "export FLINK_HOME=/opt/soft/flink"
- "export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$FLINK_HOME/bin:$PATH"
## volumeClaimTemplates is a list of claims that pods are allowed to reference.
## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
## A claim in this list takes precedence over any volumes in the template, with the same name.
@ -235,38 +241,40 @@ worker:
storage: "20Gi"
alert:
## Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.
replicas: "1"
## The deployment strategy to use to replace existing pods with new ones.
strategy:
type: "RollingUpdate"
rollingUpdate:
maxSurge: "25%"
maxUnavailable: "25%"
replicas: "1"
# NodeSelector is a selector which must be true for the pod to fit on a node.
# Selector which must match a node's labels for the pod to be scheduled on that node.
# More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
## You can use annotations to attach arbitrary non-identifying metadata to objects.
## Clients such as tools and libraries can retrieve this metadata.
annotations: {}
## NodeSelector is a selector which must be true for the pod to fit on a node.
## Selector which must match a node's labels for the pod to be scheduled on that node.
## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
affinity: {}
## Compute Resources required by this container. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
nodeSelector: {}
# Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
# effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
tolerations: []
# Affinity is a group of affinity scheduling rules.
# If specified, the pod's scheduling constraints.
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
# The jvm options for java instance startup
jvmOptions: ""
## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
resources: {}
# limits:
# memory: "4Gi"
# cpu: "1"
# requests:
# memory: "2Gi"
# cpu: "500m"
# You can use annotations to attach arbitrary non-identifying metadata to objects.
# Clients such as tools and libraries can retrieve this metadata.
annotations: {}
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
# resources:
# limits:
# memory: "2Gi"
# cpu: "1"
# requests:
# memory: "1Gi"
# cpu: "500m"
## ConfigMap
configmap:
DOLPHINSCHEDULER_OPTS: ""
ALERT_PLUGIN_DIR: "/opt/dolphinscheduler/alert/plugin"
XLS_FILE_PATH: "/tmp/xls"
MAIL_SERVER_HOST: ""
@ -282,63 +290,6 @@ alert:
ENTERPRISE_WECHAT_SECRET: ""
ENTERPRISE_WECHAT_AGENT_ID: ""
ENTERPRISE_WECHAT_USERS: ""
livenessProbe:
enabled: true
initialDelaySeconds: "30"
periodSeconds: "30"
timeoutSeconds: "5"
failureThreshold: "3"
successThreshold: "1"
## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
readinessProbe:
enabled: true
initialDelaySeconds: "30"
periodSeconds: "30"
timeoutSeconds: "5"
failureThreshold: "3"
successThreshold: "1"
## volumeClaimTemplates is a list of claims that pods are allowed to reference.
## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
## A claim in this list takes precedence over any volumes in the template, with the same name.
persistentVolumeClaim:
enabled: false
accessModes:
- "ReadWriteOnce"
storageClassName: "-"
storage: "20Gi"
api:
strategy:
type: "RollingUpdate"
rollingUpdate:
maxSurge: "25%"
maxUnavailable: "25%"
replicas: "1"
# NodeSelector is a selector which must be true for the pod to fit on a node.
# Selector which must match a node's labels for the pod to be scheduled on that node.
# More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
nodeSelector: {}
# Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
# effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
tolerations: []
# Affinity is a group of affinity scheduling rules.
# If specified, the pod's scheduling constraints.
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
# The jvm options for java instance startup
jvmOptions: ""
resources: {}
# limits:
# memory: "4Gi"
# cpu: "2"
# requests:
# memory: "2Gi"
# cpu: "500m"
# You can use annotations to attach arbitrary non-identifying metadata to objects.
# Clients such as tools and libraries can retrieve this metadata.
annotations: {}
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
livenessProbe:
@ -357,10 +308,8 @@ api:
timeoutSeconds: "5"
failureThreshold: "3"
successThreshold: "1"
## volumeClaimTemplates is a list of claims that pods are allowed to reference.
## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
## A claim in this list takes precedence over any volumes in the template, with the same name.
## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
## More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
persistentVolumeClaim:
enabled: false
accessModes:
@ -368,34 +317,41 @@ api:
storageClassName: "-"
storage: "20Gi"
frontend:
api:
## Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.
replicas: "1"
## The deployment strategy to use to replace existing pods with new ones.
strategy:
type: "RollingUpdate"
rollingUpdate:
maxSurge: "25%"
maxUnavailable: "25%"
replicas: "1"
# NodeSelector is a selector which must be true for the pod to fit on a node.
# Selector which must match a node's labels for the pod to be scheduled on that node.
# More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
## You can use annotations to attach arbitrary non-identifying metadata to objects.
## Clients such as tools and libraries can retrieve this metadata.
annotations: {}
## NodeSelector is a selector which must be true for the pod to fit on a node.
## Selector which must match a node's labels for the pod to be scheduled on that node.
## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
affinity: {}
## Compute Resources required by this container. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
nodeSelector: {}
# Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
# effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
tolerations: []
# Affinity is a group of affinity scheduling rules.
# If specified, the pod's scheduling constraints.
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
affinity: {}
## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
resources: {}
# limits:
# memory: "256Mi"
# resources:
# limits:
# memory: "2Gi"
# cpu: "1"
# requests:
# memory: "256Mi"
# memory: "1Gi"
# cpu: "500m"
# You can use annotations to attach arbitrary non-identifying metadata to objects.
# Clients such as tools and libraries can retrieve this metadata.
annotations: {}
## ConfigMap
configmap:
DOLPHINSCHEDULER_OPTS: ""
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
livenessProbe:
@ -414,10 +370,8 @@ frontend:
timeoutSeconds: "5"
failureThreshold: "3"
successThreshold: "1"
## volumeClaimTemplates is a list of claims that pods are allowed to reference.
## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
## A claim in this list takes precedence over any volumes in the template, with the same name.
## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
## More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
persistentVolumeClaim:
enabled: false
accessModes:

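Most of these defaults are meant to be overridden at install time; a hedged Helm 3 sketch from the chart directory, with the release name, endpoint, and secret value as placeholders:
helm install dolphinscheduler . \
  --set zookeeper.zookeeperRoot=/dolphinscheduler \
  --set common.configmap.RESOURCE_STORAGE_TYPE=S3 \
  --set common.configmap.FS_S3A_ENDPOINT=s3.example.com \
  --set common.configmap.FS_S3A_SECRET_KEY=changeit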
23
dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuSender.java

@ -23,9 +23,7 @@ import org.apache.dolphinscheduler.spi.utils.JSONUtils;
import org.apache.commons.codec.binary.StringUtils;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
import org.apache.http.HttpStatus;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.impl.client.CloseableHttpClient;
@ -71,11 +69,6 @@ public class FeiShuSender {
}
private static RequestConfig getProxyConfig(String proxy, int port) {
HttpHost httpProxy = new HttpHost(proxy, port);
return RequestConfig.custom().setProxy(httpProxy).build();
}
private static String textToJsonString(AlertData alertData) {
Map<String, Object> items = new HashMap<>(2);
@ -88,7 +81,7 @@ public class FeiShuSender {
return JSONUtils.toJsonString(items);
}
private static AlertResult checkSendFeiShuSendMsgResult(String result) {
public static AlertResult checkSendFeiShuSendMsgResult(String result) {
AlertResult alertResult = new AlertResult();
alertResult.setStatus("false");
@ -116,12 +109,10 @@ public class FeiShuSender {
public static String formatContent(AlertData alertData) {
if (alertData.getContent() != null) {
List<Map> list;
try {
list = JSONUtils.toList(alertData.getContent(), Map.class);
} catch (Exception e) {
logger.error("json format exception", e);
return null;
List<Map> list = JSONUtils.toList(alertData.getContent(), Map.class);
if (list.isEmpty()) {
return alertData.getTitle() + alertData.getContent();
}
StringBuilder contents = new StringBuilder(100);
@ -170,7 +161,7 @@ public class FeiShuSender {
int statusCode = response.getStatusLine().getStatusCode();
if (statusCode != HttpStatus.SC_OK) {
logger.error("send feishu message error, return http status code: " + statusCode);
logger.error("send feishu message error, return http status code: {} ", statusCode);
}
String resp;
try {
@ -180,7 +171,7 @@ public class FeiShuSender {
} finally {
response.close();
}
logger.info("Ding Talk send title :{} ,content :{}, resp: {}", alertData.getTitle(), alertData.getContent(), resp);
logger.info("Fei Shu send title :{} ,content :{}, resp: {}", alertData.getTitle(), alertData.getContent(), resp);
return resp;
} finally {
httpClient.close();

24
dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/test/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuSenderTest.java

@ -72,4 +72,28 @@ public class FeiShuSenderTest {
alertData.setContent(alertMsg);
Assert.assertNotNull(FeiShuSender.formatContent(alertData));
}
@Test
public void testSendWithFormatException() {
AlertData alertData = new AlertData();
alertData.setTitle("feishu test title");
alertData.setContent("feishu test content");
FeiShuSender feiShuSender = new FeiShuSender(feiShuConfig);
String alertResult = feiShuSender.formatContent(alertData);
Assert.assertEquals(alertResult, alertData.getTitle() + alertData.getContent());
}
@Test
public void testCheckSendFeiShuSendMsgResult() {
FeiShuSender feiShuSender = new FeiShuSender(feiShuConfig);
AlertResult alertResult = feiShuSender.checkSendFeiShuSendMsgResult("");
Assert.assertFalse(Boolean.valueOf(alertResult.getStatus()));
AlertResult alertResult2 = feiShuSender.checkSendFeiShuSendMsgResult("123");
Assert.assertEquals("send fei shu msg fail",alertResult2.getMessage());
String response = "{\"StatusCode\":\"0\",\"extra\":\"extra\",\"StatusMessage\":\"StatusMessage\"}";
AlertResult alertResult3 = feiShuSender.checkSendFeiShuSendMsgResult(response);
Assert.assertTrue(Boolean.valueOf(alertResult3.getStatus()));
}
}

2
dolphinscheduler-alert/src/main/resources/alert.properties

@ -21,7 +21,7 @@
#eg : Alert Server Listener port
#alert.plugin.dir configures the Alert Plugin dir. AlertServer will find and load the Alert Plugin jars from this dir when AlertServer is deployed and started on the server.
#eg :alert.plugin.dir=/opt/soft/spi/lib/plugin/alert
alert.plugin.dir=./lib/plugin/alert
#maven.local.repository=/Users/gaojun/Documents/jianguoyun/localRepository

42
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/DataSourceController.java

@ -96,10 +96,14 @@ public class DataSourceController extends BaseController {
@ApiImplicitParam(name = "host", value = "DATA_SOURCE_HOST", required = true, dataType = "String"),
@ApiImplicitParam(name = "port", value = "DATA_SOURCE_PORT", required = true, dataType = "String"),
@ApiImplicitParam(name = "database", value = "DATABASE_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "principal", value = "DATA_SOURCE_PRINCIPAL", dataType = "String"),
@ApiImplicitParam(name = "userName", value = "USER_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "password", value = "PASSWORD", dataType = "String"),
@ApiImplicitParam(name = "connectType", value = "CONNECT_TYPE", dataType = "DbConnectType"),
@ApiImplicitParam(name = "other", value = "DATA_SOURCE_OTHER", dataType = "String")
@ApiImplicitParam(name = "other", value = "DATA_SOURCE_OTHER", dataType = "String"),
@ApiImplicitParam(name = "javaSecurityKrb5Conf", value = "DATA_SOURCE_KERBEROS_KRB5_CONF", dataType = "String"),
@ApiImplicitParam(name = "loginUserKeytabUsername", value = "DATA_SOURCE_KERBEROS_KEYTAB_USERNAME", dataType = "String"),
@ApiImplicitParam(name = "loginUserKeytabPath", value = "DATA_SOURCE_KERBEROS_KEYTAB_PATH", dataType = "String")
})
@PostMapping(value = "/create")
@ResponseStatus(HttpStatus.CREATED)
@ -115,10 +119,14 @@ public class DataSourceController extends BaseController {
@RequestParam(value = "userName") String userName,
@RequestParam(value = "password") String password,
@RequestParam(value = "connectType") DbConnectType connectType,
@RequestParam(value = "other") String other) {
@RequestParam(value = "other") String other,
@RequestParam(value = "javaSecurityKrb5Conf", required = false) String javaSecurityKrb5Conf,
@RequestParam(value = "loginUserKeytabUsername", required = false) String loginUserKeytabUsername,
@RequestParam(value = "loginUserKeytabPath", required = false) String loginUserKeytabPath) {
logger.info("login user {} create datasource name: {}, note: {}, type: {}, host: {}, port: {}, database : {}, principal: {}, userName : {}, connectType: {}, other: {}",
loginUser.getUserName(), name, note, type, host, port, database, principal, userName, connectType, other);
String parameter = dataSourceService.buildParameter(type, host, port, database, principal, userName, password, connectType, other);
String parameter = dataSourceService.buildParameter(type, host, port, database, principal, userName, password, connectType, other,
javaSecurityKrb5Conf, loginUserKeytabUsername, loginUserKeytabPath);
return dataSourceService.createDataSource(loginUser, name, note, type, parameter);
}
@ -149,10 +157,14 @@ public class DataSourceController extends BaseController {
@ApiImplicitParam(name = "host", value = "DATA_SOURCE_HOST", required = true, dataType = "String"),
@ApiImplicitParam(name = "port", value = "DATA_SOURCE_PORT", required = true, dataType = "String"),
@ApiImplicitParam(name = "database", value = "DATABASE_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "principal", value = "DATA_SOURCE_PRINCIPAL", dataType = "String"),
@ApiImplicitParam(name = "userName", value = "USER_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "password", value = "PASSWORD", dataType = "String"),
@ApiImplicitParam(name = "connectType", value = "CONNECT_TYPE", dataType = "DbConnectType"),
@ApiImplicitParam(name = "other", value = "DATA_SOURCE_OTHER", dataType = "String")
@ApiImplicitParam(name = "other", value = "DATA_SOURCE_OTHER", dataType = "String"),
@ApiImplicitParam(name = "javaSecurityKrb5Conf", value = "DATA_SOURCE_KERBEROS_KRB5_CONF", dataType = "String"),
@ApiImplicitParam(name = "loginUserKeytabUsername", value = "DATA_SOURCE_KERBEROS_KEYTAB_USERNAME", dataType = "String"),
@ApiImplicitParam(name = "loginUserKeytabPath", value = "DATA_SOURCE_KERBEROS_KEYTAB_PATH", dataType = "String")
})
@PostMapping(value = "/update")
@ResponseStatus(HttpStatus.OK)
@ -169,10 +181,14 @@ public class DataSourceController extends BaseController {
@RequestParam(value = "userName") String userName,
@RequestParam(value = "password") String password,
@RequestParam(value = "connectType") DbConnectType connectType,
@RequestParam(value = "other") String other) {
@RequestParam(value = "other") String other,
@RequestParam(value = "javaSecurityKrb5Conf", required = false) String javaSecurityKrb5Conf,
@RequestParam(value = "loginUserKeytabUsername", required = false) String loginUserKeytabUsername,
@RequestParam(value = "loginUserKeytabPath", required = false) String loginUserKeytabPath) {
logger.info("login user {} updateProcessInstance datasource name: {}, note: {}, type: {}, connectType: {}, other: {}",
loginUser.getUserName(), name, note, type, connectType, other);
String parameter = dataSourceService.buildParameter(type, host, port, database, principal, userName, password, connectType, other);
String parameter = dataSourceService.buildParameter(type, host, port, database, principal, userName, password, connectType, other,
javaSecurityKrb5Conf, loginUserKeytabUsername, loginUserKeytabPath);
return dataSourceService.updateDataSource(id, loginUser, name, note, type, parameter);
}
@ -274,10 +290,14 @@ public class DataSourceController extends BaseController {
@ApiImplicitParam(name = "host", value = "DATA_SOURCE_HOST", required = true, dataType = "String"),
@ApiImplicitParam(name = "port", value = "DATA_SOURCE_PORT", required = true, dataType = "String"),
@ApiImplicitParam(name = "database", value = "DATABASE_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "principal", value = "DATA_SOURCE_PRINCIPAL", dataType = "String"),
@ApiImplicitParam(name = "userName", value = "USER_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "password", value = "PASSWORD", dataType = "String"),
@ApiImplicitParam(name = "connectType", value = "CONNECT_TYPE", dataType = "DbConnectType"),
@ApiImplicitParam(name = "other", value = "DATA_SOURCE_OTHER", dataType = "String")
@ApiImplicitParam(name = "other", value = "DATA_SOURCE_OTHER", dataType = "String"),
@ApiImplicitParam(name = "javaSecurityKrb5Conf", value = "DATA_SOURCE_KERBEROS_KRB5_CONF", dataType = "String"),
@ApiImplicitParam(name = "loginUserKeytabUsername", value = "DATA_SOURCE_KERBEROS_KEYTAB_USERNAME", dataType = "String"),
@ApiImplicitParam(name = "loginUserKeytabPath", value = "DATA_SOURCE_KERBEROS_KEYTAB_PATH", dataType = "String")
})
@PostMapping(value = "/connect")
@ResponseStatus(HttpStatus.OK)
@@ -293,10 +313,14 @@ public class DataSourceController extends BaseController {
@RequestParam(value = "userName") String userName,
@RequestParam(value = "password") String password,
@RequestParam(value = "connectType") DbConnectType connectType,
@RequestParam(value = "other") String other) {
@RequestParam(value = "other") String other,
@RequestParam(value = "javaSecurityKrb5Conf", required = false) String javaSecurityKrb5Conf,
@RequestParam(value = "loginUserKeytabUsername", required = false) String loginUserKeytabUsername,
@RequestParam(value = "loginUserKeytabPath", required = false) String loginUserKeytabPath) {
logger.info("login user {}, connect datasource: {}, note: {}, type: {}, connectType: {}, other: {}",
loginUser.getUserName(), name, note, type, connectType, other);
String parameter = dataSourceService.buildParameter(type, host, port, database, principal, userName, password, connectType, other);
String parameter = dataSourceService.buildParameter(type, host, port, database, principal, userName, password, connectType, other,
javaSecurityKrb5Conf, loginUserKeytabUsername, loginUserKeytabPath);
return dataSourceService.checkConnection(type, parameter);
}

9
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/DataSourceService.java

@@ -267,6 +267,9 @@ public class DataSourceService extends BaseService {
map.put(HOST, host);
map.put(PORT, port);
map.put(PRINCIPAL, datasourceForm.getPrincipal());
map.put(Constants.KERBEROS_KRB5_CONF_PATH, datasourceForm.getJavaSecurityKrb5Conf());
map.put(Constants.KERBEROS_KEY_TAB_USERNAME, datasourceForm.getLoginUserKeytabUsername());
map.put(Constants.KERBEROS_KEY_TAB_PATH, datasourceForm.getLoginUserKeytabPath());
map.put(DATABASE, database);
map.put(USER_NAME, datasourceForm.getUser());
map.put(OTHER, otherMap);
@@ -424,7 +427,8 @@ public class DataSourceService extends BaseService {
*/
public String buildParameter(DbType type, String host,
String port, String database, String principal, String userName,
String password, DbConnectType connectType, String other) {
String password, DbConnectType connectType, String other,
String javaSecurityKrb5Conf, String loginUserKeytabUsername, String loginUserKeytabPath) {
String address = buildAddress(type, host, port, connectType);
Map<String, Object> parameterMap = new LinkedHashMap<String, Object>(6);
@@ -467,6 +471,9 @@ public class DataSourceService extends BaseService {
if (CommonUtils.getKerberosStartupState()
&& (type == DbType.HIVE || type == DbType.SPARK)) {
parameterMap.put(Constants.PRINCIPAL, principal);
parameterMap.put(Constants.KERBEROS_KRB5_CONF_PATH, javaSecurityKrb5Conf);
parameterMap.put(Constants.KERBEROS_KEY_TAB_USERNAME, loginUserKeytabUsername);
parameterMap.put(Constants.KERBEROS_KEY_TAB_PATH, loginUserKeytabPath);
}
Map<String, String> map = JSONUtils.toMap(other);

2
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessInstanceService.java

@@ -256,7 +256,7 @@ public class ProcessInstanceService extends BaseService {
List<ProcessInstance> processInstances = processInstanceList.getRecords();
for (ProcessInstance processInstance : processInstances) {
processInstance.setDuration(DateUtils.differSec(processInstance.getStartTime(), processInstance.getEndTime()));
processInstance.setDuration(DateUtils.format2Duration(processInstance.getStartTime(), processInstance.getEndTime()));
User executor = usersService.queryUser(processInstance.getExecutorId());
if (null != executor) {
processInstance.setExecutorName(executor.getUserName());

2
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/TaskInstanceService.java

@@ -131,7 +131,7 @@ public class TaskInstanceService extends BaseService {
List<TaskInstance> taskInstanceList = taskInstanceIPage.getRecords();
for (TaskInstance taskInstance : taskInstanceList) {
taskInstance.setDuration(DateUtils.differSec(taskInstance.getStartTime(), taskInstance.getEndTime()));
taskInstance.setDuration(DateUtils.format2Duration(taskInstance.getStartTime(), taskInstance.getEndTime()));
User executor = usersService.queryUser(taskInstance.getExecutorId());
if (null != executor) {
taskInstance.setExecutorName(executor.getUserName());

2
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java

@@ -1327,7 +1327,7 @@ public class ProcessDefinitionServiceImpl extends BaseService implements
List<ProcessInstance> processInstanceList = processInstanceService.queryByProcessDefineId(processId, limit);
for (ProcessInstance processInstance : processInstanceList) {
processInstance.setDuration(DateUtils.differSec(processInstance.getStartTime(), processInstance.getEndTime()));
processInstance.setDuration(DateUtils.format2Duration(processInstance.getStartTime(), processInstance.getEndTime()));
}
if (limit > processInstanceList.size()) {

4
dolphinscheduler-api/src/main/resources/i18n/messages.properties

@@ -125,6 +125,10 @@ TENANT_CODE=os tenant code
QUEUE_NAME=queue name
PASSWORD=password
DATA_SOURCE_OTHER=jdbc connection params, format:{"key1":"value1",...}
DATA_SOURCE_PRINCIPAL=principal
DATA_SOURCE_KERBEROS_KRB5_CONF=the kerberos authentication parameter java.security.krb5.conf
DATA_SOURCE_KERBEROS_KEYTAB_USERNAME=the kerberos authentication parameter login.user.keytab.username
DATA_SOURCE_KERBEROS_KEYTAB_PATH=the kerberos authentication parameter login.user.keytab.path
PROJECT_TAG=project related operation
CREATE_PROJECT_NOTES=create project
PROJECT_DESC=project description

4
dolphinscheduler-api/src/main/resources/i18n/messages_en_US.properties

@@ -125,6 +125,10 @@ TENANT_CODE=os tenant code
QUEUE_NAME=queue name
PASSWORD=password
DATA_SOURCE_OTHER=jdbc connection params, format:{"key1":"value1",...}
DATA_SOURCE_PRINCIPAL=principal
DATA_SOURCE_KERBEROS_KRB5_CONF=the kerberos authentication parameter java.security.krb5.conf
DATA_SOURCE_KERBEROS_KEYTAB_USERNAME=the kerberos authentication parameter login.user.keytab.username
DATA_SOURCE_KERBEROS_KEYTAB_PATH=the kerberos authentication parameter login.user.keytab.path
PROJECT_TAG=project related operation
CREATE_PROJECT_NOTES=create project
PROJECT_DESC=project description

4
dolphinscheduler-api/src/main/resources/i18n/messages_zh_CN.properties

@@ -119,6 +119,10 @@ TENANT_CODE=操作系统租户
QUEUE_NAME=队列名
PASSWORD=密码
DATA_SOURCE_OTHER=jdbc连接参数,格式为:{"key1":"value1",...}
DATA_SOURCE_PRINCIPAL=principal
DATA_SOURCE_KERBEROS_KRB5_CONF=kerberos认证参数 java.security.krb5.conf
DATA_SOURCE_KERBEROS_KEYTAB_USERNAME=kerberos认证参数 login.user.keytab.username
DATA_SOURCE_KERBEROS_KEYTAB_PATH=kerberos认证参数 login.user.keytab.path
PROJECT_TAG=项目相关操作
CREATE_PROJECT_NOTES=创建项目
PROJECT_DESC=项目描述

26
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/DataSourceServiceTest.java

@@ -23,6 +23,7 @@ import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.DbConnectType;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.common.utils.CommonUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.PropertyUtils;
import org.apache.dolphinscheduler.dao.datasource.BaseDataSource;
@@ -51,7 +52,7 @@ import org.powermock.modules.junit4.PowerMockRunner;
@RunWith(PowerMockRunner.class)
@PowerMockIgnore({"sun.security.*", "javax.net.*"})
@PrepareForTest({DataSourceFactory.class})
@PrepareForTest({DataSourceFactory.class, CommonUtils.class})
public class DataSourceServiceTest {
@@ -68,7 +69,7 @@ public class DataSourceServiceTest {
String dataSourceName = "dataSource01";
String dataSourceDesc = "test dataSource";
DbType dataSourceType = DbType.POSTGRESQL;
String parameter = dataSourceService.buildParameter(dataSourceType, "172.16.133.200", "5432", "dolphinscheduler", null, "postgres", "", null, null);
String parameter = dataSourceService.buildParameter(dataSourceType, "172.16.133.200", "5432", "dolphinscheduler", null, "postgres", "", null, null, null, null, null);
// data source already exists
List<DataSource> dataSourceList = new ArrayList<>();
@@ -110,7 +111,7 @@ public class DataSourceServiceTest {
String dataSourceName = "dataSource01";
String dataSourceDesc = "test dataSource";
DbType dataSourceType = DbType.POSTGRESQL;
String parameter = dataSourceService.buildParameter(dataSourceType, "172.16.133.200", "5432", "dolphinscheduler", null, "postgres", "", null, null);
String parameter = dataSourceService.buildParameter(dataSourceType, "172.16.133.200", "5432", "dolphinscheduler", null, "postgres", "", null, null, null, null, null);
// data source does not exist
PowerMockito.when(dataSourceMapper.selectById(dataSourceId)).thenReturn(null);
@@ -274,24 +275,35 @@ public class DataSourceServiceTest {
@Test
public void buildParameter() {
String param = dataSourceService.buildParameter(DbType.ORACLE, "192.168.9.1", "1521", "im"
, "", "test", "test", DbConnectType.ORACLE_SERVICE_NAME, "");
, "", "test", "test", DbConnectType.ORACLE_SERVICE_NAME, "", "", "","");
String expected = "{\"connectType\":\"ORACLE_SERVICE_NAME\",\"type\":\"ORACLE_SERVICE_NAME\",\"address\":\"jdbc:oracle:thin:@//192.168.9.1:1521\",\"database\":\"im\","
+ "\"jdbcUrl\":\"jdbc:oracle:thin:@//192.168.9.1:1521/im\",\"user\":\"test\",\"password\":\"test\"}";
Assert.assertEquals(expected, param);
PowerMockito.mockStatic(CommonUtils.class);
PowerMockito.when(CommonUtils.getKerberosStartupState()).thenReturn(true);
PowerMockito.when(CommonUtils.encodePassword(Mockito.anyString())).thenReturn("test");
param = dataSourceService.buildParameter(DbType.HIVE, "192.168.9.1", "10000", "im"
, "hive/hdfs-mycluster@ESZ.COM", "test", "test", null, "", "/opt/krb5.conf", "test2/hdfs-mycluster@ESZ.COM", "/opt/hdfs.headless.keytab");
expected = "{\"type\":null,\"address\":\"jdbc:hive2://192.168.9.1:10000\",\"database\":\"im\",\"jdbcUrl\":\"jdbc:hive2://192.168.9.1:10000/im;principal=hive/hdfs-mycluster@ESZ.COM\","
+ "\"user\":\"test\",\"password\":\"test\",\"principal\":\"hive/hdfs-mycluster@ESZ.COM\",\"javaSecurityKrb5Conf\":\"/opt/krb5.conf\","
+ "\"loginUserKeytabUsername\":\"test2/hdfs-mycluster@ESZ.COM\",\"loginUserKeytabPath\":\"/opt/hdfs.headless.keytab\"}";
Assert.assertEquals(expected, param);
}
@Test
public void buildParameterWithDecodePassword() {
PropertyUtils.setValue(Constants.DATASOURCE_ENCRYPTION_ENABLE, "true");
String param = dataSourceService.buildParameter(DbType.MYSQL, "192.168.9.1", "1521", "im"
, "", "test", "123456", null, "");
, "", "test", "123456", null, "", "", "", "");
String expected = "{\"type\":null,\"address\":\"jdbc:mysql://192.168.9.1:1521\",\"database\":\"im\",\"jdbcUrl\":\"jdbc:mysql://192.168.9.1:1521/im\","
+ "\"user\":\"test\",\"password\":\"IUAjJCVeJipNVEl6TkRVMg==\"}";
Assert.assertEquals(expected, param);
PropertyUtils.setValue(Constants.DATASOURCE_ENCRYPTION_ENABLE, "false");
param = dataSourceService.buildParameter(DbType.MYSQL, "192.168.9.1", "1521", "im"
, "", "test", "123456", null, "");
, "", "test", "123456", null, "", "", "", "");
expected = "{\"type\":null,\"address\":\"jdbc:mysql://192.168.9.1:1521\",\"database\":\"im\",\"jdbcUrl\":\"jdbc:mysql://192.168.9.1:1521/im\",\"user\":\"test\",\"password\":\"123456\"}";
Assert.assertEquals(expected, param);
}
@@ -316,7 +328,7 @@ public class DataSourceServiceTest {
@Test
public void testCheckConnection() throws Exception {
DbType dataSourceType = DbType.POSTGRESQL;
String parameter = dataSourceService.buildParameter(dataSourceType, "172.16.133.200", "5432", "dolphinscheduler", null, "postgres", "", null, null);
String parameter = dataSourceService.buildParameter(dataSourceType, "172.16.133.200", "5432", "dolphinscheduler", null, "postgres", "", null, null, null, null, null);
PowerMockito.mockStatic(DataSourceFactory.class);
PowerMockito.when(DataSourceFactory.getDatasource(Mockito.any(), Mockito.anyString())).thenReturn(null);

14
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java

@@ -19,6 +19,7 @@ package org.apache.dolphinscheduler.common;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import java.util.regex.Pattern;
@@ -146,7 +147,7 @@ public final class Constants {
*/
public static final String RESOURCE_VIEW_SUFFIXS = "resource.view.suffixs";
public static final String RESOURCE_VIEW_SUFFIXS_DEFAULT_VALUE = "txt,log,sh,conf,cfg,py,java,sql,hql,xml,properties";
public static final String RESOURCE_VIEW_SUFFIXS_DEFAULT_VALUE = "txt,log,sh,bat,conf,cfg,py,java,sql,xml,hql,properties,json,yml,yaml,ini,js";
/**
* development.state
@@ -956,7 +957,9 @@ public final class Constants {
public static final String PRINCIPAL = "principal";
public static final String OTHER = "other";
public static final String ORACLE_DB_CONNECT_TYPE = "connectType";
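// JSON field names for the per-datasource Kerberos settings serialized by DataSourceService.buildParameter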
public static final String KERBEROS_KRB5_CONF_PATH = "javaSecurityKrb5Conf";
public static final String KERBEROS_KEY_TAB_USERNAME = "loginUserKeytabUsername";
public static final String KERBEROS_KEY_TAB_PATH = "loginUserKeytabPath";
/**
* session timeout
@@ -1040,4 +1043,11 @@ public final class Constants {
* pstree, get pid and sub pid
*/
public static final String PSTREE = "pstree";
/**
* docker & kubernetes
*/
public static final boolean DOCKER_MODE = StringUtils.isNotEmpty(System.getenv("DOCKER"));
public static final boolean KUBERNETES_MODE = StringUtils.isNotEmpty(System.getenv("KUBERNETES_SERVICE_HOST")) && StringUtils.isNotEmpty(System.getenv("KUBERNETES_SERVICE_PORT"));
}

19
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/CommonUtils.java

@@ -86,13 +86,26 @@ public class CommonUtils {
* @throws Exception errors
*/
public static void loadKerberosConf() throws Exception {
loadKerberosConf(PropertyUtils.getString(Constants.JAVA_SECURITY_KRB5_CONF_PATH),
PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_USERNAME),
PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_PATH));
}
/**
* load kerberos configuration
* @param javaSecurityKrb5Conf path to the krb5.conf file (java.security.krb5.conf)
* @param loginUserKeytabUsername principal used to log in from the keytab
* @param loginUserKeytabPath path to the keytab file
* @throws Exception errors
*/
public static void loadKerberosConf(String javaSecurityKrb5Conf, String loginUserKeytabUsername, String loginUserKeytabPath) throws Exception {
if (CommonUtils.getKerberosStartupState()) {
System.setProperty(Constants.JAVA_SECURITY_KRB5_CONF, PropertyUtils.getString(Constants.JAVA_SECURITY_KRB5_CONF_PATH));
System.setProperty(Constants.JAVA_SECURITY_KRB5_CONF, StringUtils.defaultIfBlank(javaSecurityKrb5Conf, PropertyUtils.getString(Constants.JAVA_SECURITY_KRB5_CONF_PATH)));
Configuration configuration = new Configuration();
configuration.set(Constants.HADOOP_SECURITY_AUTHENTICATION, Constants.KERBEROS);
UserGroupInformation.setConfiguration(configuration);
UserGroupInformation.loginUserFromKeytab(PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_USERNAME),
PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_PATH));
UserGroupInformation.loginUserFromKeytab(StringUtils.defaultIfBlank(loginUserKeytabUsername, PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_USERNAME)),
StringUtils.defaultIfBlank(loginUserKeytabPath, PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_PATH)));
}
}
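The new overload keeps the old behavior as a fallback: any blank argument is replaced by the corresponding global value from common.properties via StringUtils.defaultIfBlank. A minimal sketch, assuming only the krb5.conf path is overridden per datasource:

// hypothetical value; keytab username/path fall back to common.properties
CommonUtils.loadKerberosConf("/etc/krb5-hive.conf", null, null);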

86
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/DateUtils.java

@@ -241,15 +241,50 @@
*/
public static String format2Readable(long ms) {
long days = ms / (1000 * 60 * 60 * 24);
long hours = (ms % (1000 * 60 * 60 * 24)) / (1000 * 60 * 60);
long minutes = (ms % (1000 * 60 * 60)) / (1000 * 60);
long seconds = (ms % (1000 * 60)) / 1000;
long days = MILLISECONDS.toDays(ms);
long hours = MILLISECONDS.toDurationHours(ms);
long minutes = MILLISECONDS.toDurationMinutes(ms);
long seconds = MILLISECONDS.toDurationSeconds(ms);
return String.format("%02d %02d:%02d:%02d", days, hours, minutes, seconds);
}
/**
*
* format time to duration
*
* @param d1 start date
* @param d2 end date
* @return duration string, e.g. "1d 1h 10m 10s"
*/
public static String format2Duration(Date d1, Date d2) {
return format2Duration(differMs(d1, d2));
}
/**
* format time to duration
*
* @param ms duration in milliseconds
* @return duration string, e.g. "1d 1h 10m 10s"
*/
public static String format2Duration(long ms) {
long days = MILLISECONDS.toDays(ms);
long hours = MILLISECONDS.toDurationHours(ms);
long minutes = MILLISECONDS.toDurationMinutes(ms);
long seconds = MILLISECONDS.toDurationSeconds(ms);
StringBuilder strBuilder = new StringBuilder();
strBuilder = days > 0 ? strBuilder.append(days).append("d").append(" ") : strBuilder;
strBuilder = hours > 0 ? strBuilder.append(hours).append("h").append(" ") : strBuilder;
strBuilder = minutes > 0 ? strBuilder.append(minutes).append("m").append(" ") : strBuilder;
strBuilder = seconds > 0 ? strBuilder.append(seconds).append("s") : strBuilder;
return strBuilder.toString();
}
/**
* get monday
* <p>
@@ -454,4 +489,47 @@
return getCurrentTime(Constants.YYYYMMDDHHMMSSSSS);
}
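// unit sizes expressed in nanoseconds (as in java.util.concurrent.TimeUnit):
// C0 = ns, C1 = us, C2 = ms, C3 = s, C4 = min, C5 = h, C6 = d,
// so a quotient such as C4 / C2 is the number of milliseconds in one minute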
static final long C0 = 1L;
static final long C1 = C0 * 1000L;
static final long C2 = C1 * 1000L;
static final long C3 = C2 * 1000L;
static final long C4 = C3 * 60L;
static final long C5 = C4 * 60L;
static final long C6 = C5 * 24L;
/**
* Time unit representing one thousandth of a second
*/
public static class MILLISECONDS {
public static long toSeconds(long d) {
return d / (C3 / C2);
}
public static long toMinutes(long d) {
return d / (C4 / C2);
}
public static long toHours(long d) {
return d / (C5 / C2);
}
public static long toDays(long d) {
return d / (C6 / C2);
}
public static long toDurationSeconds(long d) {
return (d % (C4 / C2)) / (C3 / C2);
}
public static long toDurationMinutes(long d) {
return (d % (C5 / C2)) / (C4 / C2);
}
public static long toDurationHours(long d) {
return (d % (C6 / C2)) / (C5 / C2);
}
}
}
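A quick worked example of the new formatter; zero-valued units are skipped rather than printed as "0h":

// 1d + 1h + 1m + 1s = 86400000 + 3600000 + 60000 + 1000 = 90061000 ms
String s = DateUtils.format2Duration(90061000L); // "1d 1h 1m 1s"
String t = DateUtils.format2Duration(86401000L); // "1d 1s" (hours and minutes omitted)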

29
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/JSONUtils.java

@@ -211,25 +211,13 @@
/**
* json to map
* <p>
* {@link #toMap(String, Class, Class)}
*
* @param json json
* @return json to map
*/
public static Map<String, String> toMap(String json) {
if (StringUtils.isEmpty(json)) {
return null;
}
try {
return objectMapper.readValue(json, new TypeReference<Map<String, String>>() {
});
} catch (Exception e) {
logger.error("json to map exception!", e);
}
return null;
return parseObject(json, new TypeReference<Map<String, String>>() {});
}
/**
@@ -243,13 +231,24 @@
* @return to map
*/
public static <K, V> Map<K, V> toMap(String json, Class<K> classK, Class<V> classV) {
return parseObject(json, new TypeReference<Map<K, V>>() {});
}
/**
* json to object
*
* @param json json string
* @param type type reference
* @param <T>
* @return return parse object
*/
public static <T> T parseObject(String json, TypeReference<T> type) {
if (StringUtils.isEmpty(json)) {
return null;
}
try {
return objectMapper.readValue(json, new TypeReference<Map<K, V>>() {
});
return objectMapper.readValue(json, type);
} catch (Exception e) {
logger.error("json to map exception!", e);
}
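With the parsing factored into parseObject, callers can now deserialize arbitrary generic shapes, not just Map; a small sketch, assuming Jackson's com.fasterxml.jackson.core.type.TypeReference (already used above) and java.util.List are imported:

Map<String, String> other = JSONUtils.toMap("{\"key1\":\"value1\"}");
List<String> hosts = JSONUtils.parseObject("[\"w1:1234\",\"w2:1234\"]", new TypeReference<List<String>>() {});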

26
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/NetUtils.java

@@ -49,8 +49,6 @@
private static final String NETWORK_PRIORITY_INNER = "inner";
private static final String NETWORK_PRIORITY_OUTER = "outer";
private static final Logger logger = LoggerFactory.getLogger(NetUtils.class);
private static final String ANY_HOST_VALUE = "0.0.0.0";
private static final String LOCAL_HOST_VALUE = "127.0.0.1";
private static InetAddress LOCAL_ADDRESS = null;
private static volatile String HOST_ADDRESS;
@@ -58,6 +56,22 @@
throw new UnsupportedOperationException("Construct NetUtils");
}
/**
* get addr like host:port
* @return addr
*/
public static String getAddr(String host, int port) {
return String.format("%s:%d", host, port);
}
/**
* get addr like host:port
* @return addr
*/
public static String getAddr(int port) {
return getAddr(getHost(), port);
}
public static String getHost() {
if (HOST_ADDRESS != null) {
return HOST_ADDRESS;
@@ -65,10 +79,10 @@
InetAddress address = getLocalAddress();
if (address != null) {
HOST_ADDRESS = address.getHostAddress();
HOST_ADDRESS = Constants.KUBERNETES_MODE ? address.getHostName() : address.getHostAddress();
return HOST_ADDRESS;
}
return LOCAL_HOST_VALUE;
return Constants.KUBERNETES_MODE ? "localhost" : "127.0.0.1";
}
private static InetAddress getLocalAddress() {
@@ -153,8 +167,8 @@
String name = address.getHostAddress();
return (name != null
&& IP_PATTERN.matcher(name).matches()
&& !ANY_HOST_VALUE.equals(name)
&& !LOCAL_HOST_VALUE.equals(name));
&& !address.isAnyLocalAddress()
&& !address.isLoopbackAddress());
}
/**

10
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/OSUtils.java

@@ -389,6 +389,16 @@
return null;
}
/**
* get sudo command
* @param tenantCode tenantCode
* @param command command
* @return the command prefixed with sudo -u tenantCode, or the command itself when tenantCode is empty
*/
public static String getSudoCmd(String tenantCode, String command) {
return StringUtils.isEmpty(tenantCode) ? command : "sudo -u " + tenantCode + " " + command;
}
/**
* Execute the corresponding command of Linux or Windows
*

4
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/StringUtils.java

@@ -62,6 +62,10 @@
return str == null ? null : str.trim();
}
public static String defaultIfBlank(String str, String defaultStr) {
return isBlank(str) ? defaultStr : str;
}
public static boolean equalsIgnoreCase(String str1, String str2) {
return str1 == null ? str2 == null : str1.equalsIgnoreCase(str2);
}

2
dolphinscheduler-common/src/main/resources/common.properties

@@ -37,7 +37,7 @@ login.user.keytab.username=hdfs-mycluster@ESZ.COM
login.user.keytab.path=/opt/hdfs.headless.keytab
#resource.view.suffixs
#resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml,properties
#resource.view.suffixs=txt,log,sh,bat,conf,cfg,py,java,sql,xml,hql,properties,json,yml,yaml,ini,js
# if resource.storage.type=HDFS, the user need to have permission to create directories under the HDFS root path
hdfs.root.user=hdfs

39
dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/DateUtilsTest.java

@@ -157,4 +157,43 @@
Assert.assertNotNull(timeStamp);
}
@Test
public void testFormat2Duration() {
// days hours minutes seconds
Date d1 = DateUtils.stringToDate("2020-01-20 11:00:00");
Date d2 = DateUtils.stringToDate("2020-01-21 12:10:10");
String duration = DateUtils.format2Duration(d2, d1);
Assert.assertEquals("1d 1h 10m 10s", duration);
// hours minutes seconds
d1 = DateUtils.stringToDate("2020-01-20 11:00:00");
d2 = DateUtils.stringToDate("2020-01-20 12:10:10");
duration = DateUtils.format2Duration(d2, d1);
Assert.assertEquals("1h 10m 10s", duration);
// minutes seconds
d1 = DateUtils.stringToDate("2020-01-20 11:00:00");
d2 = DateUtils.stringToDate("2020-01-20 11:10:10");
duration = DateUtils.format2Duration(d2, d1);
Assert.assertEquals("10m 10s", duration);
// seconds
d1 = DateUtils.stringToDate("2020-01-20 11:10:00");
d2 = DateUtils.stringToDate("2020-01-20 11:10:10");
duration = DateUtils.format2Duration(d2, d1);
Assert.assertEquals("10s", duration);
d1 = DateUtils.stringToDate("2020-01-20 11:10:00");
d2 = DateUtils.stringToDate("2020-01-21 11:10:10");
duration = DateUtils.format2Duration(d2, d1);
Assert.assertEquals("1d 10s", duration);
d1 = DateUtils.stringToDate("2020-01-20 11:10:00");
d2 = DateUtils.stringToDate("2020-01-20 16:10:10");
duration = DateUtils.format2Duration(d2, d1);
Assert.assertEquals("5h 10s", duration);
}
}

9
dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/NetUtilsTest.java

@@ -29,6 +29,13 @@ import static org.mockito.Mockito.when;
*/
public class NetUtilsTest {
@Test
public void testGetAddr() {
assertEquals(NetUtils.getHost() + ":5678", NetUtils.getAddr(5678));
assertEquals("127.0.0.1:5678", NetUtils.getAddr("127.0.0.1", 5678));
assertEquals("localhost:1234", NetUtils.getAddr("localhost", 1234));
}
@Test
public void testGetLocalHost() {
assertNotNull(NetUtils.getHost());
@@ -45,9 +52,11 @@
assertFalse(NetUtils.isValidV4Address(address));
address = mock(InetAddress.class);
when(address.getHostAddress()).thenReturn("0.0.0.0");
when(address.isAnyLocalAddress()).thenReturn(true);
assertFalse(NetUtils.isValidV4Address(address));
address = mock(InetAddress.class);
when(address.getHostAddress()).thenReturn("127.0.0.1");
when(address.isLoopbackAddress()).thenReturn(true);
assertFalse(NetUtils.isValidV4Address(address));
address = mock(InetAddress.class);
when(address.getHostAddress()).thenReturn("1.2.3.4");

13
dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/OSUtilsTest.java

@@ -75,6 +75,13 @@
}
}
@Test
public void testGetSudoCmd() {
String cmd = "kill -9 1234";
String sudoCmd = OSUtils.getSudoCmd("test123", cmd);
Assert.assertEquals("sudo -u test123 " + cmd, sudoCmd);
}
@Test
public void exeCmd() {
if(OSUtils.isMacOS() || !OSUtils.isWindows()){
@@ -92,12 +99,6 @@
Assert.assertNotEquals(0, processId);
}
@Test
public void getHost(){
String host = NetUtils.getHost();
Assert.assertNotNull(host);
Assert.assertNotEquals("", host);
}
@Test
public void checkResource(){
boolean resource = OSUtils.checkResource(100,0);
Assert.assertTrue(resource);

18
dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/StringUtilsTest.java

@@ -77,4 +77,22 @@
String result4 = StringUtils.replaceNRTtoUnderline(null);
Assert.assertNull(result4);
}
@Test
public void testTrim() {
String trim = StringUtils.trim(null);
Assert.assertNull(trim);
trim = StringUtils.trim(" test ");
Assert.assertEquals("test", trim);
}
@Test
public void testDefaultIfBlank() {
String defaultStr = StringUtils.defaultIfBlank("", "defaultStr");
Assert.assertEquals("defaultStr", defaultStr);
defaultStr = StringUtils.defaultIfBlank("test", "defaultStr");
Assert.assertEquals("test", defaultStr);
}
}

38
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/BaseDataSource.java

@@ -64,6 +64,21 @@
*/
private String principal;
/**
* java.security.krb5.conf
*/
private String javaSecurityKrb5Conf;
/**
* login.user.keytab.username
*/
private String loginUserKeytabUsername;
/**
* login.user.keytab.path
*/
private String loginUserKeytabPath;
public String getPrincipal() {
return principal;
}
@@ -211,4 +226,27 @@
this.other = other;
}
public String getJavaSecurityKrb5Conf() {
return javaSecurityKrb5Conf;
}
public void setJavaSecurityKrb5Conf(String javaSecurityKrb5Conf) {
this.javaSecurityKrb5Conf = javaSecurityKrb5Conf;
}
public String getLoginUserKeytabUsername() {
return loginUserKeytabUsername;
}
public void setLoginUserKeytabUsername(String loginUserKeytabUsername) {
this.loginUserKeytabUsername = loginUserKeytabUsername;
}
public String getLoginUserKeytabPath() {
return loginUserKeytabPath;
}
public void setLoginUserKeytabPath(String loginUserKeytabPath) {
this.loginUserKeytabPath = loginUserKeytabPath;
}
}

2
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/HiveDataSource.java

@@ -96,7 +96,7 @@ public class HiveDataSource extends BaseDataSource {
*/
@Override
public Connection getConnection() throws Exception {
CommonUtils.loadKerberosConf();
CommonUtils.loadKerberosConf(getJavaSecurityKrb5Conf(), getLoginUserKeytabUsername(), getLoginUserKeytabPath());
return super.getConnection();
}
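Each datasource can now carry its own Kerberos identity instead of relying on the single JVM-wide configuration from common.properties; a hedged sketch of wiring one up by hand, with values borrowed from this commit's tests and common.properties:

HiveDataSource ds = new HiveDataSource();
ds.setAddress("jdbc:hive2://127.0.0.1:10000");
ds.setDatabase("test");
ds.setUser("test");
ds.setPrincipal("hive/test.com@TEST.COM");
ds.setJavaSecurityKrb5Conf("/opt/krb5.conf");
ds.setLoginUserKeytabUsername("hdfs-mycluster@ESZ.COM");
ds.setLoginUserKeytabPath("/opt/hdfs.headless.keytab");
Connection conn = ds.getConnection(); // loads this Kerberos conf before delegating to super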

2
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/SparkDataSource.java

@@ -52,7 +52,7 @@ public class SparkDataSource extends BaseDataSource {
*/
@Override
public Connection getConnection() throws Exception {
CommonUtils.loadKerberosConf();
CommonUtils.loadKerberosConf(getJavaSecurityKrb5Conf(), getLoginUserKeytabUsername(), getLoginUserKeytabPath());
return super.getConnection();
}
}

6
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/ProcessInstance.java

@@ -202,7 +202,7 @@
* @return
*/
@TableField(exist = false)
private Long duration;
private String duration;
/**
* process instance priority
@@ -547,11 +547,11 @@
this.dependenceScheduleTimes = dependenceScheduleTimes;
}
public Long getDuration() {
public String getDuration() {
return duration;
}
public void setDuration(Long duration) {
public void setDuration(String duration) {
this.duration = duration;
}

8
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/TaskInstance.java

@@ -14,6 +14,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.dao.entity;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
@@ -170,7 +171,7 @@
* duration
*/
@TableField(exist = false)
private Long duration;
private String duration;
/**
* max retry times
@@ -437,11 +438,11 @@
this.processInstanceName = processInstanceName;
}
public Long getDuration() {
public String getDuration() {
return duration;
}
public void setDuration(Long duration) {
public void setDuration(String duration) {
this.duration = duration;
}
@@ -505,7 +506,6 @@
return TaskType.CONDITIONS.equals(TaskType.valueOf(this.taskType));
}
/**
* determine if you can try again
*

269
dolphinscheduler-dao/src/test/java/org/apache/dolphinscheduler/dao/datasource/BaseDataSourceTest.java

@@ -14,151 +14,148 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.dao.datasource;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.utils.PropertyUtils;
import org.junit.Assert;
import org.junit.Test;
public class BaseDataSourceTest {
@Test
public void testDriverClassSelector() {
String mysqlDriverClass = new MySQLDataSource().driverClassSelector();
Assert.assertEquals(Constants.COM_MYSQL_JDBC_DRIVER, mysqlDriverClass);
String clickHouseDriverClass = new ClickHouseDataSource().driverClassSelector();
Assert.assertEquals(Constants.COM_CLICKHOUSE_JDBC_DRIVER, clickHouseDriverClass);
String db2ServerDriverClass = new DB2ServerDataSource().driverClassSelector();
Assert.assertEquals(Constants.COM_DB2_JDBC_DRIVER, db2ServerDriverClass);
String oracleDriverClass = new OracleDataSource().driverClassSelector();
Assert.assertEquals(Constants.COM_ORACLE_JDBC_DRIVER, oracleDriverClass);
String postgreDriverClass = new PostgreDataSource().driverClassSelector();
Assert.assertEquals(Constants.ORG_POSTGRESQL_DRIVER, postgreDriverClass);
String sqlServerDriverClass = new SQLServerDataSource().driverClassSelector();
Assert.assertEquals(Constants.COM_SQLSERVER_JDBC_DRIVER, sqlServerDriverClass);
String hiveDriverClass = new HiveDataSource().driverClassSelector();
Assert.assertEquals(Constants.ORG_APACHE_HIVE_JDBC_HIVE_DRIVER, hiveDriverClass);
String sparkDriverClass = new SparkDataSource().driverClassSelector();
Assert.assertEquals(Constants.ORG_APACHE_HIVE_JDBC_HIVE_DRIVER, sparkDriverClass);
}
@Test
public void testGetJdbcUrl() {
BaseDataSource hiveDataSource = new HiveDataSource();
hiveDataSource.setAddress("jdbc:hive2://127.0.0.1:10000");
hiveDataSource.setDatabase("test");
hiveDataSource.setPassword("123456");
hiveDataSource.setUser("test");
Assert.assertEquals("jdbc:hive2://127.0.0.1:10000/test", hiveDataSource.getJdbcUrl());
//set principal
hiveDataSource.setPrincipal("hive/test.com@TEST.COM");
Assert.assertEquals("jdbc:hive2://127.0.0.1:10000/test;principal=hive/test.com@TEST.COM",
hiveDataSource.getJdbcUrl());
//set fake other
hiveDataSource.setOther("charset=UTF-8");
Assert.assertEquals(
"jdbc:hive2://127.0.0.1:10000/test;principal=hive/test.com@TEST.COM;charset=UTF-8",
hiveDataSource.getJdbcUrl());
BaseDataSource clickHouseDataSource = new ClickHouseDataSource();
clickHouseDataSource.setAddress("jdbc:clickhouse://127.0.0.1:8123");
clickHouseDataSource.setDatabase("test");
clickHouseDataSource.setPassword("123456");
clickHouseDataSource.setUser("test");
Assert.assertEquals("jdbc:clickhouse://127.0.0.1:8123/test", clickHouseDataSource.getJdbcUrl());
//set fake principal
clickHouseDataSource.setPrincipal("fake principal");
Assert.assertEquals("jdbc:clickhouse://127.0.0.1:8123/test", clickHouseDataSource.getJdbcUrl());
//set fake other
clickHouseDataSource.setOther("charset=UTF-8");
Assert.assertEquals("jdbc:clickhouse://127.0.0.1:8123/test?charset=UTF-8",
clickHouseDataSource.getJdbcUrl());
BaseDataSource sqlServerDataSource = new SQLServerDataSource();
sqlServerDataSource.setAddress("jdbc:sqlserver://127.0.0.1:1433");
sqlServerDataSource.setDatabase("test");
sqlServerDataSource.setPassword("123456");
sqlServerDataSource.setUser("test");
Assert.assertEquals("jdbc:sqlserver://127.0.0.1:1433;databaseName=test",
sqlServerDataSource.getJdbcUrl());
//set fake principal
sqlServerDataSource.setPrincipal("fake principal");
Assert.assertEquals("jdbc:sqlserver://127.0.0.1:1433;databaseName=test",
sqlServerDataSource.getJdbcUrl());
//set fake other
sqlServerDataSource.setOther("charset=UTF-8");
Assert.assertEquals("jdbc:sqlserver://127.0.0.1:1433;databaseName=test;charset=UTF-8",
sqlServerDataSource.getJdbcUrl());
BaseDataSource db2DataSource = new DB2ServerDataSource();
db2DataSource.setAddress("jdbc:db2://127.0.0.1:50000");
db2DataSource.setDatabase("test");
db2DataSource.setPassword("123456");
db2DataSource.setUser("test");
Assert.assertEquals("jdbc:db2://127.0.0.1:50000/test", db2DataSource.getJdbcUrl());
//set fake principal
db2DataSource.setPrincipal("fake principal");
Assert.assertEquals("jdbc:db2://127.0.0.1:50000/test", db2DataSource.getJdbcUrl());
//set fake other
db2DataSource.setOther("charset=UTF-8");
Assert.assertEquals("jdbc:db2://127.0.0.1:50000/test:charset=UTF-8", db2DataSource.getJdbcUrl());
}
@Test
public void testGetPassword() {
BaseDataSource dataSource = new BaseDataSource() {
@Override
public String driverClassSelector() {
return null;
}
@Override
public DbType dbTypeSelector() {
return null;
}
};
String password= "";
dataSource.setPassword(password);
Assert.assertEquals("", dataSource.getPassword());
password= "IUAjJCVeJipNVEl6TkRVMg==";
dataSource.setPassword(password);
Assert.assertNotNull(dataSource.getPassword());
Assert.assertNotNull(dataSource.getPassword());
dataSource.setPassword(password);
PropertyUtils.setValue(Constants.DATASOURCE_ENCRYPTION_ENABLE,"true");
Assert.assertEquals("123456", dataSource.getPassword());
dataSource.setPassword(password);
Assert.assertEquals("123456", dataSource.getPassword());
Assert.assertEquals("123456", dataSource.getPassword());
Assert.assertEquals("123456", dataSource.getPassword());
dataSource.setPassword(password);
PropertyUtils.setValue(Constants.DATASOURCE_ENCRYPTION_ENABLE,"false");
Assert.assertEquals("IUAjJCVeJipNVEl6TkRVMg==", dataSource.getPassword());
dataSource.setPassword(password);
Assert.assertEquals("IUAjJCVeJipNVEl6TkRVMg==", dataSource.getPassword());
Assert.assertEquals("IUAjJCVeJipNVEl6TkRVMg==", dataSource.getPassword());
Assert.assertEquals("IUAjJCVeJipNVEl6TkRVMg==", dataSource.getPassword());
}
@Test
public void testDriverClassSelector() {
String mysqlDriverClass = new MySQLDataSource().driverClassSelector();
Assert.assertEquals(Constants.COM_MYSQL_JDBC_DRIVER, mysqlDriverClass);
String clickHouseDriverClass = new ClickHouseDataSource().driverClassSelector();
Assert.assertEquals(Constants.COM_CLICKHOUSE_JDBC_DRIVER, clickHouseDriverClass);
String db2ServerDriverClass = new DB2ServerDataSource().driverClassSelector();
Assert.assertEquals(Constants.COM_DB2_JDBC_DRIVER, db2ServerDriverClass);
String oracleDriverClass = new OracleDataSource().driverClassSelector();
Assert.assertEquals(Constants.COM_ORACLE_JDBC_DRIVER, oracleDriverClass);
String postgreDriverClass = new PostgreDataSource().driverClassSelector();
Assert.assertEquals(Constants.ORG_POSTGRESQL_DRIVER, postgreDriverClass);
String sqlServerDriverClass = new SQLServerDataSource().driverClassSelector();
Assert.assertEquals(Constants.COM_SQLSERVER_JDBC_DRIVER, sqlServerDriverClass);
String hiveDriverClass = new HiveDataSource().driverClassSelector();
Assert.assertEquals(Constants.ORG_APACHE_HIVE_JDBC_HIVE_DRIVER, hiveDriverClass);
String sparkDriverClass = new SparkDataSource().driverClassSelector();
Assert.assertEquals(Constants.ORG_APACHE_HIVE_JDBC_HIVE_DRIVER, sparkDriverClass);
}
@Test
public void testGetJdbcUrl() {
BaseDataSource hiveDataSource = new HiveDataSource();
hiveDataSource.setAddress("jdbc:hive2://127.0.0.1:10000");
hiveDataSource.setDatabase("test");
hiveDataSource.setPassword("123456");
hiveDataSource.setUser("test");
Assert.assertEquals("jdbc:hive2://127.0.0.1:10000/test", hiveDataSource.getJdbcUrl());
//set principal
hiveDataSource.setPrincipal("hive/test.com@TEST.COM");
Assert.assertEquals("jdbc:hive2://127.0.0.1:10000/test;principal=hive/test.com@TEST.COM",
hiveDataSource.getJdbcUrl());
//set fake other
hiveDataSource.setOther("charset=UTF-8");
Assert.assertEquals(
"jdbc:hive2://127.0.0.1:10000/test;principal=hive/test.com@TEST.COM;charset=UTF-8",
hiveDataSource.getJdbcUrl());
BaseDataSource clickHouseDataSource = new ClickHouseDataSource();
clickHouseDataSource.setAddress("jdbc:clickhouse://127.0.0.1:8123");
clickHouseDataSource.setDatabase("test");
clickHouseDataSource.setPassword("123456");
clickHouseDataSource.setUser("test");
Assert.assertEquals("jdbc:clickhouse://127.0.0.1:8123/test", clickHouseDataSource.getJdbcUrl());
//set fake principal
clickHouseDataSource.setPrincipal("fake principal");
Assert.assertEquals("jdbc:clickhouse://127.0.0.1:8123/test", clickHouseDataSource.getJdbcUrl());
//set fake other
clickHouseDataSource.setOther("charset=UTF-8");
Assert.assertEquals("jdbc:clickhouse://127.0.0.1:8123/test?charset=UTF-8",
clickHouseDataSource.getJdbcUrl());
BaseDataSource sqlServerDataSource = new SQLServerDataSource();
sqlServerDataSource.setAddress("jdbc:sqlserver://127.0.0.1:1433");
sqlServerDataSource.setDatabase("test");
sqlServerDataSource.setPassword("123456");
sqlServerDataSource.setUser("test");
Assert.assertEquals("jdbc:sqlserver://127.0.0.1:1433;databaseName=test",
sqlServerDataSource.getJdbcUrl());
//set fake principal
sqlServerDataSource.setPrincipal("fake principal");
Assert.assertEquals("jdbc:sqlserver://127.0.0.1:1433;databaseName=test",
sqlServerDataSource.getJdbcUrl());
//set fake other
sqlServerDataSource.setOther("charset=UTF-8");
Assert.assertEquals("jdbc:sqlserver://127.0.0.1:1433;databaseName=test;charset=UTF-8",
sqlServerDataSource.getJdbcUrl());
BaseDataSource db2DataSource = new DB2ServerDataSource();
db2DataSource.setAddress("jdbc:db2://127.0.0.1:50000");
db2DataSource.setDatabase("test");
db2DataSource.setPassword("123456");
db2DataSource.setUser("test");
Assert.assertEquals("jdbc:db2://127.0.0.1:50000/test", db2DataSource.getJdbcUrl());
//set fake principal
db2DataSource.setPrincipal("fake principal");
Assert.assertEquals("jdbc:db2://127.0.0.1:50000/test", db2DataSource.getJdbcUrl());
//set fake other
db2DataSource.setOther("charset=UTF-8");
Assert.assertEquals("jdbc:db2://127.0.0.1:50000/test:charset=UTF-8", db2DataSource.getJdbcUrl());
}
@Test
public void testGetPassword() {
BaseDataSource dataSource = new BaseDataSource() {
@Override
public String driverClassSelector() {
return null;
}
@Override
public DbType dbTypeSelector() {
return null;
}
};
String password = "";
dataSource.setPassword(password);
Assert.assertEquals("", dataSource.getPassword());
password = "IUAjJCVeJipNVEl6TkRVMg==";
dataSource.setPassword(password);
Assert.assertNotNull(dataSource.getPassword());
Assert.assertNotNull(dataSource.getPassword());
dataSource.setPassword(password);
PropertyUtils.setValue(Constants.DATASOURCE_ENCRYPTION_ENABLE,"true");
Assert.assertEquals("123456", dataSource.getPassword());
dataSource.setPassword(password);
Assert.assertEquals("123456", dataSource.getPassword());
Assert.assertEquals("123456", dataSource.getPassword());
Assert.assertEquals("123456", dataSource.getPassword());
dataSource.setPassword(password);
PropertyUtils.setValue(Constants.DATASOURCE_ENCRYPTION_ENABLE,"false");
Assert.assertEquals("IUAjJCVeJipNVEl6TkRVMg==", dataSource.getPassword());
dataSource.setPassword(password);
Assert.assertEquals("IUAjJCVeJipNVEl6TkRVMg==", dataSource.getPassword());
Assert.assertEquals("IUAjJCVeJipNVEl6TkRVMg==", dataSource.getPassword());
Assert.assertEquals("IUAjJCVeJipNVEl6TkRVMg==", dataSource.getPassword());
}
}

4
dolphinscheduler-dist/release-docs/LICENSE

@@ -236,6 +236,7 @@ The text of each license is also included at licenses/LICENSE-[project].txt.
commons-configuration 1.10: https://mvnrepository.com/artifact/commons-configuration/commons-configuration/1.10, Apache 2.0
commons-daemon 1.0.13 https://mvnrepository.com/artifact/commons-daemon/commons-daemon/1.0.13, Apache 2.0
commons-dbcp 1.4: https://github.com/apache/commons-dbcp, Apache 2.0
commons-email 1.5: https://github.com/apache/commons-email, Apache 2.0
commons-httpclient 3.0.1: https://mvnrepository.com/artifact/commons-httpclient/commons-httpclient/3.0.1, Apache 2.0
commons-io 2.4: https://github.com/apache/commons-io, Apache 2.0
commons-lang 2.6: https://github.com/apache/commons-lang, Apache 2.0
@@ -321,6 +322,7 @@ The text of each license is also included at licenses/LICENSE-[project].txt.
jpam 1.1: https://mvnrepository.com/artifact/net.sf.jpam/jpam/1.1, Apache 2.0
jsqlparser 2.1: https://github.com/JSQLParser/JSqlParser, Apache 2.0 or LGPL 2.1
jsr305 3.0.0: https://mvnrepository.com/artifact/com.google.code.findbugs/jsr305, Apache 2.0
jsr305 1.3.9: https://mvnrepository.com/artifact/com.google.code.findbugs/jsr305, Apache 2.0
j2objc-annotations 1.1 https://mvnrepository.com/artifact/com.google.j2objc/j2objc-annotations/1.1, Apache 2.0
libfb303 0.9.3: https://mvnrepository.com/artifact/org.apache.thrift/libfb303/0.9.3, Apache 2.0
libthrift 0.9.3: https://mvnrepository.com/artifact/org.apache.thrift/libthrift/0.9.3, Apache 2.0
@@ -412,8 +414,10 @@ CDDL licenses
The following components are provided under the CDDL License. See project link for details.
The text of each license is also included at licenses/LICENSE-[project].txt.
activation 1.1: https://mvnrepository.com/artifact/javax.activation/activation/1.1 CDDL 1.0
javax.activation-api 1.2.0: https://mvnrepository.com/artifact/javax.activation/javax.activation-api/1.2.0, CDDL and LGPL 2.0
javax.annotation-api 1.3.2: https://mvnrepository.com/artifact/javax.annotation/javax.annotation-api/1.3.2, CDDL + GPLv2
javax.mail 1.6.2: https://mvnrepository.com/artifact/com.sun.mail/javax.mail/1.6.2, CDDL/GPLv2
javax.servlet-api 3.1.0: https://mvnrepository.com/artifact/javax.servlet/javax.servlet-api/3.1.0, CDDL + GPLv2
jaxb-api 2.3.1: https://mvnrepository.com/artifact/javax.xml.bind/jaxb-api/2.3.1, CDDL 1.1
jaxb-impl 2.2.3-1: https://mvnrepository.com/artifact/com.sun.xml.bind/jaxb-impl/2.2.3-1, CDDL and GPL 1.1

759
dolphinscheduler-dist/release-docs/licenses/LICENSE-javax.mail.txt

@@ -0,0 +1,759 @@
COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1
1. Definitions.
1.1. "Contributor" means each individual or entity that creates or
contributes to the creation of Modifications.
1.2. "Contributor Version" means the combination of the Original
Software, prior Modifications used by a Contributor (if any), and
the Modifications made by that particular Contributor.
1.3. "Covered Software" means (a) the Original Software, or (b)
Modifications, or (c) the combination of files containing Original
Software with files containing Modifications, in each case including
portions thereof.
1.4. "Executable" means the Covered Software in any form other than
Source Code.
1.5. "Initial Developer" means the individual or entity that first
makes Original Software available under this License.
1.6. "Larger Work" means a work which combines Covered Software or
portions thereof with code not governed by the terms of this License.
1.7. "License" means this document.
1.8. "Licensable" means having the right to grant, to the maximum
extent possible, whether at the time of the initial grant or
subsequently acquired, any and all of the rights conveyed herein.
1.9. "Modifications" means the Source Code and Executable form of
any of the following:
A. Any file that results from an addition to, deletion from or
modification of the contents of a file containing Original Software
or previous Modifications;
B. Any new file that contains any part of the Original Software or
previous Modification; or
C. Any new file that is contributed or otherwise made available
under the terms of this License.
1.10. "Original Software" means the Source Code and Executable form
of computer software code that is originally released under this
License.
1.11. "Patent Claims" means any patent claim(s), now owned or
hereafter acquired, including without limitation, method, process,
and apparatus claims, in any patent Licensable by grantor.
1.12. "Source Code" means (a) the common form of computer software
code in which modifications are made and (b) associated
documentation included in or with such code.
1.13. "You" (or "Your") means an individual or a legal entity
exercising rights under, and complying with all of the terms of,
this License. For legal entities, "You" includes any entity which
controls, is controlled by, or is under common control with You. For
purposes of this definition, "control" means (a) the power, direct
or indirect, to cause the direction or management of such entity,
whether by contract or otherwise, or (b) ownership of more than
fifty percent (50%) of the outstanding shares or beneficial
ownership of such entity.
2. License Grants.
2.1. The Initial Developer Grant.
Conditioned upon Your compliance with Section 3.1 below and subject
to third party intellectual property claims, the Initial Developer
hereby grants You a world-wide, royalty-free, non-exclusive license:
(a) under intellectual property rights (other than patent or
trademark) Licensable by Initial Developer, to use, reproduce,
modify, display, perform, sublicense and distribute the Original
Software (or portions thereof), with or without Modifications,
and/or as part of a Larger Work; and
(b) under Patent Claims infringed by the making, using or selling of
Original Software, to make, have made, use, practice, sell, and
offer for sale, and/or otherwise dispose of the Original Software
(or portions thereof).
(c) The licenses granted in Sections 2.1(a) and (b) are effective on
the date Initial Developer first distributes or otherwise makes the
Original Software available to a third party under the terms of this
License.
(d) Notwithstanding Section 2.1(b) above, no patent license is
granted: (1) for code that You delete from the Original Software, or
(2) for infringements caused by: (i) the modification of the
Original Software, or (ii) the combination of the Original Software
with other software or devices.
2.2. Contributor Grant.
Conditioned upon Your compliance with Section 3.1 below and subject
to third party intellectual property claims, each Contributor hereby
grants You a world-wide, royalty-free, non-exclusive license:
(a) under intellectual property rights (other than patent or
trademark) Licensable by Contributor to use, reproduce, modify,
display, perform, sublicense and distribute the Modifications
created by such Contributor (or portions thereof), either on an
unmodified basis, with other Modifications, as Covered Software
and/or as part of a Larger Work; and
(b) under Patent Claims infringed by the making, using, or selling
of Modifications made by that Contributor either alone and/or in
combination with its Contributor Version (or portions of such
combination), to make, use, sell, offer for sale, have made, and/or
otherwise dispose of: (1) Modifications made by that Contributor (or
portions thereof); and (2) the combination of Modifications made by
that Contributor with its Contributor Version (or portions of such
combination).
(c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective
on the date Contributor first distributes or otherwise makes the
Modifications available to a third party.
(d) Notwithstanding Section 2.2(b) above, no patent license is
granted: (1) for any code that Contributor has deleted from the
Contributor Version; (2) for infringements caused by: (i) third
party modifications of Contributor Version, or (ii) the combination
of Modifications made by that Contributor with other software
(except as part of the Contributor Version) or other devices; or (3)
under Patent Claims infringed by Covered Software in the absence of
Modifications made by that Contributor.
3. Distribution Obligations.
3.1. Availability of Source Code.
Any Covered Software that You distribute or otherwise make available
in Executable form must also be made available in Source Code form
and that Source Code form must be distributed only under the terms
of this License. You must include a copy of this License with every
copy of the Source Code form of the Covered Software You distribute
or otherwise make available. You must inform recipients of any such
Covered Software in Executable form as to how they can obtain such
Covered Software in Source Code form in a reasonable manner on or
through a medium customarily used for software exchange.
3.2. Modifications.
The Modifications that You create or to which You contribute are
governed by the terms of this License. You represent that You
believe Your Modifications are Your original creation(s) and/or You
have sufficient rights to grant the rights conveyed by this License.
3.3. Required Notices.
You must include a notice in each of Your Modifications that
identifies You as the Contributor of the Modification. You may not
remove or alter any copyright, patent or trademark notices contained
within the Covered Software, or any notices of licensing or any
descriptive text giving attribution to any Contributor or the
Initial Developer.
3.4. Application of Additional Terms.
You may not offer or impose any terms on any Covered Software in
Source Code form that alters or restricts the applicable version of
this License or the recipients' rights hereunder. You may choose to
offer, and to charge a fee for, warranty, support, indemnity or
liability obligations to one or more recipients of Covered Software.
However, you may do so only on Your own behalf, and not on behalf of
the Initial Developer or any Contributor. You must make it
absolutely clear that any such warranty, support, indemnity or
liability obligation is offered by You alone, and You hereby agree
to indemnify the Initial Developer and every Contributor for any
liability incurred by the Initial Developer or such Contributor as a
result of warranty, support, indemnity or liability terms You offer.
3.5. Distribution of Executable Versions.
You may distribute the Executable form of the Covered Software under
the terms of this License or under the terms of a license of Your
choice, which may contain terms different from this License,
provided that You are in compliance with the terms of this License
and that the license for the Executable form does not attempt to
limit or alter the recipient's rights in the Source Code form from
the rights set forth in this License. If You distribute the Covered
Software in Executable form under a different license, You must make
it absolutely clear that any terms which differ from this License
are offered by You alone, not by the Initial Developer or
Contributor. You hereby agree to indemnify the Initial Developer and
every Contributor for any liability incurred by the Initial
Developer or such Contributor as a result of any such terms You offer.
3.6. Larger Works.
You may create a Larger Work by combining Covered Software with
other code not governed by the terms of this License and distribute
the Larger Work as a single product. In such a case, You must make
sure the requirements of this License are fulfilled for the Covered
Software.
4. Versions of the License.
4.1. New Versions.
Oracle is the initial license steward and may publish revised and/or
new versions of this License from time to time. Each version will be
given a distinguishing version number. Except as provided in Section
4.3, no one other than the license steward has the right to modify
this License.
4.2. Effect of New Versions.
You may always continue to use, distribute or otherwise make the
Covered Software available under the terms of the version of the
License under which You originally received the Covered Software. If
the Initial Developer includes a notice in the Original Software
prohibiting it from being distributed or otherwise made available
under any subsequent version of the License, You must distribute and
make the Covered Software available under the terms of the version
of the License under which You originally received the Covered
Software. Otherwise, You may also choose to use, distribute or
otherwise make the Covered Software available under the terms of any
subsequent version of the License published by the license steward.
4.3. Modified Versions.
When You are an Initial Developer and You want to create a new
license for Your Original Software, You may create and use a
modified version of this License if You: (a) rename the license and
remove any references to the name of the license steward (except to
note that the license differs from this License); and (b) otherwise
make it clear that the license contains terms which differ from this
License.
5. DISCLAIMER OF WARRANTY.
COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE
IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR
NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF
THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE
DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY
OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING,
REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN
ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS
AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
6. TERMINATION.
6.1. This License and the rights granted hereunder will terminate
automatically if You fail to comply with terms herein and fail to
cure such breach within 30 days of becoming aware of the breach.
Provisions which, by their nature, must remain in effect beyond the
termination of this License shall survive.
6.2. If You assert a patent infringement claim (excluding
declaratory judgment actions) against Initial Developer or a
Contributor (the Initial Developer or Contributor against whom You
assert such claim is referred to as "Participant") alleging that the
Participant Software (meaning the Contributor Version where the
Participant is a Contributor or the Original Software where the
Participant is the Initial Developer) directly or indirectly
infringes any patent, then any and all rights granted directly or
indirectly to You by such Participant, the Initial Developer (if the
Initial Developer is not the Participant) and all Contributors under
Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice
from Participant terminate prospectively and automatically at the
expiration of such 60 day notice period, unless if within such 60
day period You withdraw Your claim with respect to the Participant
Software against such Participant either unilaterally or pursuant to
a written agreement with Participant.
6.3. If You assert a patent infringement claim against Participant
alleging that the Participant Software directly or indirectly
infringes any patent where such claim is resolved (such as by
license or settlement) prior to the initiation of patent
infringement litigation, then the reasonable value of the licenses
granted by such Participant under Sections 2.1 or 2.2 shall be taken
into account in determining the amount or value of any payment or
license.
6.4. In the event of termination under Sections 6.1 or 6.2 above,
all end user licenses that have been validly granted by You or any
distributor hereunder prior to termination (excluding licenses
granted to You by any distributor) shall survive termination.
7. LIMITATION OF LIABILITY.
UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
(INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE
INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF
COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE
TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR
CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT
LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER
FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR
LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE
POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT
APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH
PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH
LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR
LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION
AND LIMITATION MAY NOT APPLY TO YOU.
8. U.S. GOVERNMENT END USERS.
The Covered Software is a "commercial item," as that term is defined
in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
software" (as that term is defined at 48 C.F.R. <EFBFBD>
252.227-7014(a)(1)) and "commercial computer software documentation"
as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent
with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4
(June 1995), all U.S. Government End Users acquire Covered Software
with only those rights set forth herein. This U.S. Government Rights
clause is in lieu of, and supersedes, any other FAR, DFAR, or other
clause or provision that addresses Government rights in computer
software under this License.
9. MISCELLANEOUS.
This License represents the complete agreement concerning subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. This License shall be governed by
the law of the jurisdiction specified in a notice contained within
the Original Software (except to the extent applicable law, if any,
provides otherwise), excluding such jurisdiction's conflict-of-law
provisions. Any litigation relating to this License shall be subject
to the jurisdiction of the courts located in the jurisdiction and
venue specified in a notice contained within the Original Software,
with the losing party responsible for costs, including, without
limitation, court costs and reasonable attorneys' fees and expenses.
The application of the United Nations Convention on Contracts for
the International Sale of Goods is expressly excluded. Any law or
regulation which provides that the language of a contract shall be
construed against the drafter shall not apply to this License. You
agree that You alone are responsible for compliance with the United
States export administration regulations (and the export control
laws and regulation of any other countries) when You use, distribute
or otherwise make available any Covered Software.
10. RESPONSIBILITY FOR CLAIMS.
As between Initial Developer and the Contributors, each party is
responsible for claims and damages arising, directly or indirectly,
out of its utilization of rights under this License and You agree to
work with Initial Developer and Contributors to distribute such
responsibility on an equitable basis. Nothing herein is intended or
shall be deemed to constitute any admission of liability.
------------------------------------------------------------------------
NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION
LICENSE (CDDL)
The code released under the CDDL shall be governed by the laws of the
State of California (excluding conflict-of-law provisions). Any
litigation relating to this License shall be subject to the jurisdiction
of the Federal Courts of the Northern District of California and the
state courts of the State of California, with venue lying in Santa Clara
County, California.
The GNU General Public License (GPL) Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.
51 Franklin Street, Fifth Floor
Boston, MA 02110-1335
USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your freedom to
share and change it. By contrast, the GNU General Public License is
intended to guarantee your freedom to share and change free software--to
make sure the software is free for all its users. This General Public
License applies to most of the Free Software Foundation's software and
to any other program whose authors commit to using it. (Some other Free
Software Foundation software is covered by the GNU Library General
Public License instead.) You can apply it to your programs, too.
When we speak of free software, we are referring to freedom, not price.
Our General Public Licenses are designed to make sure that you have the
freedom to distribute copies of free software (and charge for this
service if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid anyone
to deny you these rights or to ask you to surrender the rights. These
restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether gratis
or for a fee, you must give the recipients all the rights that you have.
You must make sure that they, too, receive or can get the source code.
And you must show them these terms so they know their rights.
We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.
Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.
Finally, any free program is threatened constantly by software patents.
We wish to avoid the danger that redistributors of a free program will
individually obtain patent licenses, in effect making the program
proprietary. To prevent this, we have made it clear that any patent must
be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains a
notice placed by the copyright holder saying it may be distributed under
the terms of this General Public License. The "Program", below, refers
to any such program or work, and a "work based on the Program" means
either the Program or any derivative work under copyright law: that is
to say, a work containing the Program or a portion of it, either
verbatim or with modifications and/or translated into another language.
(Hereinafter, translation is included without limitation in the term
"modification".) Each licensee is addressed as "you".
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of running
the Program is not restricted, and the output from the Program is
covered only if its contents constitute a work based on the Program
(independent of having been made by running the Program). Whether that
is true depends on what the Program does.
1. You may copy and distribute verbatim copies of the Program's source
code as you receive it, in any medium, provided that you conspicuously
and appropriately publish on each copy an appropriate copyright notice
and disclaimer of warranty; keep intact all the notices that refer to
this License and to the absence of any warranty; and give any other
recipients of the Program a copy of this License along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
2. You may modify your copy or copies of the Program or any portion of
it, thus forming a work based on the Program, and copy and distribute
such modifications or work under the terms of Section 1 above, provided
that you also meet all of these conditions:
a) You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any part
thereof, to be licensed as a whole at no charge to all third parties
under the terms of this License.
c) If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a notice
that there is no warranty (or else, saying that you provide a
warranty) and that users may redistribute the program under these
conditions, and telling the user how to view a copy of this License.
(Exception: if the Program itself is interactive but does not
normally print such an announcement, your work based on the Program
is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program, and
can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based on
the Program, the distribution of the whole must be on the terms of this
License, whose permissions for other licensees extend to the entire
whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of a
storage or distribution medium does not bring the other work under the
scope of this License.
3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections 1
and 2 above on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your cost
of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer to
distribute corresponding source code. (This alternative is allowed
only for noncommercial distribution and only if you received the
program in object code or executable form with such an offer, in
accord with Subsection b above.)
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source code
means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to control
compilation and installation of the executable. However, as a special
exception, the source code distributed need not include anything that is
normally distributed (in either source or binary form) with the major
components (compiler, kernel, and so on) of the operating system on
which the executable runs, unless that component itself accompanies the
executable.
If distribution of executable or object code is made by offering access
to copy from a designated place, then offering equivalent access to copy
the source code from the same place counts as distribution of the source
code, even though third parties are not compelled to copy the source
along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt otherwise
to copy, modify, sublicense or distribute the Program is void, and will
automatically terminate your rights under this License. However, parties
who have received copies, or rights, from you under this License will
not have their licenses terminated so long as such parties remain in
full compliance.
5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and all
its terms and conditions for copying, distributing or modifying the
Program or works based on it.
6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further restrictions
on the recipients' exercise of the rights granted herein. You are not
responsible for enforcing compliance by third parties to this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot distribute
so as to satisfy simultaneously your obligations under this License and
any other pertinent obligations, then as a consequence you may not
distribute the Program at all. For example, if a patent license would
not permit royalty-free redistribution of the Program by all those who
receive copies directly or indirectly through you, then the only way you
could satisfy both it and this License would be to refrain entirely from
distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is implemented
by public license practices. Many people have made generous
contributions to the wide range of software distributed through that
system in reliance on consistent application of that system; it is up to
the author/donor to decide if he or she is willing to distribute
software through any other system and a licensee cannot impose that choice.
This section is intended to make thoroughly clear what is believed to be
a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License may
add an explicit geographical distribution limitation excluding those
countries, so that distribution is permitted only in or among countries
not thus excluded. In such case, this License incorporates the
limitation as if written in the body of this License.
9. The Free Software Foundation may publish revised and/or new
versions of the General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and
conditions either of that version or of any later version published by
the Free Software Foundation. If the Program does not specify a version
number of this License, you may choose any version ever published by the
Free Software Foundation.
10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the
author to ask for permission. For software which is copyrighted by the
Free Software Foundation, write to the Free Software Foundation; we
sometimes make exceptions for this. Our decision will be guided by the
two goals of preserving the free status of all derivatives of our free
software and of promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO
WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND,
EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH
YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL
NECESSARY SERVICING, REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR
DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL
DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM
(INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED
INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF
THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR
OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest to
attach them to the start of each source file to most effectively convey
the exclusion of warranty; and each file should have at least the
"copyright" line and a pointer to where the full notice is found.
One line to give the program's name and a brief idea of what it does.
Copyright (C) <year> <name of author>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
Also add information on how to contact you by electronic and paper mail.
If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:
Gnomovision version 69, Copyright (C) year name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type
`show w'. This is free software, and you are welcome to redistribute
it under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the
appropriate parts of the General Public License. Of course, the commands
you use may be called something other than `show w' and `show c'; they
could even be mouse-clicks or menu items--whatever suits your program.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the
program `Gnomovision' (which makes passes at compilers) written by
James Hacker.
signature of Ty Coon, 1 April 1989
Ty Coon, President of Vice
This General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications
with the library. If this is what you want to do, use the GNU Library
General Public License instead of this License.
#
Certain source files distributed by Oracle America, Inc. and/or its
affiliates are subject to the following clarification and special
exception to the GPLv2, based on the GNU Project exception for its
Classpath libraries, known as the GNU Classpath Exception, but only
where Oracle has expressly included in the particular source file's
header the words "Oracle designates this particular file as subject to
the "Classpath" exception as provided by Oracle in the LICENSE file
that accompanied this code."
You should also note that Oracle includes multiple, independent
programs in this software package. Some of those programs are provided
under licenses deemed incompatible with the GPLv2 by the Free Software
Foundation and others. For example, the package includes programs
licensed under the Apache License, Version 2.0. Such programs are
licensed to you under their original licenses.
Oracle facilitates your further distribution of this package by adding
the Classpath Exception to the necessary parts of its GPLv2 code, which
permits you to use that code in combination with other independent
modules not licensed under the GPLv2. However, note that this would
not permit you to commingle code under an incompatible license with
Oracle's GPLv2 licensed code by, for example, cutting and pasting such
code into a file also containing Oracle's GPLv2 licensed code and then
distributing the result. Additionally, if you were to remove the
Classpath Exception from any of the files to which it applies and
distribute the result, you would likely be required to license some or
all of the other code in that distribution under the GPLv2 as well, and
since the GPLv2 is incompatible with the license terms of some items
included in the distribution by Oracle, removing the Classpath
Exception could therefore effectively compromise your ability to
further distribute the package.
Proceed with caution and we recommend that you obtain the advice of a
lawyer skilled in open source matters before removing the Classpath
Exception or making modifications to this package which may
subsequently be redistributed and/or involve the use of third party
software.
CLASSPATH EXCEPTION
Linking this library statically or dynamically with other modules is
making a combined work based on this library. Thus, the terms and
conditions of the GNU General Public License version 2 cover the whole
combination.
As a special exception, the copyright holders of this library give you
permission to link this library with independent modules to produce an
executable, regardless of the license terms of these independent
modules, and to copy and distribute the resulting executable under
terms of your choice, provided that you also meet, for each linked
independent module, the terms and conditions of the license of that
module. An independent module is a module which is not derived from or
based on this library. If you modify this library, you may extend this
exception to your version of the library, but you are not obligated to
do so. If you do not wish to do so, delete this exception statement
from your version.

9
dolphinscheduler-dist/src/main/assembly/dolphinscheduler-binary.xml vendored

@@ -151,6 +151,15 @@
<outputDirectory>.</outputDirectory>
</fileSet>
<!--alert plugin-->
<fileSet>
<directory>${basedir}/../dolphinscheduler-dist/target/dolphinscheduler-dist-${project.version}</directory>
<includes>
<include>**/*.*</include>
</includes>
<outputDirectory>.</outputDirectory>
</fileSet>
<fileSet>
<directory>${basedir}/../dolphinscheduler-ui/dist</directory>
<includes>

8
dolphinscheduler-dist/src/main/assembly/dolphinscheduler-nginx.xml vendored

@@ -150,6 +150,14 @@
</includes>
<outputDirectory>.</outputDirectory>
</fileSet>
<!--alert plugin-->
<fileSet>
<directory>${basedir}/../dolphinscheduler-dist/target/dolphinscheduler-dist-${project.version}</directory>
<includes>
<include>**/*.*</include>
</includes>
<outputDirectory>.</outputDirectory>
</fileSet>
<fileSet>
<directory>${basedir}/../dolphinscheduler-ui/dist</directory>

1
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/builder/TaskExecutionContextBuilder.java

@@ -44,7 +44,6 @@ public class TaskExecutionContextBuilder {
taskExecutionContext.setStartTime(taskInstance.getStartTime());
taskExecutionContext.setTaskType(taskInstance.getTaskType());
taskExecutionContext.setLogPath(taskInstance.getLogPath());
taskExecutionContext.setExecutePath(taskInstance.getExecutePath());
taskExecutionContext.setTaskJson(taskInstance.getTaskJson());
taskExecutionContext.setWorkerGroup(taskInstance.getWorkerGroup());
taskExecutionContext.setHost(taskInstance.getHost());

14
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumer.java

@@ -35,7 +35,6 @@ import org.apache.dolphinscheduler.common.task.sqoop.targets.TargetMysqlParamete
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.EnumUtils;
import org.apache.dolphinscheduler.common.utils.FileUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.common.utils.TaskParametersUtils;
@@ -222,7 +221,6 @@ public class TaskPriorityQueueConsumer extends Thread {
String userQueue = processService.queryUserQueueByProcessInstanceId(taskInstance.getProcessInstanceId());
taskInstance.getProcessInstance().setQueue(StringUtils.isEmpty(userQueue) ? tenant.getQueue() : userQueue);
taskInstance.getProcessInstance().setTenantCode(tenant.getTenantCode());
taskInstance.setExecutePath(getExecLocalPath(taskInstance));
taskInstance.setResources(getResourceFullNames(taskNode));
SQLTaskExecutionContext sqlTaskExecutionContext = new SQLTaskExecutionContext();
@@ -363,18 +361,6 @@
}
}
/**
* get execute local path
*
* @return execute local path
*/
private String getExecLocalPath(TaskInstance taskInstance) {
return FileUtils.getProcessExecDir(taskInstance.getProcessDefine().getProjectId(),
taskInstance.getProcessDefine().getId(),
taskInstance.getProcessInstance().getId(),
taskInstance.getId());
}
/**
* whether tenant is null
*

2
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/registry/MasterRegistry.java

@@ -135,7 +135,7 @@ public class MasterRegistry {
*/
private String getLocalAddress() {
return NetUtils.getHost() + ":" + masterConfig.getListenPort();
return NetUtils.getAddr(masterConfig.getListenPort());
}
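Note: this hunk is one of several in this commit (WorkerRegistry, MasterSchedulerService, the master exec threads, TaskExecuteProcessor, NettyExecutorManagerTest) that replace hand-rolled "host:port" concatenation with NetUtils.getAddr(int). NetUtils itself is not shown in this diff, so the following is only a sketch of what the helper plausibly centralizes:
    // Hypothetical sketch of NetUtils.getAddr(int); the real implementation
    // is not part of this diff, only its call sites are.
    public static String getAddr(int port) {
        // one place to build "host:port" instead of repeating
        // NetUtils.getHost() + ":" + port at every call site
        return String.format("%s:%d", getHost(), port);
    }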

2
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/ConditionsTaskExecThread.java

@@ -125,7 +125,7 @@ public class ConditionsTaskExecThread extends MasterBaseTaskExecThread {
private void initTaskParameters() {
this.taskInstance.setLogPath(LogUtils.getTaskLogPath(taskInstance));
this.taskInstance.setHost(NetUtils.getHost() + Constants.COLON + masterConfig.getListenPort());
this.taskInstance.setHost(NetUtils.getAddr(masterConfig.getListenPort()));
taskInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
taskInstance.setStartTime(new Date());
this.processService.saveTaskInstance(taskInstance);

2
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/DependentTaskExecThread.java

@@ -185,7 +185,7 @@ public class DependentTaskExecThread extends MasterBaseTaskExecThread {
private void initTaskParameters() {
taskInstance.setLogPath(LogUtils.getTaskLogPath(taskInstance));
taskInstance.setHost(NetUtils.getHost() + Constants.COLON + masterConfig.getListenPort());
taskInstance.setHost(NetUtils.getAddr(masterConfig.getListenPort()));
taskInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
taskInstance.setStartTime(new Date());
processService.updateTaskInstance(taskInstance);

4
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterSchedulerService.java

@@ -178,7 +178,7 @@ public class MasterSchedulerService extends Thread {
}
}
private String getLocalAddress(){
return NetUtils.getHost() + ":" + masterConfig.getListenPort();
private String getLocalAddress() {
return NetUtils.getAddr(masterConfig.getListenPort());
}
}

4
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterTaskExecThread.java

@@ -52,7 +52,9 @@ public class MasterTaskExecThread extends MasterBaseTaskExecThread {
*/
private TaskInstanceCacheManager taskInstanceCacheManager;
/**
* netty executor manager
*/
private NettyExecutorManager nettyExecutorManager;

4
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/utils/FlinkArgsUtils.java

@@ -64,9 +64,9 @@ public class FlinkArgsUtils {
args.add(ArgsUtils.escape(appName));
}
// judge flink version,from flink1.10,the parameter -yn removed
// judge flink version, the parameter -yn has been removed since Flink 1.10
String flinkVersion = param.getFlinkVersion();
if (FLINK_VERSION_BEFORE_1_10.equals(flinkVersion)) {
if (flinkVersion == null || FLINK_VERSION_BEFORE_1_10.equals(flinkVersion)) {
int taskManager = param.getTaskManager();
if (taskManager != 0) { //-yn
args.add(Constants.FLINK_TASK_MANAGE);
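With the added null check, a task whose flinkVersion field is unset (for example, one defined before the version selector existed) falls back to the pre-1.10 behavior. A hedged sketch of the resulting branch; the continuation lines are assumed, since the hunk is truncated here:
    String flinkVersion = param.getFlinkVersion();
    // null is treated like "<1.10", so the -yn flag (removed in Flink 1.10)
    // is still emitted for task definitions that never set a version
    if (flinkVersion == null || FLINK_VERSION_BEFORE_1_10.equals(flinkVersion)) {
        int taskManager = param.getTaskManager();
        if (taskManager != 0) {
            args.add(Constants.FLINK_TASK_MANAGE);   // "-yn"
            args.add(String.valueOf(taskManager));   // assumed continuation
        }
    }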

9
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/utils/ProcessUtils.java

@@ -344,10 +344,7 @@ public class ProcessUtils {
}
String runCmd = String.format("%s %s", Constants.SH, commandFile);
if (StringUtils.isNotEmpty(tenantCode)) {
runCmd = "sudo -u " + tenantCode + " " + runCmd;
}
runCmd = OSUtils.getSudoCmd(tenantCode, runCmd);
logger.info("kill cmd:{}", runCmd);
OSUtils.exeCmd(runCmd);
} catch (Exception e) {
@@ -369,8 +366,8 @@
return;
}
String cmd = String.format("sudo kill -9 %s", getPidsStr(processId));
String cmd = String.format("kill -9 %s", getPidsStr(processId));
cmd = OSUtils.getSudoCmd(taskExecutionContext.getTenantCode(), cmd);
logger.info("process id:{}, cmd:{}", processId, cmd);
OSUtils.exeCmd(cmd);
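This hunk, together with the TaskKillProcessor and AbstractCommandExecutor hunks below, replaces literal "sudo ..." string building with OSUtils.getSudoCmd(tenantCode, cmd). The helper's body is not in this diff; from its call sites it plausibly behaves like this sketch:
    // Hypothetical sketch of OSUtils.getSudoCmd inferred from its call sites;
    // the real method may also consult a global "sudo enabled" switch.
    public static String getSudoCmd(String tenantCode, String command) {
        if (StringUtils.isEmpty(tenantCode)) {
            return command;                              // run as current user
        }
        return "sudo -u " + tenantCode + " " + command;  // run as tenant user
    }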

6
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/utils/UDFUtils.java

@@ -73,13 +73,11 @@ public class UDFUtils {
String resourceFullName;
Set<Map.Entry<UdfFunc,String>> entries = udfFuncTenantCodeMap.entrySet();
for (Map.Entry<UdfFunc,String> entry:entries){
String prefixPath = defaultFS.startsWith("file://") ? "file://" : defaultFS;
String uploadPath = HadoopUtils.getHdfsUdfDir(entry.getValue());
if (!uploadPath.startsWith("hdfs:")) {
uploadPath = defaultFS + uploadPath;
}
resourceFullName = entry.getKey().getResourceName();
resourceFullName = resourceFullName.startsWith("/") ? resourceFullName : String.format("/%s",resourceFullName);
sqls.add(String.format("add jar %s%s", uploadPath, resourceFullName));
sqls.add(String.format("add jar %s%s%s", prefixPath, uploadPath, resourceFullName));
}
}
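The rewritten statement builder prefixes the UDF jar path explicitly, so local filesystems work as well as HDFS. A worked example with illustrative values (the directory and jar names are hypothetical):
    String defaultFS = "file:///";   // illustrative; could also be "hdfs://nn:8020"
    String prefixPath = defaultFS.startsWith("file://") ? "file://" : defaultFS;
    String uploadPath = "/dolphinscheduler/tenantA/udfs";   // hypothetical UDF dir
    String resourceFullName = "/lower.jar";
    // yields: add jar file:///dolphinscheduler/tenantA/udfs/lower.jar
    String sql = String.format("add jar %s%s%s", prefixPath, uploadPath, resourceFullName);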

3
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskExecuteProcessor.java

@@ -139,7 +139,7 @@ public class TaskExecuteProcessor implements NettyRequestProcessor {
taskExecutionContext.getProcessInstanceId(),
taskExecutionContext.getTaskInstanceId()));
taskExecutionContext.setHost(NetUtils.getHost() + ":" + workerConfig.getListenPort());
taskExecutionContext.setHost(NetUtils.getAddr(workerConfig.getListenPort()));
taskExecutionContext.setStartTime(new Date());
taskExecutionContext.setLogPath(LogUtils.getTaskLogPath(taskExecutionContext));
taskExecutionContext.setCurrentExecutionStatus(ExecutionStatus.RUNNING_EXECUTION);
@@ -147,6 +147,7 @@ public class TaskExecuteProcessor implements NettyRequestProcessor {
// local execute path
String execLocalPath = getExecLocalPath(taskExecutionContext);
logger.info("task instance local execute path : {} ", execLocalPath);
taskExecutionContext.setExecutePath(execLocalPath);
FileUtils.taskLoggerThreadLocal.set(taskLogger);
try {
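Note: paired with the setExecutePath removals in TaskExecutionContextBuilder and TaskPriorityQueueConsumer above, this hunk moves computation of the task's local execute path from the master to the worker. A sketch of the worker-side helper, assumed to mirror the id-based getExecLocalPath() deleted from TaskPriorityQueueConsumer earlier in this diff:
    // Assumed worker-side equivalent of the deleted master-side helper.
    private String getExecLocalPath(TaskExecutionContext taskExecutionContext) {
        return FileUtils.getProcessExecDir(taskExecutionContext.getProjectId(),
                taskExecutionContext.getProcessDefineId(),
                taskExecutionContext.getProcessInstanceId(),
                taskExecutionContext.getTaskInstanceId());
    }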

4
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskKillProcessor.java

@@ -115,8 +115,8 @@ public class TaskKillProcessor implements NettyRequestProcessor {
return Pair.of(true, appIds);
}
String cmd = String.format("sudo kill -9 %s", ProcessUtils.getPidsStr(taskExecutionContext.getProcessId()));
String cmd = String.format("kill -9 %s", ProcessUtils.getPidsStr(taskExecutionContext.getProcessId()));
cmd = OSUtils.getSudoCmd(taskExecutionContext.getTenantCode(), cmd);
logger.info("process id:{}, cmd:{}", taskExecutionContext.getProcessId(), cmd);
OSUtils.exeCmd(cmd);

2
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/registry/WorkerRegistry.java

@@ -169,7 +169,7 @@ public class WorkerRegistry {
* get local address
*/
private String getLocalAddress() {
return NetUtils.getHost() + COLON + workerConfig.getListenPort();
return NetUtils.getAddr(workerConfig.getListenPort());
}
/**

9
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java

@@ -27,6 +27,7 @@ import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.thread.ThreadUtils;
import org.apache.dolphinscheduler.common.utils.HadoopUtils;
import org.apache.dolphinscheduler.common.utils.LoggerUtils;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.utils.ProcessUtils;
@@ -265,8 +266,8 @@ public abstract class AbstractCommandExecutor {
if (processId != 0 && process.isAlive()) {
try {
// sudo -u user command to run command
String cmd = String.format("sudo kill %d", processId);
String cmd = String.format("kill %d", processId);
cmd = OSUtils.getSudoCmd(taskExecutionContext.getTenantCode(), cmd);
logger.info("soft kill task:{}, process id:{}, cmd:{}", taskExecutionContext.getTaskAppId(), processId, cmd);
Runtime.getRuntime().exec(cmd);
@@ -286,8 +287,8 @@
private void hardKill(int processId) {
if (processId != 0 && process.isAlive()) {
try {
String cmd = String.format("sudo kill -9 %d", processId);
String cmd = String.format("kill -9 %d", processId);
cmd = OSUtils.getSudoCmd(taskExecutionContext.getTenantCode(), cmd);
logger.info("hard kill task:{}, process id:{}, cmd:{}", taskExecutionContext.getTaskAppId(), processId, cmd);
Runtime.getRuntime().exec(cmd);

51
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java

@@ -17,12 +17,6 @@
package org.apache.dolphinscheduler.server.worker.task.sql;
import static org.apache.dolphinscheduler.common.Constants.HIVE_CONF;
import static org.apache.dolphinscheduler.common.Constants.PASSWORD;
import static org.apache.dolphinscheduler.common.Constants.SEMICOLON;
import static org.apache.dolphinscheduler.common.Constants.USER;
import static org.apache.dolphinscheduler.common.enums.DbType.HIVE;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.DbType;
@@ -33,7 +27,6 @@ import org.apache.dolphinscheduler.common.task.sql.SqlBinds;
import org.apache.dolphinscheduler.common.task.sql.SqlParameters;
import org.apache.dolphinscheduler.common.task.sql.SqlType;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.CommonUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
@@ -50,7 +43,6 @@ import org.apache.dolphinscheduler.service.alert.AlertClientService;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
@@ -61,7 +53,6 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
@@ -135,8 +126,6 @@ public class SqlTask extends AbstractTask {
sqlParameters.getConnParams());
try {
SQLTaskExecutionContext sqlTaskExecutionContext = taskExecutionContext.getSqlTaskExecutionContext();
// load class
DataSourceFactory.loadClass(DbType.valueOf(sqlParameters.getType()));
// get datasource
baseDataSource = DataSourceFactory.getDatasource(DbType.valueOf(sqlParameters.getType()),
@@ -253,10 +242,8 @@
PreparedStatement stmt = null;
ResultSet resultSet = null;
try {
// if upload resource is HDFS and kerberos startup
CommonUtils.loadKerberosConf();
// create connection
connection = createConnection();
connection = baseDataSource.getConnection();
// create temp function
if (CollectionUtils.isNotEmpty(createFuncs)) {
createTempFunction(connection, createFuncs);
@@ -311,8 +298,12 @@
String result = JSONUtils.toJsonString(resultJSONArray);
logger.debug("execute sql : {}", result);
sendAttachment(sqlParameters.getGroupId(), StringUtils.isNotEmpty(sqlParameters.getTitle()) ? sqlParameters.getTitle() : taskExecutionContext.getTaskName() + " query result sets",
JSONUtils.toJsonString(resultJSONArray));
try {
sendAttachment(sqlParameters.getGroupId(), StringUtils.isNotEmpty(sqlParameters.getTitle()) ? sqlParameters.getTitle() : taskExecutionContext.getTaskName() + " query result sets",
JSONUtils.toJsonString(resultJSONArray));
} catch (Exception e) {
logger.warn("sql task sendAttachment error! msg : {} ", e.getMessage());
}
}
/**
@@ -364,34 +355,6 @@
}
}
/**
* create connection
*
* @return connection
* @throws Exception Exception
*/
private Connection createConnection() throws Exception {
// if hive , load connection params if exists
Connection connection = null;
if (HIVE == DbType.valueOf(sqlParameters.getType())) {
Properties paramProp = new Properties();
paramProp.setProperty(USER, baseDataSource.getUser());
paramProp.setProperty(PASSWORD, baseDataSource.getPassword());
Map<String, String> connParamMap = CollectionUtils.stringToMap(sqlParameters.getConnParams(),
SEMICOLON,
HIVE_CONF);
paramProp.putAll(connParamMap);
connection = DriverManager.getConnection(baseDataSource.getJdbcUrl(),
paramProp);
} else {
connection = DriverManager.getConnection(baseDataSource.getJdbcUrl(),
baseDataSource.getUser(),
baseDataSource.getPassword());
}
return connection;
}
/**
* close jdbc resource
*
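With the bespoke createConnection() deleted, SqlTask now loads the Kerberos configuration once and obtains connections from the datasource, Hive included. A hedged sketch of the resulting flow; the surrounding structure is assumed, only the method names appear in the hunks above:
    CommonUtils.loadKerberosConf();            // no-op unless kerberos is configured
    Connection connection = baseDataSource.getConnection();
    try {
        if (CollectionUtils.isNotEmpty(createFuncs)) {
            createTempFunction(connection, createFuncs);  // register temp UDFs first
        }
        // ... prepare and execute the SQL ...
    } finally {
        connection.close();                    // close jdbc resource
    }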

31
dolphinscheduler-server/src/main/resources/config/install_config.conf

@@ -48,35 +48,10 @@ installPath="/data1_1T/dolphinscheduler"
# Note: the deployment user needs to have sudo privileges and permissions to operate hdfs. If hdfs is enabled, the root directory needs to be created by itself
deployUser="dolphinscheduler"
# alert config
# mail server host
mailServerHost="smtp.exmail.qq.com"
# mail server port
# note: Different protocols and encryption methods correspond to different ports, when SSL/TLS is enabled, make sure the port is correct.
mailServerPort="25"
# sender
mailSender="xxxxxxxxxx"
# user
mailUser="xxxxxxxxxx"
# sender password
# note: The mail.passwd is email service authorization code, not the email login password.
mailPassword="xxxxxxxxxx"
# TLS mail protocol support
starttlsEnable="true"
# SSL mail protocol support
# only one of TLS and SSL can be in the true state.
sslEnable="false"
#note: sslTrust is the same as mailServerHost
sslTrust="smtp.exmail.qq.com"
# alert plugin dir
# Note: find and load the Alert Plugin Jar from this dir.
alertPluginDir="/data1_1T/dolphinscheduler/lib/plugin/alert"
# resource storage type:HDFS,S3,NONE
resourceStorageType="NONE"

4
dolphinscheduler-server/src/main/resources/worker.properties

@@ -28,13 +28,13 @@
#worker.reserved.memory=0.3
# worker listener port
#worker.listen.port: 1234
#worker.listen.port=1234
# default worker group
#worker.groups=default
# default worker weight
#work.weight=100
#worker.weight=100
# alert server listener host
alert.listen.host=localhost

4
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/master/dispatch/executor/NettyExecutorManagerTest.java

@@ -79,7 +79,7 @@ public class NettyExecutorManagerTest {
.buildProcessDefinitionRelatedInfo(processDefinition)
.create();
ExecutionContext executionContext = new ExecutionContext(context.toCommand(), ExecutorType.WORKER);
executionContext.setHost(Host.of(NetUtils.getHost() + ":" + serverConfig.getListenPort()));
executionContext.setHost(Host.of(NetUtils.getAddr(serverConfig.getListenPort())));
Boolean execute = nettyExecutorManager.execute(executionContext);
Assert.assertTrue(execute);
nettyRemotingServer.close();
@@ -98,7 +98,7 @@ public class NettyExecutorManagerTest {
.buildProcessDefinitionRelatedInfo(processDefinition)
.create();
ExecutionContext executionContext = new ExecutionContext(context.toCommand(), ExecutorType.WORKER);
executionContext.setHost(Host.of(NetUtils.getHost() + ":4444"));
executionContext.setHost(Host.of(NetUtils.getAddr(4444)));
nettyExecutorManager.execute(executionContext);
}

Some files were not shown because too many files have changed in this diff.