From 34e0a663edbd01995c1e594b3b23a856db7b38a4 Mon Sep 17 00:00:00 2001
From: liwenhe1993 <32166572+liwenhe1993@users.noreply.github.com>
Date: Thu, 20 Feb 2020 20:49:38 +0800
Subject: [PATCH] Refactor dockerfile (#1978)
* Support DS to create user and group in windows environment
* Add unit test
* delete combined-server config in dolphinscheduler-daemon.sh file
* refactor dockerfile
* modify dockerfile
* modify dockerfile
---
dockerfile/Dockerfile | 180 +++++------
dockerfile/README.md | 289 +++++++++++++++++-
.../alert.properties => alert.properties.tpl} | 32 +-
...perties => application-api.properties.tpl} | 17 +-
....properties => application.properties.tpl} | 60 ++--
...ommon.properties => common.properties.tpl} | 69 +++--
.../dolphinscheduler/conf/alert_logback.xml | 49 ---
.../conf/apiserver_logback.xml | 60 ----
.../conf/combined_logback.xml | 80 -----
.../conf/common/hadoop/hadoop.properties | 35 ---
.../conf/config/install_config.conf | 20 --
.../conf/env/.dolphinscheduler_env.sh | 21 --
.../conf/env/.escheduler_env.sh | 20 --
.../conf/i18n/messages.properties | 252 ---------------
.../conf/i18n/messages_en_US.properties | 252 ---------------
.../conf/i18n/messages_zh_CN.properties | 250 ---------------
.../mail_templates/alert_mail_template.ftl | 17 --
.../dolphinscheduler/conf/master_logback.xml | 52 ----
.../dao/mapper/AccessTokenMapper.xml | 33 --
.../dao/mapper/AlertGroupMapper.xml | 47 ---
.../dao/mapper/AlertMapper.xml | 26 --
.../dao/mapper/CommandMapper.xml | 43 ---
.../dao/mapper/DataSourceMapper.xml | 79 -----
.../dao/mapper/DataSourceUserMapper.xml | 30 --
.../dao/mapper/ErrorCommandMapper.xml | 36 ---
.../dao/mapper/ProcessDefinitionMapper.xml | 96 ------
.../dao/mapper/ProcessInstanceMapMapper.xml | 43 ---
.../dao/mapper/ProcessInstanceMapper.xml | 185 -----------
.../dao/mapper/ProjectMapper.xml | 68 -----
.../dao/mapper/ProjectUserMapper.xml | 36 ---
.../dao/mapper/QueueMapper.xml | 42 ---
.../dao/mapper/ResourceMapper.xml | 74 -----
.../dao/mapper/ResourceUserMapper.xml | 32 --
.../dao/mapper/ScheduleMapper.xml | 58 ----
.../dao/mapper/SessionMapper.xml | 32 --
.../dao/mapper/TaskInstanceMapper.xml | 132 --------
.../dao/mapper/TenantMapper.xml | 41 ---
.../dao/mapper/UDFUserMapper.xml | 29 --
.../dao/mapper/UdfFuncMapper.xml | 71 -----
.../dao/mapper/UserAlertGroupMapper.xml | 31 --
.../dao/mapper/UserMapper.xml | 72 -----
.../dao/mapper/WorkerGroupMapper.xml | 40 ---
.../dolphinscheduler/conf/worker_logback.xml | 79 -----
.../conf/zookeeper.properties | 42 ---
.../dolphinscheduler_env} | 13 +-
...uartz.properties => quartz.properties.tpl} | 21 +-
dockerfile/conf/maven/settings.xml | 263 ----------------
dockerfile/conf/nginx/dolphinscheduler.conf | 4 +-
dockerfile/hooks/build | 15 +-
dockerfile/hooks/build.bat | 33 ++
dockerfile/hooks/push | 2 +-
dockerfile/hooks/push.bat | 22 ++
dockerfile/startup-init-conf.sh | 100 ++++++
dockerfile/startup.sh | 223 ++++++++++----
dolphinscheduler-ui/package.json | 2 +-
script/dolphinscheduler-daemon.sh | 2 +-
56 files changed, 817 insertions(+), 3135 deletions(-)
rename dockerfile/conf/dolphinscheduler/{conf/alert.properties => alert.properties.tpl} (68%)
rename dockerfile/conf/dolphinscheduler/{conf/application-api.properties => application-api.properties.tpl} (90%)
rename dockerfile/conf/dolphinscheduler/{conf/application-dao.properties => application.properties.tpl} (72%)
rename dockerfile/conf/dolphinscheduler/{conf/common/common.properties => common.properties.tpl} (55%)
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/alert_logback.xml
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/apiserver_logback.xml
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/combined_logback.xml
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/common/hadoop/hadoop.properties
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/config/install_config.conf
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/env/.dolphinscheduler_env.sh
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/env/.escheduler_env.sh
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/i18n/messages.properties
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/i18n/messages_en_US.properties
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/i18n/messages_zh_CN.properties
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/mail_templates/alert_mail_template.ftl
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/master_logback.xml
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/AccessTokenMapper.xml
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/AlertGroupMapper.xml
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/AlertMapper.xml
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/CommandMapper.xml
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/DataSourceMapper.xml
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/DataSourceUserMapper.xml
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/ErrorCommandMapper.xml
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/ProcessDefinitionMapper.xml
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/ProcessInstanceMapMapper.xml
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/ProcessInstanceMapper.xml
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/ProjectMapper.xml
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/ProjectUserMapper.xml
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/QueueMapper.xml
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/ResourceMapper.xml
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/ResourceUserMapper.xml
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/ScheduleMapper.xml
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/SessionMapper.xml
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/TaskInstanceMapper.xml
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/TenantMapper.xml
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/UDFUserMapper.xml
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/UdfFuncMapper.xml
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/UserAlertGroupMapper.xml
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/UserMapper.xml
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/org/apache/dolphinscheduler/dao/mapper/WorkerGroupMapper.xml
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/worker_logback.xml
delete mode 100644 dockerfile/conf/dolphinscheduler/conf/zookeeper.properties
rename dockerfile/conf/dolphinscheduler/{conf/config/run_config.conf => env/dolphinscheduler_env} (65%)
rename dockerfile/conf/dolphinscheduler/{conf/quartz.properties => quartz.properties.tpl} (81%)
delete mode 100644 dockerfile/conf/maven/settings.xml
create mode 100644 dockerfile/hooks/build.bat
create mode 100644 dockerfile/hooks/push.bat
create mode 100644 dockerfile/startup-init-conf.sh
diff --git a/dockerfile/Dockerfile b/dockerfile/Dockerfile
index 217b2c052f..1fc064c489 100644
--- a/dockerfile/Dockerfile
+++ b/dockerfile/Dockerfile
@@ -15,122 +15,78 @@
# limitations under the License.
#
-FROM ubuntu:18.04
-
-ENV LANG=C.UTF-8
-ENV DEBIAN_FRONTEND=noninteractive
-
-ARG version
-ARG tar_version
-
-#1,install jdk
-
-RUN apt-get update \
- && apt-get -y install openjdk-8-jdk \
- && rm -rf /var/lib/apt/lists/*
-
-ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64
+FROM nginx:alpine
+
+ARG VERSION
+
+ENV TZ Asia/Shanghai
+ENV LANG C.UTF-8
+ENV DEBIAN_FRONTEND noninteractive
+
+#1. install dos2unix shadow bash openrc python sudo vim wget iputils net-tools ssh pip kazoo.
+#If the install is slow, you can replace alpine's mirror with aliyun's mirror, for example:
+#RUN sed -i "s/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g" /etc/apk/repositories
+RUN apk update && \
+ apk add dos2unix shadow bash openrc python sudo vim wget iputils net-tools openssh-server py2-pip && \
+ apk add --update procps && \
+ openrc boot && \
+ pip install kazoo
+
+#2. install jdk
+RUN apk add openjdk8
+ENV JAVA_HOME /usr/lib/jvm/java-1.8-openjdk
ENV PATH $JAVA_HOME/bin:$PATH
-
-#install wget
-RUN apt-get update && \
- apt-get -y install wget
-#2,install ZK
-
+#3. install zk
RUN cd /opt && \
- wget https://www-us.apache.org/dist/zookeeper/zookeeper-3.4.14/zookeeper-3.4.14.tar.gz && \
- tar -zxvf zookeeper-3.4.14.tar.gz && \
- mv zookeeper-3.4.14 zookeeper && \
- rm -rf ./zookeeper-*tar.gz && \
+ wget https://downloads.apache.org/zookeeper/zookeeper-3.5.7/apache-zookeeper-3.5.7-bin.tar.gz && \
+ tar -zxvf apache-zookeeper-3.5.7-bin.tar.gz && \
+ mv apache-zookeeper-3.5.7-bin zookeeper && \
mkdir -p /tmp/zookeeper && \
+ rm -rf ./zookeeper-*tar.gz && \
rm -rf /opt/zookeeper/conf/zoo_sample.cfg
-
-ADD ./dockerfile/conf/zookeeper/zoo.cfg /opt/zookeeper/conf
-ENV ZK_HOME=/opt/zookeeper
-ENV PATH $PATH:$ZK_HOME/bin
-
-#3,install maven
-RUN cd /opt && \
- wget http://apache-mirror.rbc.ru/pub/apache/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz && \
- tar -zxvf apache-maven-3.3.9-bin.tar.gz && \
- mv apache-maven-3.3.9 maven && \
- rm -rf ./apache-maven-*tar.gz && \
- rm -rf /opt/maven/conf/settings.xml
-ADD ./dockerfile/conf/maven/settings.xml /opt/maven/conf
-ENV MAVEN_HOME=/opt/maven
-ENV PATH $PATH:$MAVEN_HOME/bin
-
-#4,install node
-RUN cd /opt && \
- wget https://nodejs.org/download/release/v8.9.4/node-v8.9.4-linux-x64.tar.gz && \
- tar -zxvf node-v8.9.4-linux-x64.tar.gz && \
- mv node-v8.9.4-linux-x64 node && \
- rm -rf ./node-v8.9.4-*tar.gz
-ENV NODE_HOME=/opt/node
-ENV PATH $PATH:$NODE_HOME/bin
-
-#5,install postgresql
-RUN apt-get update && \
- apt-get install -y postgresql postgresql-contrib sudo && \
- sed -i 's/localhost/*/g' /etc/postgresql/10/main/postgresql.conf
-
-#6,install nginx
-RUN apt-get update && \
- apt-get install -y nginx && \
- rm -rf /var/lib/apt/lists/* && \
- echo "\ndaemon off;" >> /etc/nginx/nginx.conf && \
- chown -R www-data:www-data /var/lib/nginx
-
-#7,install sudo,python,vim,ping and ssh command
-RUN apt-get update && \
- apt-get -y install sudo && \
- apt-get -y install python && \
- apt-get -y install vim && \
- apt-get -y install iputils-ping && \
- apt-get -y install net-tools && \
- apt-get -y install openssh-server && \
- apt-get -y install python-pip && \
- pip install kazoo
-
-#8,add dolphinscheduler source code to /opt/dolphinscheduler_source
-ADD . /opt/dolphinscheduler_source
-
-
-#9,backend compilation
-RUN cd /opt/dolphinscheduler_source && \
- mvn clean package -Prelease -Dmaven.test.skip=true
-
-#10,frontend compilation
-RUN chmod -R 777 /opt/dolphinscheduler_source/dolphinscheduler-ui && \
- cd /opt/dolphinscheduler_source/dolphinscheduler-ui && \
- rm -rf /opt/dolphinscheduler_source/dolphinscheduler-ui/node_modules && \
- npm install node-sass --unsafe-perm && \
- npm install && \
- npm run build
-
-#11,modify dolphinscheduler configuration file
-#backend configuration
-RUN tar -zxvf /opt/dolphinscheduler_source/dolphinscheduler-dist/dolphinscheduler-backend/target/apache-dolphinscheduler-incubating-${tar_version}-dolphinscheduler-backend-bin.tar.gz -C /opt && \
- mv /opt/apache-dolphinscheduler-incubating-${tar_version}-dolphinscheduler-backend-bin /opt/dolphinscheduler && \
- rm -rf /opt/dolphinscheduler/conf
-
-ADD ./dockerfile/conf/dolphinscheduler/conf /opt/dolphinscheduler/conf
-#frontend nginx configuration
-ADD ./dockerfile/conf/nginx/dolphinscheduler.conf /etc/nginx/conf.d
-
-#12,open port
-EXPOSE 2181 2888 3888 3306 80 12345 8888
-
-COPY ./dockerfile/startup.sh /root/startup.sh
-#13,modify permissions and set soft links
-RUN chmod +x /root/startup.sh && \
- chmod +x /opt/dolphinscheduler/script/create-dolphinscheduler.sh && \
- chmod +x /opt/zookeeper/bin/zkServer.sh && \
- chmod +x /opt/dolphinscheduler/bin/dolphinscheduler-daemon.sh && \
- rm -rf /bin/sh && \
- ln -s /bin/bash /bin/sh && \
- mkdir -p /tmp/xls
-
+ADD ./conf/zookeeper/zoo.cfg /opt/zookeeper/conf
+ENV ZK_HOME /opt/zookeeper
+ENV PATH $ZK_HOME/bin:$PATH
+
+#4. install pg
+RUN apk add postgresql postgresql-contrib
+
+#5. add dolphinscheduler
+ADD ./apache-dolphinscheduler-incubating-${VERSION}-SNAPSHOT-dolphinscheduler-bin.tar.gz /opt/
+RUN mv /opt/apache-dolphinscheduler-incubating-${VERSION}-SNAPSHOT-dolphinscheduler-bin/ /opt/dolphinscheduler/
+ENV DOLPHINSCHEDULER_HOME /opt/dolphinscheduler
+
+#6. modify nginx
+RUN echo "daemon off;" >> /etc/nginx/nginx.conf && \
+ rm -rf /etc/nginx/conf.d/*
+ADD ./conf/nginx/dolphinscheduler.conf /etc/nginx/conf.d
+
+#7. add configuration and modify permissions and set soft links
+ADD ./startup-init-conf.sh /root/startup-init-conf.sh
+ADD ./startup.sh /root/startup.sh
+ADD ./conf/dolphinscheduler/*.tpl /opt/dolphinscheduler/conf/
+ADD ./conf/dolphinscheduler/env/dolphinscheduler_env /opt/dolphinscheduler/conf/env/
+RUN chmod +x /root/startup-init-conf.sh && \
+ chmod +x /root/startup.sh && \
+ chmod +x /opt/dolphinscheduler/conf/env/dolphinscheduler_env && \
+ chmod +x /opt/dolphinscheduler/script/*.sh && \
+ chmod +x /opt/dolphinscheduler/bin/*.sh && \
+ chmod +x /opt/zookeeper/bin/*.sh && \
+ dos2unix /root/startup-init-conf.sh && \
+ dos2unix /root/startup.sh && \
+ dos2unix /opt/dolphinscheduler/conf/env/dolphinscheduler_env && \
+ dos2unix /opt/dolphinscheduler/script/*.sh && \
+ dos2unix /opt/dolphinscheduler/bin/*.sh && \
+ dos2unix /opt/zookeeper/bin/*.sh && \
+ rm -rf /bin/sh && \
+ ln -s /bin/bash /bin/sh && \
+ mkdir -p /tmp/xls
+
+#8. remove apk index cache
+RUN rm -rf /var/cache/apk/*
+
+#9. expose port
+EXPOSE 2181 2888 3888 5432 12345 8888
ENTRYPOINT ["/root/startup.sh"]
\ No newline at end of file
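With this refactor the image no longer compiles the project during `docker build`; it ADDs a pre-built `apache-dolphinscheduler-incubating-${VERSION}-SNAPSHOT-dolphinscheduler-bin.tar.gz` and reads the version from the `VERSION` build arg. The `hooks/build` script added in this patch presumably wraps that step; a rough manual equivalent, with the version number and image tag as placeholder assumptions, could look like:

```sh
# assumes the binary tarball from the maven release build has already been copied into
# the dockerfile/ directory (name must match the ADD instruction above); "1.2.1" and
# the "dolphinscheduler" tag are placeholders
docker build -t dolphinscheduler --build-arg VERSION=1.2.1 dockerfile/
docker run -d --name dolphinscheduler -p 8888:8888 dolphinscheduler all
```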
diff --git a/dockerfile/README.md b/dockerfile/README.md
index 33b58cacde..fb1500b495 100644
--- a/dockerfile/README.md
+++ b/dockerfile/README.md
@@ -1,11 +1,284 @@
-## Build Image
+## What is Dolphin Scheduler?
+
+Dolphin Scheduler is a distributed and easy-to-expand visual DAG workflow scheduling system, dedicated to solving the complex dependencies in data processing, making the scheduling system out of the box for data processing.
+
+Github URL: https://github.com/apache/incubator-dolphinscheduler
+
+Official Website: https://dolphinscheduler.apache.org
+
+![Dolphin Scheduler](https://dolphinscheduler.apache.org/img/hlogo_colorful.svg)
+
+## How to use this image
+
+#### You can start a dolphinscheduler instance
+```
+$ docker run -d --name dolphinscheduler \
+-e POSTGRESQL_USERNAME=test -e POSTGRESQL_PASSWORD=test \
+-p 8888:8888 \
+dolphinscheduler all
+```
+
+The default postgres user `root`, postgres password `root` and database `dolphinscheduler` are created in `startup.sh`.
+
+A default zookeeper instance is also started by `startup.sh`.
+
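+Once the container is running, the web UI should be reachable on the published port; the `admin` / `dolphinscheduler123` login mentioned in the previous README is assumed to still be the default:
+
+```
+$ docker logs -f dolphinscheduler    # watch postgres, zookeeper and the dolphinscheduler services come up
+# then open http://127.0.0.1:8888 and log in (assumed default: admin / dolphinscheduler123)
+```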
+#### Or via Environment Variables **`POSTGRESQL_HOST`** **`POSTGRESQL_PORT`** **`ZOOKEEPER_QUORUM`**
+
+You can specify an **existing postgres service**. For example:
+
+```
+$ docker run -d --name dolphinscheduler \
+-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" \
+-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
+-p 8888:8888 \
+dolphinscheduler all
+```
+
+You can specify an **existing zookeeper service**. For example:
+
+```
+$ docker run -d --name dolphinscheduler \
+-e ZOOKEEPER_QUORUM="l92.168.x.x:2181"
+-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
+-p 8888:8888 \
+dolphinscheduler all
+```
+
+#### Or start a standalone dolphinscheduler server
+
+You can start a standalone dolphinscheduler server.
+
+* Start a **master server**, for example:
+
+```
+$ docker run -d --name dolphinscheduler \
+-e ZOOKEEPER_QUORUM="l92.168.x.x:2181"
+-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" \
+-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
+dolphinscheduler master-server
+```
+
+* Start a **worker server**, for example:
+
+```
+$ docker run -d --name dolphinscheduler \
+-e ZOOKEEPER_QUORUM="l92.168.x.x:2181"
+-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" \
+-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
+dolphinscheduler worker-server
+```
+
+* Start an **api server**, for example:
+
+```
+$ docker run -d --name dolphinscheduler \
+-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" \
+-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
+-p 12345:12345 \
+dolphinscheduler api-server
+```
+
+* Start an **alert server**, for example:
+
+```
+$ docker run -d --name dolphinscheduler \
+-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" \
+-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
+dolphinscheduler alert-server
+```
+
+* Start a **frontend**, for example:
+
+```
+$ docker run -d --name dolphinscheduler \
+-e FRONTEND_API_SERVER_HOST="192.168.x.x" -e FRONTEND_API_SERVER_PORT="12345" \
+-p 8888:8888 \
+dolphinscheduler frontend
+```
+
+**Note**: You must specify `POSTGRESQL_HOST`, `POSTGRESQL_PORT` and `ZOOKEEPER_QUORUM` when starting a standalone dolphinscheduler server.
+
+## Environment Variables
+
+The Dolphin Scheduler image uses several environment variables which are easy to miss. While none of the variables are required, they may significantly aid you in using the image.
+
+**`POSTGRESQL_HOST`**
+
+This environment variable sets the host for PostgreSQL. The default value is `127.0.0.1`.
+
+**Note**: You must specify it when starting a standalone dolphinscheduler server such as `master-server`, `worker-server`, `api-server` or `alert-server`.
+
+**`POSTGRESQL_PORT`**
+
+This environment variable sets the port for PostgreSQL. The default value is `5432`.
+
+**Note**: You must specify it when starting a standalone dolphinscheduler server such as `master-server`, `worker-server`, `api-server` or `alert-server`.
+
+**`POSTGRESQL_USERNAME`**
+
+This environment variable sets the username for PostgreSQL. The default value is `root`.
+
+**`POSTGRESQL_PASSWORD`**
+
+This environment variable sets the password for PostgreSQL. The default value is `root`.
+
+**`DOLPHINSCHEDULER_ENV_PATH`**
+
+This environment variable sets the runtime environment for tasks. The default value is `/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh`.
+
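+The file this variable points at is a plain shell script of `export` statements that supplies the runtime environment for tasks. A minimal sketch (the paths are illustrative assumptions, not the values shipped in this image):
+
+```
+# illustrative only - point these at the tools actually installed in your image
+export JAVA_HOME=/usr/lib/jvm/java-1.8-openjdk
+export PYTHON_HOME=/usr/bin/python
+export PATH=$PYTHON_HOME:$JAVA_HOME/bin:$PATH
+```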
+**`TASK_QUEUE`**
+
+This environment variable sets the task queue for `master-server` and `worker-server`. The default value is `zookeeper`.
+
+**`ZOOKEEPER_QUORUM`**
+
+This environment variable sets the zookeeper quorum for `master-server` and `worker-server`. The default value is `127.0.0.1:2181`.
+
+**Note**: You must specify it when starting a standalone dolphinscheduler server such as `master-server` or `worker-server`.
+
+**`MASTER_EXEC_THREADS`**
+
+This environment variable sets exec thread num for `master-server`. The default value is `100`.
+
+**`MASTER_EXEC_TASK_NUM`**
+
+This environment variable sets exec task num for `master-server`. The default value is `20`.
+
+**`MASTER_HEARTBEAT_INTERVAL`**
+
+This environment variable sets heartbeat interval for `master-server`. The default value is `10`.
+
+**`MASTER_TASK_COMMIT_RETRYTIMES`**
+
+This environment variable sets task commit retry times for `master-server`. The default value is `5`.
+
+**`MASTER_TASK_COMMIT_INTERVAL`**
+
+This environment variable sets task commit interval for `master-server`. The default value is `1000`.
+
+**`MASTER_MAX_CPULOAD_AVG`**
+
+This environment variable sets max cpu load avg for `master-server`. The default value is `100`.
+
+**`MASTER_RESERVED_MEMORY`**
+
+This environment variable sets reserved memory for `master-server`. The default value is `0.1`.
+
+**`WORKER_EXEC_THREADS`**
+
+This environment variable sets exec thread num for `worker-server`. The default value is `100`.
+
+**`WORKER_HEARTBEAT_INTERVAL`**
+
+This environment variable sets heartbeat interval for `worker-server`. The default value is `10`.
+
+**`WORKER_FETCH_TASK_NUM`**
+
+This environment variable sets fetch task num for `worker-server`. The default value is `3`.
+
+**`WORKER_MAX_CPULOAD_AVG`**
+
+This environment variable sets max cpu load avg for `worker-server`. The default value is `100`.
+
+**`WORKER_RESERVED_MEMORY`**
+
+This environment variable sets reserved memory for `worker-server`. The default value is `0.1`.
+
+**`XLS_FILE_PATH`**
+
+This environment variable sets xls file path for `alert-server`. The default value is `/tmp/xls`.
+
+**`MAIL_SERVER_HOST`**
+
+This environment variable sets mail server host for `alert-server`. The default value is empty.
+
+**`MAIL_SERVER_PORT`**
+
+This environment variable sets mail server port for `alert-server`. The default value is empty.
+
+**`MAIL_SENDER`**
+
+This environment variable sets mail sender for `alert-server`. The default value is empty.
+
+**`MAIL_USER`**
+
+This environment variable sets mail user for `alert-server`. The default value is empty.
+
+**`MAIL_PASSWD`**
+
+This environment variable sets mail password for `alert-server`. The default value is empty.
+
+**`MAIL_SMTP_STARTTLS_ENABLE`**
+
+This environment variable sets SMTP tls for `alert-server`. The default value is `true`.
+
+**`MAIL_SMTP_SSL_ENABLE`**
+
+This environment variable sets SMTP ssl for `alert-server`. The default value is `false`.
+
+**`MAIL_SMTP_SSL_TRUST`**
+
+This environment variable sets the SMTP SSL trust host for `alert-server`. The default value is empty.
+
+**`ENTERPRISE_WECHAT_ENABLE`**
+
+This environment variable sets enterprise wechat enable for `alert-server`. The default value is `false`.
+
+**`ENTERPRISE_WECHAT_CORP_ID`**
+
+This environment variable sets enterprise wechat corp id for `alert-server`. The default value is empty.
+
+**`ENTERPRISE_WECHAT_SECRET`**
+
+This environment variable sets enterprise wechat secret for `alert-server`. The default value is empty.
+
+**`ENTERPRISE_WECHAT_AGENT_ID`**
+
+This environment variable sets enterprise wechat agent id for `alert-server`. The default value is empty.
+
+**`ENTERPRISE_WECHAT_USERS`**
+
+This environment variable sets enterprise wechat users for `alert-server`. The default value is empty.
+
+**`FRONTEND_API_SERVER_HOST`**
+
+This environment variable sets api server host for `frontend`. The default value is `127.0.0.1`.
+
+**Note**: You must specify it when starting a standalone dolphinscheduler server such as `api-server`.
+
+**`FRONTEND_API_SERVER_PORT`**
+
+This environment variable sets api server port for `frontend`. The default value is `12345`.
+
+**Note**: You must specify it when starting a standalone dolphinscheduler server such as `api-server`.
+
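+As an illustration of combining these variables, the following (untested) example starts a master server with a few of the tuning knobs above overridden; all addresses and values are placeholders:
+
+```
+$ docker run -d --name dolphinscheduler-master \
+-e ZOOKEEPER_QUORUM="192.168.x.x:2181" \
+-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" \
+-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
+-e MASTER_EXEC_THREADS="50" -e MASTER_RESERVED_MEMORY="0.2" \
+dolphinscheduler master-server
+```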
+## Initialization scripts
+
+If you would like to do additional initialization in an image derived from this one, add one or more environment variables in `/root/startup-init-conf.sh`, and modify the template files in `/opt/dolphinscheduler/conf/*.tpl`.
+
+For example, to add an environment variable `API_SERVER_PORT` in `/root/startup-init-conf.sh`:
+
+```
+export API_SERVER_PORT=5555
+```
+
+and modify the `/opt/dolphinscheduler/conf/application-api.properties.tpl` template file to add the server port:
```
- cd ..
- docker build -t dolphinscheduler --build-arg version=1.1.0 --build-arg tar_version=1.1.0-SNAPSHOT -f dockerfile/Dockerfile .
- docker run -p 12345:12345 -p 8888:8888 --rm --name dolphinscheduler -d dolphinscheduler
+server.port=${API_SERVER_PORT}
```
-* Visit the url: http://127.0.0.1:8888
-* UserName:admin Password:dolphinscheduler123
-## Note
-* MacOS: The memory of docker needs to be set to 4G, default 2G. Steps: Preferences -> Advanced -> adjust resources -> Apply & Restart
+`/root/startup-init-conf.sh` will dynamically generate the config files:
+
+```sh
+echo "generate app config"
+ls ${DOLPHINSCHEDULER_HOME}/conf/ | grep ".tpl" | while read line; do
+eval "cat << EOF
+$(cat ${DOLPHINSCHEDULER_HOME}/conf/${line})
+EOF
+" > ${DOLPHINSCHEDULER_HOME}/conf/${line%.*}
+done
+
+echo "generate nginx config"
+sed -i "s/FRONTEND_API_SERVER_HOST/${FRONTEND_API_SERVER_HOST}/g" /etc/nginx/conf.d/dolphinscheduler.conf
+sed -i "s/FRONTEND_API_SERVER_PORT/${FRONTEND_API_SERVER_PORT}/g" /etc/nginx/conf.d/dolphinscheduler.conf
+```
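As a concrete (but untested) sketch of the derived-image workflow described above: a child image can bake the extra variable into the environment so the heredoc expansion in `/root/startup-init-conf.sh` picks it up, and rewrite the shipped template to use the new placeholder. The base tag `dolphinscheduler`, the port value and the `sed` call are assumptions, not part of this patch:

```
FROM dolphinscheduler
# make the extra variable visible to the template rendering at container start
ENV API_SERVER_PORT 5555
# swap the hard-coded port in the shipped template for the new placeholder
RUN sed -i "s/^server.port=.*/server.port=\${API_SERVER_PORT}/" /opt/dolphinscheduler/conf/application-api.properties.tpl
```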
diff --git a/dockerfile/conf/dolphinscheduler/conf/alert.properties b/dockerfile/conf/dolphinscheduler/alert.properties.tpl
similarity index 68%
rename from dockerfile/conf/dolphinscheduler/conf/alert.properties
rename to dockerfile/conf/dolphinscheduler/alert.properties.tpl
index 276ef3132a..b940ecd203 100644
--- a/dockerfile/conf/dolphinscheduler/conf/alert.properties
+++ b/dockerfile/conf/dolphinscheduler/alert.properties.tpl
@@ -14,33 +14,33 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
#alert type is EMAIL/SMS
alert.type=EMAIL
+# alert msg template, default is html template
+#alert.template=html
# mail server configuration
mail.protocol=SMTP
-mail.server.host=smtp.126.com
-mail.server.port=
-mail.sender=dolphinscheduler@126.com
-mail.user=dolphinscheduler@126.com
-mail.passwd=escheduler123
-
+mail.server.host=${MAIL_SERVER_HOST}
+mail.server.port=${MAIL_SERVER_PORT}
+mail.sender=${MAIL_SENDER}
+mail.user=${MAIL_USER}
+mail.passwd=${MAIL_PASSWD}
# TLS
-mail.smtp.starttls.enable=false
+mail.smtp.starttls.enable=${MAIL_SMTP_STARTTLS_ENABLE}
# SSL
-mail.smtp.ssl.enable=true
-mail.smtp.ssl.trust=smtp.126.com
+mail.smtp.ssl.enable=${MAIL_SMTP_SSL_ENABLE}
+mail.smtp.ssl.trust=${MAIL_SMTP_SSL_TRUST}
#xls file path,need create if not exist
-xls.file.path=/tmp/xls
+xls.file.path=${XLS_FILE_PATH}
# Enterprise WeChat configuration
-enterprise.wechat.enable=false
-enterprise.wechat.corp.id=xxxxxxx
-enterprise.wechat.secret=xxxxxxx
-enterprise.wechat.agent.id=xxxxxxx
-enterprise.wechat.users=xxxxxxx
+enterprise.wechat.enable=${ENTERPRISE_WECHAT_ENABLE}
+enterprise.wechat.corp.id=${ENTERPRISE_WECHAT_CORP_ID}
+enterprise.wechat.secret=${ENTERPRISE_WECHAT_SECRET}
+enterprise.wechat.agent.id=${ENTERPRISE_WECHAT_AGENT_ID}
+enterprise.wechat.users=${ENTERPRISE_WECHAT_USERS}
enterprise.wechat.token.url=https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret
enterprise.wechat.push.url=https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token
enterprise.wechat.team.send.msg={\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}
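Each `${...}` placeholder above is filled in at container start by the heredoc expansion in `/root/startup-init-conf.sh` (quoted in the README diff). A small, hedged illustration of that mechanism with a made-up value:

```sh
# hypothetical value, as it would arrive via `docker run -e MAIL_SERVER_HOST=...`
export MAIL_SERVER_HOST="smtp.example.com"
# the same expansion startup-init-conf.sh applies to every *.tpl file
eval "cat << EOF
$(cat /opt/dolphinscheduler/conf/alert.properties.tpl)
EOF
" > /opt/dolphinscheduler/conf/alert.properties
grep "^mail.server.host" /opt/dolphinscheduler/conf/alert.properties
# -> mail.server.host=smtp.example.com
```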
diff --git a/dockerfile/conf/dolphinscheduler/conf/application-api.properties b/dockerfile/conf/dolphinscheduler/application-api.properties.tpl
similarity index 90%
rename from dockerfile/conf/dolphinscheduler/conf/application-api.properties
rename to dockerfile/conf/dolphinscheduler/application-api.properties.tpl
index ead8dd872e..424ea55f7d 100644
--- a/dockerfile/conf/dolphinscheduler/conf/application-api.properties
+++ b/dockerfile/conf/dolphinscheduler/application-api.properties.tpl
@@ -14,27 +14,24 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
-logging.config=classpath:apiserver_logback.xml
-
# server port
server.port=12345
-
# session config
server.servlet.session.timeout=7200
-
+# servlet config
server.servlet.context-path=/dolphinscheduler/
-
# file size limit for upload
spring.servlet.multipart.max-file-size=1024MB
spring.servlet.multipart.max-request-size=1024MB
-
-#post content
+# post content
server.jetty.max-http-post-size=5000000
-
+# i18n
spring.messages.encoding=UTF-8
-
#i18n classpath folder , file prefix messages, if have many files, use "," seperator
spring.messages.basename=i18n/messages
+# Authentication types (supported types: PASSWORD)
+security.authentication.type=PASSWORD
+
+
diff --git a/dockerfile/conf/dolphinscheduler/conf/application-dao.properties b/dockerfile/conf/dolphinscheduler/application.properties.tpl
similarity index 72%
rename from dockerfile/conf/dolphinscheduler/conf/application-dao.properties
rename to dockerfile/conf/dolphinscheduler/application.properties.tpl
index 166c36fbf0..c643c414cd 100644
--- a/dockerfile/conf/dolphinscheduler/conf/application-dao.properties
+++ b/dockerfile/conf/dolphinscheduler/application.properties.tpl
@@ -17,70 +17,57 @@
# base spring data source configuration
spring.datasource.type=com.alibaba.druid.pool.DruidDataSource
-# postgresql
+# postgre
spring.datasource.driver-class-name=org.postgresql.Driver
-spring.datasource.url=jdbc:postgresql://127.0.0.1:5432/dolphinscheduler
-spring.datasource.username=root
-spring.datasource.password=root@123
-
+spring.datasource.url=jdbc:postgresql://${POSTGRESQL_HOST}:${POSTGRESQL_PORT}/dolphinscheduler?characterEncoding=utf8
+# mysql
+#spring.datasource.driver-class-name=com.mysql.jdbc.Driver
+#spring.datasource.url=jdbc:mysql://192.168.xx.xx:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8
+spring.datasource.username=${POSTGRESQL_USERNAME}
+spring.datasource.password=${POSTGRESQL_PASSWORD}
# connection configuration
spring.datasource.initialSize=5
# min connection number
spring.datasource.minIdle=5
# max connection number
spring.datasource.maxActive=50
-
# max wait time for get a connection in milliseconds. if configuring maxWait, fair locks are enabled by default and concurrency efficiency decreases.
# If necessary, unfair locks can be used by configuring the useUnfairLock attribute to true.
spring.datasource.maxWait=60000
-
# milliseconds for check to close free connections
spring.datasource.timeBetweenEvictionRunsMillis=60000
-
# the Destroy thread detects the connection interval and closes the physical connection in milliseconds if the connection idle time is greater than or equal to minEvictableIdleTimeMillis.
spring.datasource.timeBetweenConnectErrorMillis=60000
-
# the longest time a connection remains idle without being evicted, in milliseconds
spring.datasource.minEvictableIdleTimeMillis=300000
-
#the SQL used to check whether the connection is valid requires a query statement. If validation Query is null, testOnBorrow, testOnReturn, and testWhileIdle will not work.
spring.datasource.validationQuery=SELECT 1
-
#check whether the connection is valid for timeout, in seconds
spring.datasource.validationQueryTimeout=3
-
# when applying for a connection, if it is detected that the connection is idle longer than time Between Eviction Runs Millis,
# validation Query is performed to check whether the connection is valid
spring.datasource.testWhileIdle=true
-
#execute validation to check if the connection is valid when applying for a connection
spring.datasource.testOnBorrow=true
#execute validation to check if the connection is valid when the connection is returned
spring.datasource.testOnReturn=false
spring.datasource.defaultAutoCommit=true
spring.datasource.keepAlive=true
-
# open PSCache, specify count PSCache for every connection
spring.datasource.poolPreparedStatements=true
spring.datasource.maxPoolPreparedStatementPerConnectionSize=20
-
spring.datasource.spring.datasource.filters=stat,wall,log4j
spring.datasource.connectionProperties=druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000
#mybatis
mybatis-plus.mapper-locations=classpath*:/org.apache.dolphinscheduler.dao.mapper/*.xml
-
mybatis-plus.typeEnumsPackage=org.apache.dolphinscheduler.*.enums
-
#Entity scan, where multiple packages are separated by a comma or semicolon
mybatis-plus.typeAliasesPackage=org.apache.dolphinscheduler.dao.entity
-
#Primary key type AUTO:" database ID AUTO ", INPUT:" user INPUT ID", ID_WORKER:" global unique ID (numeric type unique ID)", UUID:" global unique ID UUID";
mybatis-plus.global-config.db-config.id-type=AUTO
-
#Field policy IGNORED:" ignore judgment ",NOT_NULL:" not NULL judgment "),NOT_EMPTY:" not NULL judgment"
mybatis-plus.global-config.db-config.field-strategy=NOT_NULL
-
#The hump underline is converted
mybatis-plus.global-config.db-config.column-underline=true
mybatis-plus.global-config.db-config.logic-delete-value=-1
@@ -92,12 +79,37 @@ mybatis-plus.configuration.cache-enabled=false
mybatis-plus.configuration.call-setters-on-nulls=true
mybatis-plus.configuration.jdbc-type-for-null=null
+# master settings
+# master execute thread num
+master.exec.threads=${MASTER_EXEC_THREADS}
+# master execute task number in parallel
+master.exec.task.num=${MASTER_EXEC_TASK_NUM}
+# master heartbeat interval
+master.heartbeat.interval=${MASTER_HEARTBEAT_INTERVAL}
+# master commit task retry times
+master.task.commit.retryTimes=${MASTER_TASK_COMMIT_RETRYTIMES}
+# master commit task interval
+master.task.commit.interval=${MASTER_TASK_COMMIT_INTERVAL}
+# only less than cpu avg load, master server can work. default value : the number of cpu cores * 2
+master.max.cpuload.avg=${MASTER_MAX_CPULOAD_AVG}
+# only larger than reserved memory, master server can work. default value : physical memory * 1/10, unit is G.
+master.reserved.memory=${MASTER_RESERVED_MEMORY}
+
+# worker settings
+# worker execute thread num
+worker.exec.threads=${WORKER_EXEC_THREADS}
+# worker heartbeat interval
+worker.heartbeat.interval=${WORKER_HEARTBEAT_INTERVAL}
+# submit the number of tasks at a time
+worker.fetch.task.num=${WORKER_FETCH_TASK_NUM}
+# only less than cpu avg load, worker server can work. default value : the number of cpu cores * 2
+worker.max.cpuload.avg=${WORKER_MAX_CPULOAD_AVG}
+# only larger than reserved memory, worker server can work. default value : physical memory * 1/6, unit is G.
+worker.reserved.memory=${WORKER_RESERVED_MEMORY}
+
# data quality analysis is not currently in use. please ignore the following configuration
-# task record flag
+# task record
task.record.flag=false
task.record.datasource.url=jdbc:mysql://192.168.xx.xx:3306/etl?characterEncoding=UTF-8
task.record.datasource.username=xx
task.record.datasource.password=xx
-
-# Logger Config
-#logging.level.org.apache.dolphinscheduler.dao=debug
diff --git a/dockerfile/conf/dolphinscheduler/conf/common/common.properties b/dockerfile/conf/dolphinscheduler/common.properties.tpl
similarity index 55%
rename from dockerfile/conf/dolphinscheduler/conf/common/common.properties
rename to dockerfile/conf/dolphinscheduler/common.properties.tpl
index 24844f693b..ea03e0b78e 100644
--- a/dockerfile/conf/dolphinscheduler/conf/common/common.properties
+++ b/dockerfile/conf/dolphinscheduler/common.properties.tpl
@@ -16,44 +16,69 @@
#
#task queue implementation, default "zookeeper"
-dolphinscheduler.queue.impl=zookeeper
+dolphinscheduler.queue.impl=${TASK_QUEUE}
-# user data directory path, self configuration, please make sure the directory exists and have read write permissions
-data.basedir.path=/tmp/dolphinscheduler
+#zookeeper cluster. multiple are separated by commas. eg. 192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181
+zookeeper.quorum=${ZOOKEEPER_QUORUM}
+#dolphinscheduler root directory
+zookeeper.dolphinscheduler.root=/dolphinscheduler
+#dolphinscheduler failover directory
+zookeeper.session.timeout=300
+zookeeper.connection.timeout=300
+zookeeper.retry.base.sleep=100
+zookeeper.retry.max.sleep=30000
+zookeeper.retry.maxtime=5
-# directory path for user data download. self configuration, please make sure the directory exists and have read write permissions
-data.download.basedir.path=/tmp/dolphinscheduler/download
+#============================================================================
+# System
+#============================================================================
+# system env path. self configuration, please make sure the directory and file exists and have read write execute permissions
+dolphinscheduler.env.path=${DOLPHINSCHEDULER_ENV_PATH}
+#resource.view.suffixs
+resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml,properties
+# is development state? default "false"
+development.state=true
-# process execute directory. self configuration, please make sure the directory exists and have read write permissions
-process.exec.basepath=/tmp/dolphinscheduler/exec
+# resource upload startup type : HDFS,S3,NONE
+res.upload.startup.type=NONE
+#============================================================================
+# HDFS
+#============================================================================
# Users who have permission to create directories under the HDFS root path
hdfs.root.user=hdfs
-
# data base dir, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions。"/dolphinscheduler" is recommended
data.store2hdfs.basepath=/dolphinscheduler
-
-# resource upload startup type : HDFS,S3,NONE
-res.upload.startup.type=NONE
-
+# user data directory path, self configuration, please make sure the directory exists and have read write permissions
+data.basedir.path=/tmp/dolphinscheduler
+# directory path for user data download. self configuration, please make sure the directory exists and have read write permissions
+data.download.basedir.path=/tmp/dolphinscheduler/download
+# process execute directory. self configuration, please make sure the directory exists and have read write permissions
+process.exec.basepath=/tmp/dolphinscheduler/exec
# whether kerberos starts
hadoop.security.authentication.startup.state=false
-
# java.security.krb5.conf path
java.security.krb5.conf.path=/opt/krb5.conf
-
# loginUserFromKeytab user
login.user.keytab.username=hdfs-mycluster@ESZ.COM
-
# loginUserFromKeytab path
login.user.keytab.path=/opt/hdfs.headless.keytab
-# system env path. self configuration, please make sure the directory and file exists and have read write execute permissions
-dolphinscheduler.env.path=/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh
+#============================================================================
+# S3
+#============================================================================
+# ha or single namenode,If namenode ha needs to copy core-site.xml and hdfs-site.xml
+# to the conf directory,support s3,for example : s3a://dolphinscheduler
+fs.defaultFS=hdfs://mycluster:8020
+# s3 need,s3 endpoint
+fs.s3a.endpoint=http://192.168.199.91:9010
+# s3 need,s3 access key
+fs.s3a.access.key=A3DXS30FO22544RE
+# s3 need,s3 secret key
+fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK
+#resourcemanager ha note this need ips , this empty if single
+yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx
+# If it is a single resourcemanager, you only need to configure one host name. If it is resourcemanager HA, the default configuration is fine
+yarn.application.status.address=http://ark1:8088/ws/v1/cluster/apps/%s
-#resource.view.suffixs
-resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml
-
-# is development state? default "false"
-development.state=true
diff --git a/dockerfile/conf/dolphinscheduler/conf/alert_logback.xml b/dockerfile/conf/dolphinscheduler/conf/alert_logback.xml
deleted file mode 100644
index 35e19865b9..0000000000
--- a/dockerfile/conf/dolphinscheduler/conf/alert_logback.xml
+++ /dev/null
@@ -1,49 +0,0 @@
-
-
-
-
-
-
-
-
-
- [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
-
- UTF-8
-
-
-
-
- ${log.base}/dolphinscheduler-alert.log
-
- ${log.base}/dolphinscheduler-alert.%d{yyyy-MM-dd_HH}.%i.log
- 20
- 64MB
-
-
-
- [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
-
- UTF-8
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/dockerfile/conf/dolphinscheduler/conf/apiserver_logback.xml b/dockerfile/conf/dolphinscheduler/conf/apiserver_logback.xml
deleted file mode 100644
index 36719671c9..0000000000
--- a/dockerfile/conf/dolphinscheduler/conf/apiserver_logback.xml
+++ /dev/null
@@ -1,60 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
-
- UTF-8
-
-
-
-
-
-
- INFO
-
- ${log.base}/dolphinscheduler-api-server.log
-
- ${log.base}/dolphinscheduler-api-server.%d{yyyy-MM-dd_HH}.%i.log
- 168
- 64MB
-
-
-
-
- [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
-
- UTF-8
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/dockerfile/conf/dolphinscheduler/conf/combined_logback.xml b/dockerfile/conf/dolphinscheduler/conf/combined_logback.xml
deleted file mode 100644
index 7a9a5b4621..0000000000
--- a/dockerfile/conf/dolphinscheduler/conf/combined_logback.xml
+++ /dev/null
@@ -1,80 +0,0 @@
-
-
-
-
-
-
-
-
-
- %highlight([%level]) %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{10}:[%line] - %msg%n
-
- UTF-8
-
-
-
-
- INFO
-
-
-
- taskAppId
- ${log.base}
-
-
-
- ${log.base}/${taskAppId}.log
-
-
- [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
-
- UTF-8
-
- true
-
-
-
-
-
- ${log.base}/dolphinscheduler-combined.log
-
- INFO
-
-
-
- ${log.base}/dolphinscheduler-combined.%d{yyyy-MM-dd_HH}.%i.log
- 168
- 200MB
-
-
-
-
- [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
-
- UTF-8
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/dockerfile/conf/dolphinscheduler/conf/common/hadoop/hadoop.properties b/dockerfile/conf/dolphinscheduler/conf/common/hadoop/hadoop.properties
deleted file mode 100644
index 2c19b4a52e..0000000000
--- a/dockerfile/conf/dolphinscheduler/conf/common/hadoop/hadoop.properties
+++ /dev/null
@@ -1,35 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# ha or single namenode,If namenode ha needs to copy core-site.xml and hdfs-site.xml
-# to the conf directory,support s3,for example : s3a://dolphinscheduler
-fs.defaultFS=hdfs://mycluster:8020
-
-# s3 need,s3 endpoint
-fs.s3a.endpoint=http://192.168.199.91:9010
-
-# s3 need,s3 access key
-fs.s3a.access.key=A3DXS30FO22544RE
-
-# s3 need,s3 secret key
-fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK
-
-#resourcemanager ha note this need ips , this empty if single
-yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx
-
-# If it is a single resourcemanager, you only need to configure one host name. If it is resourcemanager HA, the default configuration is fine
-yarn.application.status.address=http://ark1:8088/ws/v1/cluster/apps/%s
\ No newline at end of file
diff --git a/dockerfile/conf/dolphinscheduler/conf/config/install_config.conf b/dockerfile/conf/dolphinscheduler/conf/config/install_config.conf
deleted file mode 100644
index 196a78f49c..0000000000
--- a/dockerfile/conf/dolphinscheduler/conf/config/install_config.conf
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-installPath=/data1_1T/dolphinscheduler
-deployUser=dolphinscheduler
-ips=ark0,ark1,ark2,ark3,ark4
diff --git a/dockerfile/conf/dolphinscheduler/conf/env/.dolphinscheduler_env.sh b/dockerfile/conf/dolphinscheduler/conf/env/.dolphinscheduler_env.sh
deleted file mode 100644
index 8e842fe28e..0000000000
--- a/dockerfile/conf/dolphinscheduler/conf/env/.dolphinscheduler_env.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-export PYTHON_HOME=/usr/bin/python
-export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
-export PATH=$PYTHON_HOME:$JAVA_HOME/bin:$PATH
-export DATAX_HOME=/opt/datax/bin/datax.py
\ No newline at end of file
diff --git a/dockerfile/conf/dolphinscheduler/conf/env/.escheduler_env.sh b/dockerfile/conf/dolphinscheduler/conf/env/.escheduler_env.sh
deleted file mode 100644
index 5b85917fc2..0000000000
--- a/dockerfile/conf/dolphinscheduler/conf/env/.escheduler_env.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-export PYTHON_HOME=/usr/bin/python
-export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
-export PATH=$PYTHON_HOME:$JAVA_HOME/bin:$PATH
\ No newline at end of file
diff --git a/dockerfile/conf/dolphinscheduler/conf/i18n/messages.properties b/dockerfile/conf/dolphinscheduler/conf/i18n/messages.properties
deleted file mode 100644
index be880ba26d..0000000000
--- a/dockerfile/conf/dolphinscheduler/conf/i18n/messages.properties
+++ /dev/null
@@ -1,252 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-QUERY_SCHEDULE_LIST_NOTES=query schedule list
-EXECUTE_PROCESS_TAG=execute process related operation
-PROCESS_INSTANCE_EXECUTOR_TAG=process instance executor related operation
-RUN_PROCESS_INSTANCE_NOTES=run process instance
-START_NODE_LIST=start node list(node name)
-TASK_DEPEND_TYPE=task depend type
-COMMAND_TYPE=command type
-RUN_MODE=run mode
-TIMEOUT=timeout
-EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=execute action to process instance
-EXECUTE_TYPE=execute type
-START_CHECK_PROCESS_DEFINITION_NOTES=start check process definition
-GET_RECEIVER_CC_NOTES=query receiver cc
-DESC=description
-GROUP_NAME=group name
-GROUP_TYPE=group type
-QUERY_ALERT_GROUP_LIST_NOTES=query alert group list
-UPDATE_ALERT_GROUP_NOTES=update alert group
-DELETE_ALERT_GROUP_BY_ID_NOTES=delete alert group by id
-VERIFY_ALERT_GROUP_NAME_NOTES=verify alert group name, check alert group exist or not
-GRANT_ALERT_GROUP_NOTES=grant alert group
-USER_IDS=user id list
-ALERT_GROUP_TAG=alert group related operation
-CREATE_ALERT_GROUP_NOTES=create alert group
-WORKER_GROUP_TAG=worker group related operation
-SAVE_WORKER_GROUP_NOTES=create worker group
-WORKER_GROUP_NAME=worker group name
-WORKER_IP_LIST=worker ip list, eg. 192.168.1.1,192.168.1.2
-QUERY_WORKER_GROUP_PAGING_NOTES=query worker group paging
-QUERY_WORKER_GROUP_LIST_NOTES=query worker group list
-DELETE_WORKER_GROUP_BY_ID_NOTES=delete worker group by id
-DATA_ANALYSIS_TAG=analysis related operation of task state
-COUNT_TASK_STATE_NOTES=count task state
-COUNT_PROCESS_INSTANCE_NOTES=count process instance state
-COUNT_PROCESS_DEFINITION_BY_USER_NOTES=count process definition by user
-COUNT_COMMAND_STATE_NOTES=count command state
-COUNT_QUEUE_STATE_NOTES=count the running status of the task in the queue\
-
-ACCESS_TOKEN_TAG=access token related operation
-MONITOR_TAG=monitor related operation
-MASTER_LIST_NOTES=master server list
-WORKER_LIST_NOTES=worker server list
-QUERY_DATABASE_STATE_NOTES=query database state
-QUERY_ZOOKEEPER_STATE_NOTES=QUERY ZOOKEEPER STATE
-TASK_STATE=task instance state
-SOURCE_TABLE=SOURCE TABLE
-DEST_TABLE=dest table
-TASK_DATE=task date
-QUERY_HISTORY_TASK_RECORD_LIST_PAGING_NOTES=query history task record list paging
-DATA_SOURCE_TAG=data source related operation
-CREATE_DATA_SOURCE_NOTES=create data source
-DATA_SOURCE_NAME=data source name
-DATA_SOURCE_NOTE=data source desc
-DB_TYPE=database type
-DATA_SOURCE_HOST=DATA SOURCE HOST
-DATA_SOURCE_PORT=data source port
-DATABASE_NAME=database name
-QUEUE_TAG=queue related operation
-QUERY_QUEUE_LIST_NOTES=query queue list
-QUERY_QUEUE_LIST_PAGING_NOTES=query queue list paging
-CREATE_QUEUE_NOTES=create queue
-YARN_QUEUE_NAME=yarn(hadoop) queue name
-QUEUE_ID=queue id
-TENANT_DESC=tenant desc
-QUERY_TENANT_LIST_PAGING_NOTES=query tenant list paging
-QUERY_TENANT_LIST_NOTES=query tenant list
-UPDATE_TENANT_NOTES=update tenant
-DELETE_TENANT_NOTES=delete tenant
-RESOURCES_TAG=resource center related operation
-CREATE_RESOURCE_NOTES=create resource
-RESOURCE_TYPE=resource file type
-RESOURCE_NAME=resource name
-RESOURCE_DESC=resource file desc
-RESOURCE_FILE=resource file
-RESOURCE_ID=resource id
-QUERY_RESOURCE_LIST_NOTES=query resource list
-DELETE_RESOURCE_BY_ID_NOTES=delete resource by id
-VIEW_RESOURCE_BY_ID_NOTES=view resource by id
-ONLINE_CREATE_RESOURCE_NOTES=online create resource
-SUFFIX=resource file suffix
-CONTENT=resource file content
-UPDATE_RESOURCE_NOTES=edit resource file online
-DOWNLOAD_RESOURCE_NOTES=download resource file
-CREATE_UDF_FUNCTION_NOTES=create udf function
-UDF_TYPE=UDF type
-FUNC_NAME=function name
-CLASS_NAME=package and class name
-ARG_TYPES=arguments
-UDF_DESC=udf desc
-VIEW_UDF_FUNCTION_NOTES=view udf function
-UPDATE_UDF_FUNCTION_NOTES=update udf function
-QUERY_UDF_FUNCTION_LIST_PAGING_NOTES=query udf function list paging
-VERIFY_UDF_FUNCTION_NAME_NOTES=verify udf function name
-DELETE_UDF_FUNCTION_NOTES=delete udf function
-AUTHORIZED_FILE_NOTES=authorized file
-UNAUTHORIZED_FILE_NOTES=unauthorized file
-AUTHORIZED_UDF_FUNC_NOTES=authorized udf func
-UNAUTHORIZED_UDF_FUNC_NOTES=unauthorized udf func
-VERIFY_QUEUE_NOTES=verify queue
-TENANT_TAG=tenant related operation
-CREATE_TENANT_NOTES=create tenant
-TENANT_CODE=tenant code
-TENANT_NAME=tenant name
-QUEUE_NAME=queue name
-PASSWORD=password
-DATA_SOURCE_OTHER=jdbc connection params, format:{"key1":"value1",...}
-PROJECT_TAG=project related operation
-CREATE_PROJECT_NOTES=create project
-PROJECT_DESC=project description
-UPDATE_PROJECT_NOTES=update project
-PROJECT_ID=project id
-QUERY_PROJECT_BY_ID_NOTES=query project info by project id
-QUERY_PROJECT_LIST_PAGING_NOTES=QUERY PROJECT LIST PAGING
-DELETE_PROJECT_BY_ID_NOTES=delete project by id
-QUERY_UNAUTHORIZED_PROJECT_NOTES=query unauthorized project
-QUERY_ALL_PROJECT_LIST_NOTES=query all project list
-QUERY_AUTHORIZED_PROJECT_NOTES=query authorized project
-TASK_RECORD_TAG=task record related operation
-QUERY_TASK_RECORD_LIST_PAGING_NOTES=query task record list paging
-CREATE_TOKEN_NOTES=create token ,note: please login first
-QUERY_ACCESS_TOKEN_LIST_NOTES=query access token list paging
-SCHEDULE=schedule
-WARNING_TYPE=warning type(sending strategy)
-WARNING_GROUP_ID=warning group id
-FAILURE_STRATEGY=failure strategy
-RECEIVERS=receivers
-RECEIVERS_CC=receivers cc
-WORKER_GROUP_ID=worker server group id
-PROCESS_INSTANCE_PRIORITY=process instance priority
-UPDATE_SCHEDULE_NOTES=update schedule
-SCHEDULE_ID=schedule id
-ONLINE_SCHEDULE_NOTES=online schedule
-OFFLINE_SCHEDULE_NOTES=offline schedule
-QUERY_SCHEDULE_NOTES=query schedule
-QUERY_SCHEDULE_LIST_PAGING_NOTES=query schedule list paging
-LOGIN_TAG=User login related operations
-USER_NAME=user name
-PROJECT_NAME=project name
-CREATE_PROCESS_DEFINITION_NOTES=create process definition
-PROCESS_DEFINITION_NAME=process definition name
-PROCESS_DEFINITION_JSON=process definition detail info (json format)
-PROCESS_DEFINITION_LOCATIONS=process definition node locations info (json format)
-PROCESS_INSTANCE_LOCATIONS=process instance node locations info (json format)
-PROCESS_DEFINITION_CONNECTS=process definition node connects info (json format)
-PROCESS_INSTANCE_CONNECTS=process instance node connects info (json format)
-PROCESS_DEFINITION_DESC=process definition desc
-PROCESS_DEFINITION_TAG=process definition related opertation
-SIGNOUT_NOTES=logout
-USER_PASSWORD=user password
-UPDATE_PROCESS_INSTANCE_NOTES=update process instance
-QUERY_PROCESS_INSTANCE_LIST_NOTES=query process instance list
-VERIFY_PROCCESS_DEFINITION_NAME_NOTES=verify proccess definition name
-LOGIN_NOTES=user login
-UPDATE_PROCCESS_DEFINITION_NOTES=update proccess definition
-PROCESS_DEFINITION_ID=process definition id
-PROCESS_DEFINITION_IDS=process definition ids
-RELEASE_PROCCESS_DEFINITION_NOTES=release proccess definition
-QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=query proccess definition by id
-QUERY_PROCCESS_DEFINITION_LIST_NOTES=query proccess definition list
-QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=query proccess definition list paging
-QUERY_ALL_DEFINITION_LIST_NOTES=query all definition list
-PAGE_NO=page no
-PROCESS_INSTANCE_ID=process instance id
-PROCESS_INSTANCE_JSON=process instance info(json format)
-SCHEDULE_TIME=schedule time
-SYNC_DEFINE=update the information of the process instance to the process definition\
-
-RECOVERY_PROCESS_INSTANCE_FLAG=whether to recovery process instance
-SEARCH_VAL=search val
-USER_ID=user id
-PAGE_SIZE=page size
-LIMIT=limit
-VIEW_TREE_NOTES=view tree
-GET_NODE_LIST_BY_DEFINITION_ID_NOTES=get task node list by process definition id
-PROCESS_DEFINITION_ID_LIST=process definition id list
-QUERY_PROCCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=query proccess definition all by project id
-DELETE_PROCESS_DEFINITION_BY_ID_NOTES=delete process definition by process definition id
-BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=batch delete process definition by process definition ids
-QUERY_PROCESS_INSTANCE_BY_ID_NOTES=query process instance by process instance id
-DELETE_PROCESS_INSTANCE_BY_ID_NOTES=delete process instance by process instance id
-TASK_ID=task instance id
-SKIP_LINE_NUM=skip line num
-QUERY_TASK_INSTANCE_LOG_NOTES=query task instance log
-DOWNLOAD_TASK_INSTANCE_LOG_NOTES=download task instance log
-USERS_TAG=users related operation
-SCHEDULER_TAG=scheduler related operation
-CREATE_SCHEDULE_NOTES=create schedule
-CREATE_USER_NOTES=create user
-TENANT_ID=tenant id
-QUEUE=queue
-EMAIL=email
-PHONE=phone
-QUERY_USER_LIST_NOTES=query user list
-UPDATE_USER_NOTES=update user
-DELETE_USER_BY_ID_NOTES=delete user by id
-GRANT_PROJECT_NOTES=GRANT PROJECT
-PROJECT_IDS=project ids(string format, multiple projects separated by ",")
-GRANT_RESOURCE_NOTES=grant resource file
-RESOURCE_IDS=resource ids(string format, multiple resources separated by ",")
-GET_USER_INFO_NOTES=get user info
-LIST_USER_NOTES=list user
-VERIFY_USER_NAME_NOTES=verify user name
-UNAUTHORIZED_USER_NOTES=cancel authorization
-ALERT_GROUP_ID=alert group id
-AUTHORIZED_USER_NOTES=authorized user
-GRANT_UDF_FUNC_NOTES=grant udf function
-UDF_IDS=udf ids(string format, multiple udf functions separated by ",")
-GRANT_DATASOURCE_NOTES=grant datasource
-DATASOURCE_IDS=datasource ids(string format, multiple datasources separated by ",")
-QUERY_SUBPROCESS_INSTANCE_BY_TASK_ID_NOTES=query subprocess instance by task instance id
-QUERY_PARENT_PROCESS_INSTANCE_BY_SUB_PROCESS_INSTANCE_ID_NOTES=query parent process instance info by sub process instance id
-QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES=query process instance global variables and local variables
-VIEW_GANTT_NOTES=view gantt
-SUB_PROCESS_INSTANCE_ID=sub process instance id
-TASK_NAME=task instance name
-TASK_INSTANCE_TAG=task instance related operation
-LOGGER_TAG=log related operation
-PROCESS_INSTANCE_TAG=process instance related operation
-EXECUTION_STATUS=runing status for workflow and task nodes
-HOST=ip address of running task
-START_DATE=start date
-END_DATE=end date
-QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES=query task list by process instance id
-UPDATE_DATA_SOURCE_NOTES=update data source
-DATA_SOURCE_ID=DATA SOURCE ID
-QUERY_DATA_SOURCE_NOTES=query data source by id
-QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES=query data source list by database type
-QUERY_DATA_SOURCE_LIST_PAGING_NOTES=query data source list paging
-CONNECT_DATA_SOURCE_NOTES=CONNECT DATA SOURCE
-CONNECT_DATA_SOURCE_TEST_NOTES=connect data source test
-DELETE_DATA_SOURCE_NOTES=delete data source
-VERIFY_DATA_SOURCE_NOTES=verify data source
-UNAUTHORIZED_DATA_SOURCE_NOTES=unauthorized data source
-AUTHORIZED_DATA_SOURCE_NOTES=authorized data source
-DELETE_SCHEDULER_BY_ID_NOTES=delete scheduler by id
diff --git a/dockerfile/conf/dolphinscheduler/conf/i18n/messages_en_US.properties b/dockerfile/conf/dolphinscheduler/conf/i18n/messages_en_US.properties
deleted file mode 100644
index 24c0843c10..0000000000
--- a/dockerfile/conf/dolphinscheduler/conf/i18n/messages_en_US.properties
+++ /dev/null
@@ -1,252 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-QUERY_SCHEDULE_LIST_NOTES=query schedule list
-EXECUTE_PROCESS_TAG=execute process related operation
-PROCESS_INSTANCE_EXECUTOR_TAG=process instance executor related operation
-RUN_PROCESS_INSTANCE_NOTES=run process instance
-START_NODE_LIST=start node list(node name)
-TASK_DEPEND_TYPE=task depend type
-COMMAND_TYPE=command type
-RUN_MODE=run mode
-TIMEOUT=timeout
-EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=execute action to process instance
-EXECUTE_TYPE=execute type
-START_CHECK_PROCESS_DEFINITION_NOTES=start check process definition
-GET_RECEIVER_CC_NOTES=query receiver cc
-DESC=description
-GROUP_NAME=group name
-GROUP_TYPE=group type
-QUERY_ALERT_GROUP_LIST_NOTES=query alert group list
-UPDATE_ALERT_GROUP_NOTES=update alert group
-DELETE_ALERT_GROUP_BY_ID_NOTES=delete alert group by id
-VERIFY_ALERT_GROUP_NAME_NOTES=verify alert group name, check alert group exist or not
-GRANT_ALERT_GROUP_NOTES=grant alert group
-USER_IDS=user id list
-ALERT_GROUP_TAG=alert group related operation
-CREATE_ALERT_GROUP_NOTES=create alert group
-WORKER_GROUP_TAG=worker group related operation
-SAVE_WORKER_GROUP_NOTES=create worker group
-WORKER_GROUP_NAME=worker group name
-WORKER_IP_LIST=worker ip list, eg. 192.168.1.1,192.168.1.2
-QUERY_WORKER_GROUP_PAGING_NOTES=query worker group paging
-QUERY_WORKER_GROUP_LIST_NOTES=query worker group list
-DELETE_WORKER_GROUP_BY_ID_NOTES=delete worker group by id
-DATA_ANALYSIS_TAG=analysis related operation of task state
-COUNT_TASK_STATE_NOTES=count task state
-COUNT_PROCESS_INSTANCE_NOTES=count process instance state
-COUNT_PROCESS_DEFINITION_BY_USER_NOTES=count process definition by user
-COUNT_COMMAND_STATE_NOTES=count command state
-COUNT_QUEUE_STATE_NOTES=count the running status of the task in the queue\
-
-ACCESS_TOKEN_TAG=access token related operation
-MONITOR_TAG=monitor related operation
-MASTER_LIST_NOTES=master server list
-WORKER_LIST_NOTES=worker server list
-QUERY_DATABASE_STATE_NOTES=query database state
-QUERY_ZOOKEEPER_STATE_NOTES=QUERY ZOOKEEPER STATE
-TASK_STATE=task instance state
-SOURCE_TABLE=SOURCE TABLE
-DEST_TABLE=dest table
-TASK_DATE=task date
-QUERY_HISTORY_TASK_RECORD_LIST_PAGING_NOTES=query history task record list paging
-DATA_SOURCE_TAG=data source related operation
-CREATE_DATA_SOURCE_NOTES=create data source
-DATA_SOURCE_NAME=data source name
-DATA_SOURCE_NOTE=data source desc
-DB_TYPE=database type
-DATA_SOURCE_HOST=DATA SOURCE HOST
-DATA_SOURCE_PORT=data source port
-DATABASE_NAME=database name
-QUEUE_TAG=queue related operation
-QUERY_QUEUE_LIST_NOTES=query queue list
-QUERY_QUEUE_LIST_PAGING_NOTES=query queue list paging
-CREATE_QUEUE_NOTES=create queue
-YARN_QUEUE_NAME=yarn(hadoop) queue name
-QUEUE_ID=queue id
-TENANT_DESC=tenant desc
-QUERY_TENANT_LIST_PAGING_NOTES=query tenant list paging
-QUERY_TENANT_LIST_NOTES=query tenant list
-UPDATE_TENANT_NOTES=update tenant
-DELETE_TENANT_NOTES=delete tenant
-RESOURCES_TAG=resource center related operation
-CREATE_RESOURCE_NOTES=create resource
-RESOURCE_TYPE=resource file type
-RESOURCE_NAME=resource name
-RESOURCE_DESC=resource file desc
-RESOURCE_FILE=resource file
-RESOURCE_ID=resource id
-QUERY_RESOURCE_LIST_NOTES=query resource list
-DELETE_RESOURCE_BY_ID_NOTES=delete resource by id
-VIEW_RESOURCE_BY_ID_NOTES=view resource by id
-ONLINE_CREATE_RESOURCE_NOTES=online create resource
-SUFFIX=resource file suffix
-CONTENT=resource file content
-UPDATE_RESOURCE_NOTES=edit resource file online
-DOWNLOAD_RESOURCE_NOTES=download resource file
-CREATE_UDF_FUNCTION_NOTES=create udf function
-UDF_TYPE=UDF type
-FUNC_NAME=function name
-CLASS_NAME=package and class name
-ARG_TYPES=arguments
-UDF_DESC=udf desc
-VIEW_UDF_FUNCTION_NOTES=view udf function
-UPDATE_UDF_FUNCTION_NOTES=update udf function
-QUERY_UDF_FUNCTION_LIST_PAGING_NOTES=query udf function list paging
-VERIFY_UDF_FUNCTION_NAME_NOTES=verify udf function name
-DELETE_UDF_FUNCTION_NOTES=delete udf function
-AUTHORIZED_FILE_NOTES=authorized file
-UNAUTHORIZED_FILE_NOTES=unauthorized file
-AUTHORIZED_UDF_FUNC_NOTES=authorized udf func
-UNAUTHORIZED_UDF_FUNC_NOTES=unauthorized udf func
-VERIFY_QUEUE_NOTES=verify queue
-TENANT_TAG=tenant related operation
-CREATE_TENANT_NOTES=create tenant
-TENANT_CODE=tenant code
-TENANT_NAME=tenant name
-QUEUE_NAME=queue name
-PASSWORD=password
-DATA_SOURCE_OTHER=jdbc connection params, format:{"key1":"value1",...}
-PROJECT_TAG=project related operation
-CREATE_PROJECT_NOTES=create project
-PROJECT_DESC=project description
-UPDATE_PROJECT_NOTES=update project
-PROJECT_ID=project id
-QUERY_PROJECT_BY_ID_NOTES=query project info by project id
-QUERY_PROJECT_LIST_PAGING_NOTES=QUERY PROJECT LIST PAGING
-QUERY_ALL_PROJECT_LIST_NOTES=query all project list
-DELETE_PROJECT_BY_ID_NOTES=delete project by id
-QUERY_UNAUTHORIZED_PROJECT_NOTES=query unauthorized project
-QUERY_AUTHORIZED_PROJECT_NOTES=query authorized project
-TASK_RECORD_TAG=task record related operation
-QUERY_TASK_RECORD_LIST_PAGING_NOTES=query task record list paging
-CREATE_TOKEN_NOTES=create token ,note: please login first
-QUERY_ACCESS_TOKEN_LIST_NOTES=query access token list paging
-SCHEDULE=schedule
-WARNING_TYPE=warning type(sending strategy)
-WARNING_GROUP_ID=warning group id
-FAILURE_STRATEGY=failure strategy
-RECEIVERS=receivers
-RECEIVERS_CC=receivers cc
-WORKER_GROUP_ID=worker server group id
-PROCESS_INSTANCE_PRIORITY=process instance priority
-UPDATE_SCHEDULE_NOTES=update schedule
-SCHEDULE_ID=schedule id
-ONLINE_SCHEDULE_NOTES=online schedule
-OFFLINE_SCHEDULE_NOTES=offline schedule
-QUERY_SCHEDULE_NOTES=query schedule
-QUERY_SCHEDULE_LIST_PAGING_NOTES=query schedule list paging
-LOGIN_TAG=User login related operations
-USER_NAME=user name
-PROJECT_NAME=project name
-CREATE_PROCESS_DEFINITION_NOTES=create process definition
-PROCESS_DEFINITION_NAME=process definition name
-PROCESS_DEFINITION_JSON=process definition detail info (json format)
-PROCESS_DEFINITION_LOCATIONS=process definition node locations info (json format)
-PROCESS_INSTANCE_LOCATIONS=process instance node locations info (json format)
-PROCESS_DEFINITION_CONNECTS=process definition node connects info (json format)
-PROCESS_INSTANCE_CONNECTS=process instance node connects info (json format)
-PROCESS_DEFINITION_DESC=process definition desc
-PROCESS_DEFINITION_TAG=process definition related opertation
-SIGNOUT_NOTES=logout
-USER_PASSWORD=user password
-UPDATE_PROCESS_INSTANCE_NOTES=update process instance
-QUERY_PROCESS_INSTANCE_LIST_NOTES=query process instance list
-VERIFY_PROCCESS_DEFINITION_NAME_NOTES=verify proccess definition name
-LOGIN_NOTES=user login
-UPDATE_PROCCESS_DEFINITION_NOTES=update proccess definition
-PROCESS_DEFINITION_ID=process definition id
-PROCESS_DEFINITION_IDS=process definition ids
-RELEASE_PROCCESS_DEFINITION_NOTES=release proccess definition
-QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=query proccess definition by id
-QUERY_PROCCESS_DEFINITION_LIST_NOTES=query proccess definition list
-QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=query proccess definition list paging
-QUERY_ALL_DEFINITION_LIST_NOTES=query all definition list
-PAGE_NO=page no
-PROCESS_INSTANCE_ID=process instance id
-PROCESS_INSTANCE_JSON=process instance info(json format)
-SCHEDULE_TIME=schedule time
-SYNC_DEFINE=update the information of the process instance to the process definition\
-
-RECOVERY_PROCESS_INSTANCE_FLAG=whether to recovery process instance
-SEARCH_VAL=search val
-USER_ID=user id
-PAGE_SIZE=page size
-LIMIT=limit
-VIEW_TREE_NOTES=view tree
-GET_NODE_LIST_BY_DEFINITION_ID_NOTES=get task node list by process definition id
-PROCESS_DEFINITION_ID_LIST=process definition id list
-QUERY_PROCCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=query proccess definition all by project id
-DELETE_PROCESS_DEFINITION_BY_ID_NOTES=delete process definition by process definition id
-BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=batch delete process definition by process definition ids
-QUERY_PROCESS_INSTANCE_BY_ID_NOTES=query process instance by process instance id
-DELETE_PROCESS_INSTANCE_BY_ID_NOTES=delete process instance by process instance id
-TASK_ID=task instance id
-SKIP_LINE_NUM=skip line num
-QUERY_TASK_INSTANCE_LOG_NOTES=query task instance log
-DOWNLOAD_TASK_INSTANCE_LOG_NOTES=download task instance log
-USERS_TAG=users related operation
-SCHEDULER_TAG=scheduler related operation
-CREATE_SCHEDULE_NOTES=create schedule
-CREATE_USER_NOTES=create user
-TENANT_ID=tenant id
-QUEUE=queue
-EMAIL=email
-PHONE=phone
-QUERY_USER_LIST_NOTES=query user list
-UPDATE_USER_NOTES=update user
-DELETE_USER_BY_ID_NOTES=delete user by id
-GRANT_PROJECT_NOTES=GRANT PROJECT
-PROJECT_IDS=project ids(string format, multiple projects separated by ",")
-GRANT_RESOURCE_NOTES=grant resource file
-RESOURCE_IDS=resource ids(string format, multiple resources separated by ",")
-GET_USER_INFO_NOTES=get user info
-LIST_USER_NOTES=list user
-VERIFY_USER_NAME_NOTES=verify user name
-UNAUTHORIZED_USER_NOTES=cancel authorization
-ALERT_GROUP_ID=alert group id
-AUTHORIZED_USER_NOTES=authorized user
-GRANT_UDF_FUNC_NOTES=grant udf function
-UDF_IDS=udf ids(string format, multiple udf functions separated by ",")
-GRANT_DATASOURCE_NOTES=grant datasource
-DATASOURCE_IDS=datasource ids(string format, multiple datasources separated by ",")
-QUERY_SUBPROCESS_INSTANCE_BY_TASK_ID_NOTES=query subprocess instance by task instance id
-QUERY_PARENT_PROCESS_INSTANCE_BY_SUB_PROCESS_INSTANCE_ID_NOTES=query parent process instance info by sub process instance id
-QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES=query process instance global variables and local variables
-VIEW_GANTT_NOTES=view gantt
-SUB_PROCESS_INSTANCE_ID=sub process instance id
-TASK_NAME=task instance name
-TASK_INSTANCE_TAG=task instance related operation
-LOGGER_TAG=log related operation
-PROCESS_INSTANCE_TAG=process instance related operation
-EXECUTION_STATUS=runing status for workflow and task nodes
-HOST=ip address of running task
-START_DATE=start date
-END_DATE=end date
-QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES=query task list by process instance id
-UPDATE_DATA_SOURCE_NOTES=update data source
-DATA_SOURCE_ID=DATA SOURCE ID
-QUERY_DATA_SOURCE_NOTES=query data source by id
-QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES=query data source list by database type
-QUERY_DATA_SOURCE_LIST_PAGING_NOTES=query data source list paging
-CONNECT_DATA_SOURCE_NOTES=CONNECT DATA SOURCE
-CONNECT_DATA_SOURCE_TEST_NOTES=connect data source test
-DELETE_DATA_SOURCE_NOTES=delete data source
-VERIFY_DATA_SOURCE_NOTES=verify data source
-UNAUTHORIZED_DATA_SOURCE_NOTES=unauthorized data source
-AUTHORIZED_DATA_SOURCE_NOTES=authorized data source
-DELETE_SCHEDULER_BY_ID_NOTES=delete scheduler by id
diff --git a/dockerfile/conf/dolphinscheduler/conf/i18n/messages_zh_CN.properties b/dockerfile/conf/dolphinscheduler/conf/i18n/messages_zh_CN.properties
deleted file mode 100644
index 5f24a6fedd..0000000000
--- a/dockerfile/conf/dolphinscheduler/conf/i18n/messages_zh_CN.properties
+++ /dev/null
@@ -1,250 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-QUERY_SCHEDULE_LIST_NOTES=查询定时列表
-PROCESS_INSTANCE_EXECUTOR_TAG=流程实例执行相关操作
-RUN_PROCESS_INSTANCE_NOTES=运行流程实例
-START_NODE_LIST=开始节点列表(节点name)
-TASK_DEPEND_TYPE=任务依赖类型
-COMMAND_TYPE=指令类型
-RUN_MODE=运行模式
-TIMEOUT=超时时间
-EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=执行流程实例的各种操作(暂停、停止、重跑、恢复等)
-EXECUTE_TYPE=执行类型
-START_CHECK_PROCESS_DEFINITION_NOTES=检查流程定义
-DESC=备注(描述)
-GROUP_NAME=组名称
-GROUP_TYPE=组类型
-QUERY_ALERT_GROUP_LIST_NOTES=告警组列表\
-
-UPDATE_ALERT_GROUP_NOTES=编辑(更新)告警组
-DELETE_ALERT_GROUP_BY_ID_NOTES=删除告警组通过ID
-VERIFY_ALERT_GROUP_NAME_NOTES=检查告警组是否存在
-GRANT_ALERT_GROUP_NOTES=授权告警组
-USER_IDS=用户ID列表
-ALERT_GROUP_TAG=告警组相关操作
-WORKER_GROUP_TAG=Worker分组管理
-SAVE_WORKER_GROUP_NOTES=创建Worker分组\
-
-WORKER_GROUP_NAME=Worker分组名称
-WORKER_IP_LIST=Worker ip列表,注意:多个IP地址以逗号分割\
-
-QUERY_WORKER_GROUP_PAGING_NOTES=Worker分组管理
-QUERY_WORKER_GROUP_LIST_NOTES=查询worker group分组
-DELETE_WORKER_GROUP_BY_ID_NOTES=删除worker group通过ID
-DATA_ANALYSIS_TAG=任务状态分析相关操作
-COUNT_TASK_STATE_NOTES=任务状态统计
-COUNT_PROCESS_INSTANCE_NOTES=统计流程实例状态
-COUNT_PROCESS_DEFINITION_BY_USER_NOTES=统计用户创建的流程定义
-COUNT_COMMAND_STATE_NOTES=统计命令状态
-COUNT_QUEUE_STATE_NOTES=统计队列里任务状态
-ACCESS_TOKEN_TAG=access token相关操作,需要先登录
-MONITOR_TAG=监控相关操作
-MASTER_LIST_NOTES=master服务列表
-WORKER_LIST_NOTES=worker服务列表
-QUERY_DATABASE_STATE_NOTES=查询数据库状态
-QUERY_ZOOKEEPER_STATE_NOTES=查询Zookeeper状态
-TASK_STATE=任务实例状态
-SOURCE_TABLE=源表
-DEST_TABLE=目标表
-TASK_DATE=任务时间
-QUERY_HISTORY_TASK_RECORD_LIST_PAGING_NOTES=分页查询历史任务记录列表
-DATA_SOURCE_TAG=数据源相关操作
-CREATE_DATA_SOURCE_NOTES=创建数据源
-DATA_SOURCE_NAME=数据源名称
-DATA_SOURCE_NOTE=数据源描述
-DB_TYPE=数据源类型
-DATA_SOURCE_HOST=IP主机名
-DATA_SOURCE_PORT=数据源端口
-DATABASE_NAME=数据库名
-QUEUE_TAG=队列相关操作
-QUERY_QUEUE_LIST_NOTES=查询队列列表
-QUERY_QUEUE_LIST_PAGING_NOTES=分页查询队列列表
-CREATE_QUEUE_NOTES=创建队列
-YARN_QUEUE_NAME=hadoop yarn队列名
-QUEUE_ID=队列ID
-TENANT_DESC=租户描述
-QUERY_TENANT_LIST_PAGING_NOTES=分页查询租户列表
-QUERY_TENANT_LIST_NOTES=查询租户列表
-UPDATE_TENANT_NOTES=更新租户
-DELETE_TENANT_NOTES=删除租户
-RESOURCES_TAG=资源中心相关操作
-CREATE_RESOURCE_NOTES=创建资源
-RESOURCE_TYPE=资源文件类型
-RESOURCE_NAME=资源文件名称
-RESOURCE_DESC=资源文件描述
-RESOURCE_FILE=资源文件
-RESOURCE_ID=资源ID
-QUERY_RESOURCE_LIST_NOTES=查询资源列表
-DELETE_RESOURCE_BY_ID_NOTES=删除资源通过ID
-VIEW_RESOURCE_BY_ID_NOTES=浏览资源通通过ID
-ONLINE_CREATE_RESOURCE_NOTES=在线创建资源
-SUFFIX=资源文件后缀
-CONTENT=资源文件内容
-UPDATE_RESOURCE_NOTES=在线更新资源文件
-DOWNLOAD_RESOURCE_NOTES=下载资源文件
-CREATE_UDF_FUNCTION_NOTES=创建UDF函数
-UDF_TYPE=UDF类型
-FUNC_NAME=函数名称
-CLASS_NAME=包名类名
-ARG_TYPES=参数
-UDF_DESC=udf描述,使用说明
-VIEW_UDF_FUNCTION_NOTES=查看udf函数
-UPDATE_UDF_FUNCTION_NOTES=更新udf函数
-QUERY_UDF_FUNCTION_LIST_PAGING_NOTES=分页查询udf函数列表
-VERIFY_UDF_FUNCTION_NAME_NOTES=验证udf函数名
-DELETE_UDF_FUNCTION_NOTES=删除UDF函数
-AUTHORIZED_FILE_NOTES=授权文件
-UNAUTHORIZED_FILE_NOTES=取消授权文件
-AUTHORIZED_UDF_FUNC_NOTES=授权udf函数
-UNAUTHORIZED_UDF_FUNC_NOTES=取消udf函数授权
-VERIFY_QUEUE_NOTES=验证队列
-TENANT_TAG=租户相关操作
-CREATE_TENANT_NOTES=创建租户
-TENANT_CODE=租户编码
-TENANT_NAME=租户名称
-QUEUE_NAME=队列名
-PASSWORD=密码
-DATA_SOURCE_OTHER=jdbc连接参数,格式为:{"key1":"value1",...}
-PROJECT_TAG=项目相关操作
-CREATE_PROJECT_NOTES=创建项目
-PROJECT_DESC=项目描述
-UPDATE_PROJECT_NOTES=更新项目
-PROJECT_ID=项目ID
-QUERY_PROJECT_BY_ID_NOTES=通过项目ID查询项目信息
-QUERY_PROJECT_LIST_PAGING_NOTES=分页查询项目列表
-QUERY_ALL_PROJECT_LIST_NOTES=查询所有项目
-DELETE_PROJECT_BY_ID_NOTES=删除项目通过ID
-QUERY_UNAUTHORIZED_PROJECT_NOTES=查询未授权的项目
-QUERY_AUTHORIZED_PROJECT_NOTES=查询授权项目
-TASK_RECORD_TAG=任务记录相关操作
-QUERY_TASK_RECORD_LIST_PAGING_NOTES=分页查询任务记录列表
-CREATE_TOKEN_NOTES=创建token,注意需要先登录
-QUERY_ACCESS_TOKEN_LIST_NOTES=分页查询access token列表
-SCHEDULE=定时
-WARNING_TYPE=发送策略
-WARNING_GROUP_ID=发送组ID
-FAILURE_STRATEGY=失败策略
-RECEIVERS=收件人
-RECEIVERS_CC=收件人(抄送)
-WORKER_GROUP_ID=Worker Server分组ID
-PROCESS_INSTANCE_PRIORITY=流程实例优先级
-UPDATE_SCHEDULE_NOTES=更新定时
-SCHEDULE_ID=定时ID
-ONLINE_SCHEDULE_NOTES=定时上线
-OFFLINE_SCHEDULE_NOTES=定时下线
-QUERY_SCHEDULE_NOTES=查询定时
-QUERY_SCHEDULE_LIST_PAGING_NOTES=分页查询定时
-LOGIN_TAG=用户登录相关操作
-USER_NAME=用户名
-PROJECT_NAME=项目名称
-CREATE_PROCESS_DEFINITION_NOTES=创建流程定义
-PROCESS_DEFINITION_NAME=流程定义名称
-PROCESS_DEFINITION_JSON=流程定义详细信息(json格式)
-PROCESS_DEFINITION_LOCATIONS=流程定义节点坐标位置信息(json格式)
-PROCESS_INSTANCE_LOCATIONS=流程实例节点坐标位置信息(json格式)
-PROCESS_DEFINITION_CONNECTS=流程定义节点图标连接信息(json格式)
-PROCESS_INSTANCE_CONNECTS=流程实例节点图标连接信息(json格式)
-PROCESS_DEFINITION_DESC=流程定义描述信息
-PROCESS_DEFINITION_TAG=流程定义相关操作
-SIGNOUT_NOTES=退出登录
-USER_PASSWORD=用户密码
-UPDATE_PROCESS_INSTANCE_NOTES=更新流程实例
-QUERY_PROCESS_INSTANCE_LIST_NOTES=查询流程实例列表
-VERIFY_PROCCESS_DEFINITION_NAME_NOTES=验证流程定义名字
-LOGIN_NOTES=用户登录
-UPDATE_PROCCESS_DEFINITION_NOTES=更新流程定义
-PROCESS_DEFINITION_ID=流程定义ID
-RELEASE_PROCCESS_DEFINITION_NOTES=发布流程定义
-QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=查询流程定义通过流程定义ID
-QUERY_PROCCESS_DEFINITION_LIST_NOTES=查询流程定义列表
-QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=分页查询流程定义列表
-QUERY_ALL_DEFINITION_LIST_NOTES=查询所有流程定义
-PAGE_NO=页码号
-PROCESS_INSTANCE_ID=流程实例ID
-PROCESS_INSTANCE_IDS=流程实例ID集合
-PROCESS_INSTANCE_JSON=流程实例信息(json格式)
-SCHEDULE_TIME=定时时间
-SYNC_DEFINE=更新流程实例的信息是否同步到流程定义
-RECOVERY_PROCESS_INSTANCE_FLAG=是否恢复流程实例
-SEARCH_VAL=搜索值
-USER_ID=用户ID
-PAGE_SIZE=页大小
-LIMIT=显示多少条
-VIEW_TREE_NOTES=树状图
-GET_NODE_LIST_BY_DEFINITION_ID_NOTES=获得任务节点列表通过流程定义ID
-PROCESS_DEFINITION_ID_LIST=流程定义id列表
-QUERY_PROCCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=查询流程定义通过项目ID
-BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=批量删除流程定义通过流程定义ID集合
-DELETE_PROCESS_DEFINITION_BY_ID_NOTES=删除流程定义通过流程定义ID
-QUERY_PROCESS_INSTANCE_BY_ID_NOTES=查询流程实例通过流程实例ID
-DELETE_PROCESS_INSTANCE_BY_ID_NOTES=删除流程实例通过流程实例ID
-TASK_ID=任务实例ID
-SKIP_LINE_NUM=忽略行数
-QUERY_TASK_INSTANCE_LOG_NOTES=查询任务实例日志
-DOWNLOAD_TASK_INSTANCE_LOG_NOTES=下载任务实例日志
-USERS_TAG=用户相关操作
-SCHEDULER_TAG=定时相关操作
-CREATE_SCHEDULE_NOTES=创建定时
-CREATE_USER_NOTES=创建用户
-TENANT_ID=租户ID
-QUEUE=使用的队列
-EMAIL=邮箱
-PHONE=手机号
-QUERY_USER_LIST_NOTES=查询用户列表
-UPDATE_USER_NOTES=更新用户
-DELETE_USER_BY_ID_NOTES=删除用户通过ID
-GRANT_PROJECT_NOTES=授权项目
-PROJECT_IDS=项目IDS(字符串格式,多个项目以","分割)
-GRANT_RESOURCE_NOTES=授权资源文件
-RESOURCE_IDS=资源ID列表(字符串格式,多个资源ID以","分割)
-GET_USER_INFO_NOTES=获取用户信息
-LIST_USER_NOTES=用户列表
-VERIFY_USER_NAME_NOTES=验证用户名
-UNAUTHORIZED_USER_NOTES=取消授权
-ALERT_GROUP_ID=报警组ID
-AUTHORIZED_USER_NOTES=授权用户
-GRANT_UDF_FUNC_NOTES=授权udf函数
-UDF_IDS=udf函数id列表(字符串格式,多个udf函数ID以","分割)
-GRANT_DATASOURCE_NOTES=授权数据源
-DATASOURCE_IDS=数据源ID列表(字符串格式,多个数据源ID以","分割)
-QUERY_SUBPROCESS_INSTANCE_BY_TASK_ID_NOTES=查询子流程实例通过任务实例ID
-QUERY_PARENT_PROCESS_INSTANCE_BY_SUB_PROCESS_INSTANCE_ID_NOTES=查询父流程实例信息通过子流程实例ID
-QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES=查询流程实例全局变量和局部变量
-VIEW_GANTT_NOTES=浏览Gantt图
-SUB_PROCESS_INSTANCE_ID=子流程是咧ID
-TASK_NAME=任务实例名
-TASK_INSTANCE_TAG=任务实例相关操作
-LOGGER_TAG=日志相关操作
-PROCESS_INSTANCE_TAG=流程实例相关操作
-EXECUTION_STATUS=工作流和任务节点的运行状态
-HOST=运行任务的主机IP地址
-START_DATE=开始时间
-END_DATE=结束时间
-QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES=通过流程实例ID查询任务列表
-UPDATE_DATA_SOURCE_NOTES=更新数据源
-DATA_SOURCE_ID=数据源ID
-QUERY_DATA_SOURCE_NOTES=查询数据源通过ID
-QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES=查询数据源列表通过数据源类型
-QUERY_DATA_SOURCE_LIST_PAGING_NOTES=分页查询数据源列表
-CONNECT_DATA_SOURCE_NOTES=连接数据源
-CONNECT_DATA_SOURCE_TEST_NOTES=连接数据源测试
-DELETE_DATA_SOURCE_NOTES=删除数据源
-VERIFY_DATA_SOURCE_NOTES=验证数据源
-UNAUTHORIZED_DATA_SOURCE_NOTES=未授权的数据源
-AUTHORIZED_DATA_SOURCE_NOTES=授权的数据源
-DELETE_SCHEDULER_BY_ID_NOTES=根据定时id删除定时数据
diff --git a/dockerfile/conf/dolphinscheduler/conf/mail_templates/alert_mail_template.ftl b/dockerfile/conf/dolphinscheduler/conf/mail_templates/alert_mail_template.ftl
deleted file mode 100644
index c638609090..0000000000
--- a/dockerfile/conf/dolphinscheduler/conf/mail_templates/alert_mail_template.ftl
+++ /dev/null
@@ -1,17 +0,0 @@
-<#--
- ~ Licensed to the Apache Software Foundation (ASF) under one or more
- ~ contributor license agreements. See the NOTICE file distributed with
- ~ this work for additional information regarding copyright ownership.
- ~ The ASF licenses this file to You under the Apache License, Version 2.0
- ~ (the "License"); you may not use this file except in compliance with
- ~ the License. You may obtain a copy of the License at
- ~
- ~ http://www.apache.org/licenses/LICENSE-2.0
- ~
- ~ Unless required by applicable law or agreed to in writing, software
- ~ distributed under the License is distributed on an "AS IS" BASIS,
- ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- ~ See the License for the specific language governing permissions and
- ~ limitations under the License.
--->
-