diff --git a/dockerfile/dockerfile-1.0.x/Dockerfile b/dockerfile/dockerfile-1.0.x/Dockerfile
new file mode 100644
index 0000000000..cebf7d5114
--- /dev/null
+++ b/dockerfile/dockerfile-1.0.x/Dockerfile
@@ -0,0 +1,145 @@
+FROM ubuntu:18.04
+
+LABEL maintainer="journey <825193156@qq.com>"
+
+ENV LANG=C.UTF-8
+
+ARG version
+
+#1,安装jdk
+
+RUN apt-get update \
+ && apt-get -y install openjdk-8-jdk \
+ && rm -rf /var/lib/apt/lists/*
+
+ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64
+ENV PATH $JAVA_HOME/bin:$PATH
+
+
+#安装wget
+RUN apt-get update && \
+ apt-get -y install wget
+#2,安装ZK
+#RUN cd /opt && \
+# wget https://archive.apache.org/dist/zookeeper/zookeeper-3.4.6/zookeeper-3.4.6.tar.gz && \
+# tar -zxvf zookeeper-3.4.6.tar.gz && \
+# mv zookeeper-3.4.6 zookeeper && \
+# rm -rf ./zookeeper-*tar.gz && \
+# mkdir -p /tmp/zookeeper && \
+# rm -rf /opt/zookeeper/conf/zoo_sample.cfg
+
+RUN cd /opt && \
+ wget https://www-us.apache.org/dist/zookeeper/zookeeper-3.4.14/zookeeper-3.4.14.tar.gz && \
+ tar -zxvf zookeeper-3.4.14.tar.gz && \
+ mv zookeeper-3.4.14 zookeeper && \
+ rm -rf ./zookeeper-*tar.gz && \
+ mkdir -p /tmp/zookeeper && \
+ rm -rf /opt/zookeeper/conf/zoo_sample.cfg
+
+ADD ./conf/zookeeper/zoo.cfg /opt/zookeeper/conf
+ENV ZK_HOME=/opt/zookeeper
+ENV PATH $PATH:$ZK_HOME/bin
+
+#3,安装maven
+RUN cd /opt && \
+ wget http://apache-mirror.rbc.ru/pub/apache/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz && \
+ tar -zxvf apache-maven-3.3.9-bin.tar.gz && \
+ mv apache-maven-3.3.9 maven && \
+ rm -rf ./apache-maven-*tar.gz && \
+ rm -rf /opt/maven/conf/settings.xml
+ADD ./conf/maven/settings.xml /opt/maven/conf
+ENV MAVEN_HOME=/opt/maven
+ENV PATH $PATH:$MAVEN_HOME/bin
+
+#4,安装node
+RUN cd /opt && \
+ wget https://nodejs.org/download/release/v8.9.4/node-v8.9.4-linux-x64.tar.gz && \
+ tar -zxvf node-v8.9.4-linux-x64.tar.gz && \
+ mv node-v8.9.4-linux-x64 node && \
+ rm -rf ./node-v8.9.4-*tar.gz
+ENV NODE_HOME=/opt/node
+ENV PATH $PATH:$NODE_HOME/bin
+
+#5,下载escheduler
+RUN cd /opt && \
+ wget https://github.com/analysys/EasyScheduler/archive/${version}.tar.gz && \
+ tar -zxvf ${version}.tar.gz && \
+ mv EasyScheduler-${version} easyscheduler_source && \
+ rm -rf ./${version}.tar.gz
+
+#6,后端编译
+RUN cd /opt/easyscheduler_source && \
+ mvn -U clean package assembly:assembly -Dmaven.test.skip=true
+
+#7,前端编译
+RUN chmod -R 777 /opt/easyscheduler_source/escheduler-ui && \
+ cd /opt/easyscheduler_source/escheduler-ui && \
+ rm -rf /opt/easyscheduler_source/escheduler-ui/node_modules && \
+ npm install node-sass --unsafe-perm && \
+ npm install && \
+ npm run build
+#8,安装mysql
+RUN echo "deb http://cn.archive.ubuntu.com/ubuntu/ xenial main restricted universe multiverse" >> /etc/apt/sources.list
+
+RUN echo "mysql-server mysql-server/root_password password root" | debconf-set-selections
+RUN echo "mysql-server mysql-server/root_password_again password root" | debconf-set-selections
+
+RUN apt-get update && \
+ apt-get -y install mysql-server-5.7 && \
+ mkdir -p /var/lib/mysql && \
+ mkdir -p /var/run/mysqld && \
+ mkdir -p /var/log/mysql && \
+ chown -R mysql:mysql /var/lib/mysql && \
+ chown -R mysql:mysql /var/run/mysqld && \
+ chown -R mysql:mysql /var/log/mysql
+
+
+# UTF-8 and bind-address
+RUN sed -i -e "$ a [client]\n\n[mysql]\n\n[mysqld]" /etc/mysql/my.cnf && \
+ sed -i -e "s/\(\[client\]\)/\1\ndefault-character-set = utf8/g" /etc/mysql/my.cnf && \
+ sed -i -e "s/\(\[mysql\]\)/\1\ndefault-character-set = utf8/g" /etc/mysql/my.cnf && \
+ sed -i -e "s/\(\[mysqld\]\)/\1\ninit_connect='SET NAMES utf8'\ncharacter-set-server = utf8\ncollation-server=utf8_general_ci\nbind-address = 0.0.0.0/g" /etc/mysql/my.cnf
+
+
+#9,安装nginx
+RUN apt-get update && \
+ apt-get install -y nginx && \
+ rm -rf /var/lib/apt/lists/* && \
+ echo "\ndaemon off;" >> /etc/nginx/nginx.conf && \
+ chown -R www-data:www-data /var/lib/nginx
+
+#10,修改escheduler配置文件
+#后端配置
+RUN mkdir -p /opt/escheduler && \
+ tar -zxvf /opt/easyscheduler_source/target/escheduler-${version}.tar.gz -C /opt/escheduler && \
+ rm -rf /opt/escheduler/conf
+ADD ./conf/escheduler/conf /opt/escheduler/conf
+#前端nginx配置
+ADD ./conf/nginx/default.conf /etc/nginx/conf.d
+
+#11,开放端口
+EXPOSE 2181 2888 3888 3306 80 12345 8888
+
+#12,安装sudo,python,vim,ping和ssh
+RUN apt-get update && \
+ apt-get -y install sudo && \
+ apt-get -y install python && \
+ apt-get -y install vim && \
+ apt-get -y install iputils-ping && \
+ apt-get -y install net-tools && \
+ apt-get -y install openssh-server && \
+ apt-get -y install python-pip && \
+ pip install kazoo
+
+COPY ./startup.sh /root/startup.sh
+#13,修改权限和设置软连
+RUN chmod +x /root/startup.sh && \
+ chmod +x /opt/escheduler/script/create_escheduler.sh && \
+ chmod +x /opt/zookeeper/bin/zkServer.sh && \
+ chmod +x /opt/escheduler/bin/escheduler-daemon.sh && \
+ rm -rf /bin/sh && \
+ ln -s /bin/bash /bin/sh && \
+ mkdir -p /tmp/xls
+
+
+ENTRYPOINT ["/root/startup.sh"]
diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/alert.properties b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/alert.properties
new file mode 100644
index 0000000000..0b97c0d397
--- /dev/null
+++ b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/alert.properties
@@ -0,0 +1,21 @@
+#alert type is EMAIL/SMS
+alert.type=EMAIL
+
+# mail server configuration
+mail.protocol=SMTP
+mail.server.host=smtp.office365.com
+mail.server.port=587
+mail.sender=qiaozhanwei@outlook.com
+mail.passwd=eschedulerBJEG
+
+# TLS
+mail.smtp.starttls.enable=true
+# SSL
+mail.smtp.ssl.enable=false
+
+#xls file path,need create if not exist
+xls.file.path=/tmp/xls
+
+
+
+
diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/alert_logback.xml b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/alert_logback.xml
new file mode 100644
index 0000000000..c4ca8e9d1f
--- /dev/null
+++ b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/alert_logback.xml
@@ -0,0 +1,31 @@
+
+
+
+
+
+
+ [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
+
+ UTF-8
+
+
+
+
+ ${log.base}/escheduler-alert.log
+
+ ${log.base}/escheduler-alert.%d{yyyy-MM-dd_HH}.%i.log
+ 20
+ 64MB
+
+
+
+ [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
+
+ UTF-8
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/apiserver_logback.xml b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/apiserver_logback.xml
new file mode 100644
index 0000000000..43e6af951a
--- /dev/null
+++ b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/apiserver_logback.xml
@@ -0,0 +1,42 @@
+
+
+
+
+
+
+
+
+
+
+
+ [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
+
+ UTF-8
+
+
+
+
+
+
+ INFO
+
+ ${log.base}/escheduler-api-server.log
+
+ ${log.base}/escheduler-api-server.%d{yyyy-MM-dd_HH}.%i.log
+ 168
+ 64MB
+
+
+
+
+ [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
+
+ UTF-8
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/application.properties b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/application.properties
new file mode 100644
index 0000000000..bf003bfdda
--- /dev/null
+++ b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/application.properties
@@ -0,0 +1,16 @@
+# server port
+server.port=12345
+
+# session config
+server.session.timeout=7200
+
+
+server.context-path=/escheduler/
+
+# file size limit for upload
+spring.http.multipart.max-file-size=1024MB
+spring.http.multipart.max-request-size=1024MB
+
+#post content
+server.max-http-post-size=5000000
+
diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/application_master.properties b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/application_master.properties
new file mode 100644
index 0000000000..cc4774ae94
--- /dev/null
+++ b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/application_master.properties
@@ -0,0 +1 @@
+logging.config=classpath:master_logback.xml
diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/common/common.properties b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/common/common.properties
new file mode 100644
index 0000000000..698c27d446
--- /dev/null
+++ b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/common/common.properties
@@ -0,0 +1,27 @@
+#task queue implementation, default "zookeeper"
+escheduler.queue.impl=zookeeper
+
+# user data directory path, self configuration, please make sure the directory exists and have read write permissions
+data.basedir.path=/tmp/escheduler
+
+# directory path for user data download. self configuration, please make sure the directory exists and have read write permissions
+data.download.basedir.path=/tmp/escheduler/download
+
+# process execute directory. self configuration, please make sure the directory exists and have read write permissions
+process.exec.basepath=/tmp/escheduler/exec
+
+# data base dir, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions。"/escheduler" is recommended
+data.store2hdfs.basepath=/escheduler
+
+# whether hdfs starts
+hdfs.startup.state=false
+
+# system env path. self configuration, please make sure the directory and file exists and have read write execute permissions
+escheduler.env.path=/opt/escheduler/conf/env/.escheduler_env.sh
+
+#resource.view.suffixs
+resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml
+
+# is development state? default "false"
+development.state=false
+
diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/common/hadoop/hadoop.properties b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/common/hadoop/hadoop.properties
new file mode 100644
index 0000000000..f210ae7533
--- /dev/null
+++ b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/common/hadoop/hadoop.properties
@@ -0,0 +1,8 @@
+# ha or single namenode,If namenode ha needs to copy core-site.xml and hdfs-site.xml to the conf directory
+fs.defaultFS=hdfs://mycluster:8020
+
+# resourcemanager HA: list the RM host IPs (comma separated); leave empty for a single resourcemanager
+yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx
+
+# If it is a single resourcemanager, you only need to configure one host name. If it is resourcemanager HA, the default configuration is fine
+yarn.application.status.address=http://ark1:8088/ws/v1/cluster/apps/%s
\ No newline at end of file
diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/config/install_config.conf b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/config/install_config.conf
new file mode 100644
index 0000000000..43b955d4f1
--- /dev/null
+++ b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/config/install_config.conf
@@ -0,0 +1,3 @@
+installPath=/data1_1T/escheduler
+deployUser=escheduler
+ips=ark0,ark1,ark2,ark3,ark4
diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/config/run_config.conf b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/config/run_config.conf
new file mode 100644
index 0000000000..f4cfd832c4
--- /dev/null
+++ b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/config/run_config.conf
@@ -0,0 +1,4 @@
+masters=ark0,ark1
+workers=ark2,ark3,ark4
+alertServer=ark3
+apiServers=ark1
\ No newline at end of file
diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/dao/data_source.properties b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/dao/data_source.properties
new file mode 100644
index 0000000000..0dce2943e4
--- /dev/null
+++ b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/dao/data_source.properties
@@ -0,0 +1,53 @@
+# base spring data source configuration
+spring.datasource.type=com.alibaba.druid.pool.DruidDataSource
+spring.datasource.driver-class-name=com.mysql.jdbc.Driver
+spring.datasource.url=jdbc:mysql://127.0.0.1:3306/escheduler?characterEncoding=UTF-8
+spring.datasource.username=root
+spring.datasource.password=root@123
+
+# connection configuration
+spring.datasource.initialSize=5
+# min connection number
+spring.datasource.minIdle=5
+# max connection number
+spring.datasource.maxActive=50
+
+# max wait time for get a connection in milliseconds. if configuring maxWait, fair locks are enabled by default and concurrency efficiency decreases.
+# If necessary, unfair locks can be used by configuring the useUnfairLock attribute to true.
+spring.datasource.maxWait=60000
+
+# milliseconds for check to close free connections
+spring.datasource.timeBetweenEvictionRunsMillis=60000
+
+# the Destroy thread detects the connection interval and closes the physical connection in milliseconds if the connection idle time is greater than or equal to minEvictableIdleTimeMillis.
+spring.datasource.timeBetweenConnectErrorMillis=60000
+
+# the longest time a connection remains idle without being evicted, in milliseconds
+spring.datasource.minEvictableIdleTimeMillis=300000
+
+#the SQL used to check whether the connection is valid requires a query statement. If validation Query is null, testOnBorrow, testOnReturn, and testWhileIdle will not work.
+spring.datasource.validationQuery=SELECT 1
+#check whether the connection is valid for timeout, in seconds
+spring.datasource.validationQueryTimeout=3
+
+# when applying for a connection, if it is detected that the connection is idle longer than time Between Eviction Runs Millis,
+# validation Query is performed to check whether the connection is valid
+spring.datasource.testWhileIdle=true
+
+#execute validation to check if the connection is valid when applying for a connection
+spring.datasource.testOnBorrow=true
+#execute validation to check if the connection is valid when the connection is returned
+spring.datasource.testOnReturn=false
+spring.datasource.defaultAutoCommit=true
+spring.datasource.keepAlive=true
+
+# open PSCache, specify count PSCache for every connection
+spring.datasource.poolPreparedStatements=true
+spring.datasource.maxPoolPreparedStatementPerConnectionSize=20
+
+# data quality analysis is not currently in use. please ignore the following configuration
+# task record flag
+task.record.flag=false
+task.record.datasource.url=jdbc:mysql://192.168.xx.xx:3306/etl?characterEncoding=UTF-8
+task.record.datasource.username=xx
+task.record.datasource.password=xx
diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/env/.escheduler_env.sh b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/env/.escheduler_env.sh
new file mode 100644
index 0000000000..5b4c3e1b8d
--- /dev/null
+++ b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/env/.escheduler_env.sh
@@ -0,0 +1,4 @@
+export PYTHON_HOME=/usr/bin/python
+export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
+
+export PATH=$PYTHON_HOME:$JAVA_HOME/bin:$PATH
\ No newline at end of file
diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/mail_templates/alert_mail_template.ftl b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/mail_templates/alert_mail_template.ftl
new file mode 100644
index 0000000000..0ff763fa28
--- /dev/null
+++ b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/mail_templates/alert_mail_template.ftl
@@ -0,0 +1 @@
+
+easyscheduler<#if title??> ${title}</#if><#if content??> ${content}</#if>
\ No newline at end of file
diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/master.properties b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/master.properties
new file mode 100644
index 0000000000..9080defc7b
--- /dev/null
+++ b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/master.properties
@@ -0,0 +1,21 @@
+# master execute thread num
+master.exec.threads=100
+
+# master execute task number in parallel
+master.exec.task.number=20
+
+# master heartbeat interval
+master.heartbeat.interval=10
+
+# master commit task retry times
+master.task.commit.retryTimes=5
+
+# master commit task interval
+master.task.commit.interval=100
+
+
+# only less than cpu avg load, master server can work. default value : the number of cpu cores * 2
+master.max.cpuload.avg=10
+
+# only larger than reserved memory, master server can work. default value : physical memory * 1/10, unit is G.
+master.reserved.memory=1
diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/master_logback.xml b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/master_logback.xml
new file mode 100644
index 0000000000..d93878218e
--- /dev/null
+++ b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/master_logback.xml
@@ -0,0 +1,34 @@
+
+
+
+
+
+
+ [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
+
+ UTF-8
+
+
+
+
+ ${log.base}/escheduler-master.log
+
+ INFO
+
+
+ ${log.base}/escheduler-master.%d{yyyy-MM-dd_HH}.%i.log
+ 168
+ 200MB
+
+
+
+ [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
+
+ UTF-8
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/quartz.properties b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/quartz.properties
new file mode 100644
index 0000000000..3479379418
--- /dev/null
+++ b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/quartz.properties
@@ -0,0 +1,39 @@
+#============================================================================
+# Configure Main Scheduler Properties
+#============================================================================
+org.quartz.scheduler.instanceName = EasyScheduler
+org.quartz.scheduler.instanceId = AUTO
+org.quartz.scheduler.makeSchedulerThreadDaemon = true
+org.quartz.jobStore.useProperties = false
+
+#============================================================================
+# Configure ThreadPool
+#============================================================================
+
+org.quartz.threadPool.class = org.quartz.simpl.SimpleThreadPool
+org.quartz.threadPool.makeThreadsDaemons = true
+org.quartz.threadPool.threadCount = 25
+org.quartz.threadPool.threadPriority = 5
+
+#============================================================================
+# Configure JobStore
+#============================================================================
+
+org.quartz.jobStore.class = org.quartz.impl.jdbcjobstore.JobStoreTX
+org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.StdJDBCDelegate
+org.quartz.jobStore.tablePrefix = QRTZ_
+org.quartz.jobStore.isClustered = true
+org.quartz.jobStore.misfireThreshold = 60000
+org.quartz.jobStore.clusterCheckinInterval = 5000
+org.quartz.jobStore.dataSource = myDs
+
+#============================================================================
+# Configure Datasources
+#============================================================================
+
+org.quartz.dataSource.myDs.driver = com.mysql.jdbc.Driver
+org.quartz.dataSource.myDs.URL = jdbc:mysql://127.0.0.1:3306/escheduler?characterEncoding=utf8
+org.quartz.dataSource.myDs.user = root
+org.quartz.dataSource.myDs.password = root@123
+org.quartz.dataSource.myDs.maxConnections = 10
+org.quartz.dataSource.myDs.validationQuery = select 1
diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/worker.properties b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/worker.properties
new file mode 100644
index 0000000000..e58bd86dcf
--- /dev/null
+++ b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/worker.properties
@@ -0,0 +1,15 @@
+# worker execute thread num
+worker.exec.threads=100
+
+# worker heartbeat interval
+worker.heartbeat.interval=10
+
+# submit the number of tasks at a time
+worker.fetch.task.num = 3
+
+
+# only less than cpu avg load, worker server can work. default value : the number of cpu cores * 2
+#worker.max.cpuload.avg=10
+
+# only larger than reserved memory, worker server can work. default value : physical memory * 1/6, unit is G.
+worker.reserved.memory=1
\ No newline at end of file
diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/worker_logback.xml b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/worker_logback.xml
new file mode 100644
index 0000000000..32914ec84f
--- /dev/null
+++ b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/worker_logback.xml
@@ -0,0 +1,53 @@
+
+
+
+
+
+
+ [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
+
+ UTF-8
+
+
+
+
+ INFO
+
+
+ ${log.base}/{processDefinitionId}/{processInstanceId}/{taskInstanceId}.log
+
+
+ [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
+
+ UTF-8
+
+ true
+
+
+
+ ${log.base}/escheduler-worker.log
+
+ INFO
+
+
+
+ ${log.base}/escheduler-worker.%d{yyyy-MM-dd_HH}.%i.log
+ 168
+ 200MB
+
+
+
+
+ [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
+
+ UTF-8
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/zookeeper.properties b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/zookeeper.properties
new file mode 100644
index 0000000000..092688a153
--- /dev/null
+++ b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/zookeeper.properties
@@ -0,0 +1,25 @@
+#zookeeper cluster
+zookeeper.quorum=127.0.0.1:2181
+
+#escheduler root directory
+zookeeper.escheduler.root=/escheduler
+
+#zookeeper server directory
+zookeeper.escheduler.dead.servers=/escheduler/dead-servers
+zookeeper.escheduler.masters=/escheduler/masters
+zookeeper.escheduler.workers=/escheduler/workers
+
+#zookeeper lock directory
+zookeeper.escheduler.lock.masters=/escheduler/lock/masters
+zookeeper.escheduler.lock.workers=/escheduler/lock/workers
+
+#escheduler failover directory
+zookeeper.escheduler.lock.failover.masters=/escheduler/lock/failover/masters
+zookeeper.escheduler.lock.failover.workers=/escheduler/lock/failover/workers
+zookeeper.escheduler.lock.failover.startup.masters=/escheduler/lock/failover/startup-masters
+
+#zookeeper session/connection timeout and retry configuration
+zookeeper.session.timeout=300
+zookeeper.connection.timeout=300
+zookeeper.retry.sleep=1000
+zookeeper.retry.maxtime=5
diff --git a/dockerfile/dockerfile-1.0.x/conf/maven/settings.xml b/dockerfile/dockerfile-1.0.x/conf/maven/settings.xml
new file mode 100644
index 0000000000..6bdea4a1bf
--- /dev/null
+++ b/dockerfile/dockerfile-1.0.x/conf/maven/settings.xml
@@ -0,0 +1,263 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ nexus-aliyun
+ central
+ Nexus aliyun
+ http://maven.aliyun.com/nexus/content/groups/public
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/dockerfile/dockerfile-1.0.x/conf/nginx/default.conf b/dockerfile/dockerfile-1.0.x/conf/nginx/default.conf
new file mode 100644
index 0000000000..2d43c32b63
--- /dev/null
+++ b/dockerfile/dockerfile-1.0.x/conf/nginx/default.conf
@@ -0,0 +1,31 @@
+server {
+ listen 8888;
+ server_name localhost;
+ #charset koi8-r;
+ #access_log /var/log/nginx/host.access.log main;
+ location / {
+ root /opt/easyscheduler_source/escheduler-ui/dist;
+        index  index.html index.htm;
+ }
+ location /escheduler {
+ proxy_pass http://127.0.0.1:12345;
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+        proxy_set_header x_real_ip $remote_addr;
+ proxy_set_header remote_addr $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_http_version 1.1;
+ proxy_connect_timeout 300s;
+ proxy_read_timeout 300s;
+ proxy_send_timeout 300s;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection "upgrade";
+ }
+ #error_page 404 /404.html;
+ # redirect server error pages to the static page /50x.html
+ #
+ error_page 500 502 503 504 /50x.html;
+ location = /50x.html {
+ root /usr/share/nginx/html;
+ }
+}
diff --git a/dockerfile/dockerfile-1.0.x/conf/zookeeper/zoo.cfg b/dockerfile/dockerfile-1.0.x/conf/zookeeper/zoo.cfg
new file mode 100644
index 0000000000..a5a2c0bbe3
--- /dev/null
+++ b/dockerfile/dockerfile-1.0.x/conf/zookeeper/zoo.cfg
@@ -0,0 +1,28 @@
+# The number of milliseconds of each tick
+tickTime=2000
+# The number of ticks that the initial
+# synchronization phase can take
+initLimit=10
+# The number of ticks that can pass between
+# sending a request and getting an acknowledgement
+syncLimit=5
+# the directory where the snapshot is stored.
+# do not use /tmp for storage, /tmp here is just
+# example sakes.
+dataDir=/tmp/zookeeper
+# the port at which the clients will connect
+clientPort=2181
+# the maximum number of client connections.
+# increase this if you need to handle more clients
+#maxClientCnxns=60
+#
+# Be sure to read the maintenance section of the
+# administrator guide before turning on autopurge.
+#
+# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
+#
+# The number of snapshots to retain in dataDir
+#autopurge.snapRetainCount=3
+# Purge task interval in hours
+# Set to "0" to disable auto purge feature
+#autopurge.purgeInterval=1
diff --git a/dockerfile/dockerfile-1.0.x/hooks/build b/dockerfile/dockerfile-1.0.x/hooks/build
new file mode 100644
index 0000000000..aebf6e4b23
--- /dev/null
+++ b/dockerfile/dockerfile-1.0.x/hooks/build
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+echo "------ escheduler start - build -------"
+printenv
+
+docker build --build-arg version=$version -t $DOCKER_REPO:$version .
+
+echo "------ escheduler end - build -------"
diff --git a/dockerfile/dockerfile-1.0.x/hooks/push b/dockerfile/dockerfile-1.0.x/hooks/push
new file mode 100644
index 0000000000..7b98da1a8d
--- /dev/null
+++ b/dockerfile/dockerfile-1.0.x/hooks/push
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+echo "------ push start -------"
+printenv
+
+docker push $DOCKER_REPO:$version
+
+echo "------ push end -------"
diff --git a/dockerfile/dockerfile-1.0.x/startup.sh b/dockerfile/dockerfile-1.0.x/startup.sh
new file mode 100644
index 0000000000..5ed150e1ba
--- /dev/null
+++ b/dockerfile/dockerfile-1.0.x/startup.sh
@@ -0,0 +1,79 @@
+#! /bin/bash
+
+set -e
+if [ `netstat -anop|grep mysql|wc -l` -gt 0 ];then
+ echo "MySQL is Running."
+else
+ MYSQL_ROOT_PWD="root@123"
+ ESZ_DB="escheduler"
+ echo "启动mysql服务"
+ chown -R mysql:mysql /var/lib/mysql /var/run/mysqld
+    find /var/lib/mysql -type f -exec touch {} \; && service mysql restart && sleep 10
+ if [ ! -f /nohup.out ];then
+ echo "设置mysql密码"
+ mysql --user=root --password=root -e "UPDATE mysql.user set authentication_string=password('$MYSQL_ROOT_PWD') where user='root'; FLUSH PRIVILEGES;"
+
+ echo "设置mysql权限"
+ mysql --user=root --password=$MYSQL_ROOT_PWD -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY '$MYSQL_ROOT_PWD' WITH GRANT OPTION; FLUSH PRIVILEGES;"
+ echo "创建escheduler数据库"
+ mysql --user=root --password=$MYSQL_ROOT_PWD -e "CREATE DATABASE IF NOT EXISTS \`$ESZ_DB\` CHARACTER SET utf8 COLLATE utf8_general_ci; FLUSH PRIVILEGES;"
+ echo "导入mysql数据"
+ nohup /opt/escheduler/script/create_escheduler.sh &
+ fi
+
+ if [ `mysql --user=root --password=$MYSQL_ROOT_PWD -s -r -e "SELECT count(TABLE_NAME) FROM information_schema.TABLES WHERE TABLE_SCHEMA='escheduler';" | grep -v count` -eq 38 ];then
+ echo "\`$ESZ_DB\` 表个数正确"
+ else
+ echo "\`$ESZ_DB\` 表个数不正确"
+ mysql --user=root --password=$MYSQL_ROOT_PWD -e "DROP DATABASE \`$ESZ_DB\`;"
+ echo "创建escheduler数据库"
+ mysql --user=root --password=$MYSQL_ROOT_PWD -e "CREATE DATABASE IF NOT EXISTS \`$ESZ_DB\` CHARACTER SET utf8 COLLATE utf8_general_ci; FLUSH PRIVILEGES;"
+ echo "导入mysql数据"
+ nohup /opt/escheduler/script/create_escheduler.sh &
+ fi
+fi
+
+/opt/zookeeper/bin/zkServer.sh restart
+
+sleep 10
+
+echo "启动api-server"
+/opt/escheduler/bin/escheduler-daemon.sh stop api-server
+/opt/escheduler/bin/escheduler-daemon.sh start api-server
+
+
+
+echo "启动master-server"
+/opt/escheduler/bin/escheduler-daemon.sh stop master-server
+python /opt/escheduler/script/del_zk_node.py 127.0.0.1 /escheduler/masters
+/opt/escheduler/bin/escheduler-daemon.sh start master-server
+
+echo "启动worker-server"
+/opt/escheduler/bin/escheduler-daemon.sh stop worker-server
+python /opt/escheduler/script/del_zk_node.py 127.0.0.1 /escheduler/workers
+/opt/escheduler/bin/escheduler-daemon.sh start worker-server
+
+
+echo "启动logger-server"
+/opt/escheduler/bin/escheduler-daemon.sh stop logger-server
+/opt/escheduler/bin/escheduler-daemon.sh start logger-server
+
+
+echo "启动alert-server"
+/opt/escheduler/bin/escheduler-daemon.sh stop alert-server
+/opt/escheduler/bin/escheduler-daemon.sh start alert-server
+
+
+
+
+
+echo "启动nginx"
+/etc/init.d/nginx stop
+nginx &
+
+
+while true
+do
+ sleep 101
+done
+exec "$@"
diff --git a/dockerfile/dockerfile-1.1.x/Dockerfile b/dockerfile/dockerfile-1.1.x/Dockerfile
new file mode 100644
index 0000000000..d194fd7bdb
--- /dev/null
+++ b/dockerfile/dockerfile-1.1.x/Dockerfile
@@ -0,0 +1,146 @@
+FROM ubuntu:18.04
+
+LABEL maintainer="journey <825193156@qq.com>"
+
+ENV LANG=C.UTF-8
+
+ARG version
+ARG tar_version
+
+#1,安装jdk
+
+RUN apt-get update \
+ && apt-get -y install openjdk-8-jdk \
+ && rm -rf /var/lib/apt/lists/*
+
+ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64
+ENV PATH $JAVA_HOME/bin:$PATH
+
+
+#安装wget
+RUN apt-get update && \
+ apt-get -y install wget
+#2,安装ZK
+#RUN cd /opt && \
+# wget https://archive.apache.org/dist/zookeeper/zookeeper-3.4.6/zookeeper-3.4.6.tar.gz && \
+# tar -zxvf zookeeper-3.4.6.tar.gz && \
+# mv zookeeper-3.4.6 zookeeper && \
+# rm -rf ./zookeeper-*tar.gz && \
+# mkdir -p /tmp/zookeeper && \
+# rm -rf /opt/zookeeper/conf/zoo_sample.cfg
+
+RUN cd /opt && \
+ wget https://www-us.apache.org/dist/zookeeper/zookeeper-3.4.14/zookeeper-3.4.14.tar.gz && \
+ tar -zxvf zookeeper-3.4.14.tar.gz && \
+ mv zookeeper-3.4.14 zookeeper && \
+ rm -rf ./zookeeper-*tar.gz && \
+ mkdir -p /tmp/zookeeper && \
+ rm -rf /opt/zookeeper/conf/zoo_sample.cfg
+
+ADD ./conf/zookeeper/zoo.cfg /opt/zookeeper/conf
+ENV ZK_HOME=/opt/zookeeper
+ENV PATH $PATH:$ZK_HOME/bin
+
+#3,安装maven
+RUN cd /opt && \
+ wget http://apache-mirror.rbc.ru/pub/apache/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz && \
+ tar -zxvf apache-maven-3.3.9-bin.tar.gz && \
+ mv apache-maven-3.3.9 maven && \
+ rm -rf ./apache-maven-*tar.gz && \
+ rm -rf /opt/maven/conf/settings.xml
+ADD ./conf/maven/settings.xml /opt/maven/conf
+ENV MAVEN_HOME=/opt/maven
+ENV PATH $PATH:$MAVEN_HOME/bin
+
+#4,安装node
+RUN cd /opt && \
+ wget https://nodejs.org/download/release/v8.9.4/node-v8.9.4-linux-x64.tar.gz && \
+ tar -zxvf node-v8.9.4-linux-x64.tar.gz && \
+ mv node-v8.9.4-linux-x64 node && \
+ rm -rf ./node-v8.9.4-*tar.gz
+ENV NODE_HOME=/opt/node
+ENV PATH $PATH:$NODE_HOME/bin
+
+#5,下载escheduler
+RUN cd /opt && \
+ wget https://github.com/analysys/EasyScheduler/archive/${version}.tar.gz && \
+ tar -zxvf ${version}.tar.gz && \
+ mv EasyScheduler-${version} easyscheduler_source && \
+ rm -rf ./${version}.tar.gz
+
+#6,后端编译
+RUN cd /opt/easyscheduler_source && \
+ mvn -U clean package assembly:assembly -Dmaven.test.skip=true
+
+#7,前端编译
+RUN chmod -R 777 /opt/easyscheduler_source/escheduler-ui && \
+ cd /opt/easyscheduler_source/escheduler-ui && \
+ rm -rf /opt/easyscheduler_source/escheduler-ui/node_modules && \
+ npm install node-sass --unsafe-perm && \
+ npm install && \
+ npm run build
+#8,安装mysql
+RUN echo "deb http://cn.archive.ubuntu.com/ubuntu/ xenial main restricted universe multiverse" >> /etc/apt/sources.list
+
+RUN echo "mysql-server mysql-server/root_password password root" | debconf-set-selections
+RUN echo "mysql-server mysql-server/root_password_again password root" | debconf-set-selections
+
+RUN apt-get update && \
+ apt-get -y install mysql-server-5.7 && \
+ mkdir -p /var/lib/mysql && \
+ mkdir -p /var/run/mysqld && \
+ mkdir -p /var/log/mysql && \
+ chown -R mysql:mysql /var/lib/mysql && \
+ chown -R mysql:mysql /var/run/mysqld && \
+ chown -R mysql:mysql /var/log/mysql
+
+
+# UTF-8 and bind-address
+RUN sed -i -e "$ a [client]\n\n[mysql]\n\n[mysqld]" /etc/mysql/my.cnf && \
+ sed -i -e "s/\(\[client\]\)/\1\ndefault-character-set = utf8/g" /etc/mysql/my.cnf && \
+ sed -i -e "s/\(\[mysql\]\)/\1\ndefault-character-set = utf8/g" /etc/mysql/my.cnf && \
+ sed -i -e "s/\(\[mysqld\]\)/\1\ninit_connect='SET NAMES utf8'\ncharacter-set-server = utf8\ncollation-server=utf8_general_ci\nbind-address = 0.0.0.0/g" /etc/mysql/my.cnf
+
+
+#9,安装nginx
+RUN apt-get update && \
+ apt-get install -y nginx && \
+ rm -rf /var/lib/apt/lists/* && \
+ echo "\ndaemon off;" >> /etc/nginx/nginx.conf && \
+ chown -R www-data:www-data /var/lib/nginx
+
+#10,修改escheduler配置文件
+#后端配置
+RUN mkdir -p /opt/escheduler && \
+ tar -zxvf /opt/easyscheduler_source/target/escheduler-${tar_version}.tar.gz -C /opt/escheduler && \
+ rm -rf /opt/escheduler/conf
+ADD ./conf/escheduler/conf /opt/escheduler/conf
+#前端nginx配置
+ADD ./conf/nginx/default.conf /etc/nginx/conf.d
+
+#11,开放端口
+EXPOSE 2181 2888 3888 3306 80 12345 8888
+
+#12,安装sudo,python,vim,ping和ssh
+RUN apt-get update && \
+ apt-get -y install sudo && \
+ apt-get -y install python && \
+ apt-get -y install vim && \
+ apt-get -y install iputils-ping && \
+ apt-get -y install net-tools && \
+ apt-get -y install openssh-server && \
+ apt-get -y install python-pip && \
+ pip install kazoo
+
+COPY ./startup.sh /root/startup.sh
+#13,修改权限和设置软连
+RUN chmod +x /root/startup.sh && \
+ chmod +x /opt/escheduler/script/create_escheduler.sh && \
+ chmod +x /opt/zookeeper/bin/zkServer.sh && \
+ chmod +x /opt/escheduler/bin/escheduler-daemon.sh && \
+ rm -rf /bin/sh && \
+ ln -s /bin/bash /bin/sh && \
+ mkdir -p /tmp/xls
+
+
+ENTRYPOINT ["/root/startup.sh"]
diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/alert.properties b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/alert.properties
new file mode 100644
index 0000000000..df7d8372d7
--- /dev/null
+++ b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/alert.properties
@@ -0,0 +1,30 @@
+#alert type is EMAIL/SMS
+alert.type=EMAIL
+
+# mail server configuration
+mail.protocol=SMTP
+mail.server.host=smtp.office365.com
+mail.server.port=587
+mail.sender=qiaozhanwei@outlook.com
+mail.passwd=xxxxxxx
+
+# TLS
+mail.smtp.starttls.enable=true
+# SSL
+mail.smtp.ssl.enable=false
+
+#xls file path,need create if not exist
+xls.file.path=/tmp/xls
+
+# Enterprise WeChat configuration
+enterprise.wechat.corp.id=xxxxxxx
+enterprise.wechat.secret=xxxxxxx
+enterprise.wechat.agent.id=xxxxxxx
+enterprise.wechat.users=xxxxxxx
+enterprise.wechat.token.url=https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret
+enterprise.wechat.push.url=https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token
+enterprise.wechat.team.send.msg={\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}
+enterprise.wechat.user.send.msg={\"touser\":\"$toUser\",\"agentid\":\"$agentId\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"$msg\"}}
+
+
+
diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/alert_logback.xml b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/alert_logback.xml
new file mode 100644
index 0000000000..c4ca8e9d1f
--- /dev/null
+++ b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/alert_logback.xml
@@ -0,0 +1,31 @@
+
+
+
+
+
+
+ [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
+
+ UTF-8
+
+
+
+
+ ${log.base}/escheduler-alert.log
+
+ ${log.base}/escheduler-alert.%d{yyyy-MM-dd_HH}.%i.log
+ 20
+ 64MB
+
+
+
+ [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
+
+ UTF-8
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/apiserver_logback.xml b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/apiserver_logback.xml
new file mode 100644
index 0000000000..43e6af951a
--- /dev/null
+++ b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/apiserver_logback.xml
@@ -0,0 +1,42 @@
+
+
+
+
+
+
+
+
+
+
+
+ [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
+
+ UTF-8
+
+
+
+
+
+
+ INFO
+
+ ${log.base}/escheduler-api-server.log
+
+ ${log.base}/escheduler-api-server.%d{yyyy-MM-dd_HH}.%i.log
+ 168
+ 64MB
+
+
+
+
+ [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
+
+ UTF-8
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/application.properties b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/application.properties
new file mode 100644
index 0000000000..b817c18a4a
--- /dev/null
+++ b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/application.properties
@@ -0,0 +1,19 @@
+# server port
+server.port=12345
+
+# session config
+server.servlet.session.timeout=7200
+
+server.servlet.context-path=/escheduler/
+
+# file size limit for upload
+spring.servlet.multipart.max-file-size=1024MB
+spring.servlet.multipart.max-request-size=1024MB
+
+#post content
+server.jetty.max-http-post-size=5000000
+
+spring.messages.encoding=UTF-8
+
+#i18n classpath folder, file prefix "messages"; if there are many files, use "," as the separator
+spring.messages.basename=i18n/messages
diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/application_master.properties b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/application_master.properties
new file mode 100644
index 0000000000..cc4774ae94
--- /dev/null
+++ b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/application_master.properties
@@ -0,0 +1 @@
+logging.config=classpath:master_logback.xml
diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/common/common.properties b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/common/common.properties
new file mode 100644
index 0000000000..15af284597
--- /dev/null
+++ b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/common/common.properties
@@ -0,0 +1,42 @@
+#task queue implementation, default "zookeeper"
+escheduler.queue.impl=zookeeper
+
+# user data directory path, self configuration, please make sure the directory exists and have read write permissions
+data.basedir.path=/tmp/escheduler
+
+# directory path for user data download. self configuration, please make sure the directory exists and have read write permissions
+data.download.basedir.path=/tmp/escheduler/download
+
+# process execute directory. self configuration, please make sure the directory exists and have read write permissions
+process.exec.basepath=/tmp/escheduler/exec
+
+# Users who have permission to create directories under the HDFS root path
+hdfs.root.user=hdfs
+
+# data base dir, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions。"/escheduler" is recommended
+data.store2hdfs.basepath=/escheduler
+
+# resource upload startup type : HDFS,S3,NONE
+res.upload.startup.type=NONE
+
+# whether kerberos starts
+hadoop.security.authentication.startup.state=false
+
+# java.security.krb5.conf path
+java.security.krb5.conf.path=/opt/krb5.conf
+
+# loginUserFromKeytab user
+login.user.keytab.username=hdfs-mycluster@ESZ.COM
+
+# loginUserFromKeytab path
+login.user.keytab.path=/opt/hdfs.headless.keytab
+
+# system env path. self configuration, please make sure the directory and file exists and have read write execute permissions
+escheduler.env.path=/opt/escheduler/conf/env/.escheduler_env.sh
+
+#resource.view.suffixs
+resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml
+
+# is development state? default "false"
+development.state=true
+
diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/common/hadoop/hadoop.properties b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/common/hadoop/hadoop.properties
new file mode 100644
index 0000000000..81452a83a2
--- /dev/null
+++ b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/common/hadoop/hadoop.properties
@@ -0,0 +1,18 @@
+# ha or single namenode,If namenode ha needs to copy core-site.xml and hdfs-site.xml
+# to the conf directory,support s3,for example : s3a://escheduler
+fs.defaultFS=hdfs://mycluster:8020
+
+# s3 need,s3 endpoint
+fs.s3a.endpoint=http://192.168.199.91:9010
+
+# s3 need,s3 access key
+fs.s3a.access.key=xxxxxxx
+
+# s3 need,s3 secret key
+fs.s3a.secret.key=xxxxxxx
+
+#resourcemanager ha note this need ips , this empty if single
+yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx
+
+# If it is a single resourcemanager, you only need to configure one host name. If it is resourcemanager HA, the default configuration is fine
+yarn.application.status.address=http://ark1:8088/ws/v1/cluster/apps/%s
\ No newline at end of file
diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/config/install_config.conf b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/config/install_config.conf
new file mode 100644
index 0000000000..43b955d4f1
--- /dev/null
+++ b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/config/install_config.conf
@@ -0,0 +1,3 @@
+installPath=/data1_1T/escheduler
+deployUser=escheduler
+ips=ark0,ark1,ark2,ark3,ark4
diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/config/run_config.conf b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/config/run_config.conf
new file mode 100644
index 0000000000..f4cfd832c4
--- /dev/null
+++ b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/config/run_config.conf
@@ -0,0 +1,4 @@
+masters=ark0,ark1
+workers=ark2,ark3,ark4
+alertServer=ark3
+apiServers=ark1
\ No newline at end of file
diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/dao/data_source.properties b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/dao/data_source.properties
new file mode 100644
index 0000000000..0dce2943e4
--- /dev/null
+++ b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/dao/data_source.properties
@@ -0,0 +1,53 @@
+# base spring data source configuration
+spring.datasource.type=com.alibaba.druid.pool.DruidDataSource
+spring.datasource.driver-class-name=com.mysql.jdbc.Driver
+spring.datasource.url=jdbc:mysql://127.0.0.1:3306/escheduler?characterEncoding=UTF-8
+spring.datasource.username=root
+spring.datasource.password=root@123
+
+# connection configuration
+spring.datasource.initialSize=5
+# min connection number
+spring.datasource.minIdle=5
+# max connection number
+spring.datasource.maxActive=50
+
+# max wait time for get a connection in milliseconds. if configuring maxWait, fair locks are enabled by default and concurrency efficiency decreases.
+# If necessary, unfair locks can be used by configuring the useUnfairLock attribute to true.
+spring.datasource.maxWait=60000
+
+# milliseconds for check to close free connections
+spring.datasource.timeBetweenEvictionRunsMillis=60000
+
+# the Destroy thread detects the connection interval and closes the physical connection in milliseconds if the connection idle time is greater than or equal to minEvictableIdleTimeMillis.
+spring.datasource.timeBetweenConnectErrorMillis=60000
+
+# the longest time a connection remains idle without being evicted, in milliseconds
+spring.datasource.minEvictableIdleTimeMillis=300000
+
+#the SQL used to check whether the connection is valid requires a query statement. If validation Query is null, testOnBorrow, testOnReturn, and testWhileIdle will not work.
+spring.datasource.validationQuery=SELECT 1
+#check whether the connection is valid for timeout, in seconds
+spring.datasource.validationQueryTimeout=3
+
+# when applying for a connection, if it is detected that the connection is idle longer than time Between Eviction Runs Millis,
+# validation Query is performed to check whether the connection is valid
+spring.datasource.testWhileIdle=true
+
+#execute validation to check if the connection is valid when applying for a connection
+spring.datasource.testOnBorrow=true
+#execute validation to check if the connection is valid when the connection is returned
+spring.datasource.testOnReturn=false
+spring.datasource.defaultAutoCommit=true
+spring.datasource.keepAlive=true
+
+# open PSCache, specify count PSCache for every connection
+spring.datasource.poolPreparedStatements=true
+spring.datasource.maxPoolPreparedStatementPerConnectionSize=20
+
+# data quality analysis is not currently in use. please ignore the following configuration
+# task record flag
+task.record.flag=false
+task.record.datasource.url=jdbc:mysql://192.168.xx.xx:3306/etl?characterEncoding=UTF-8
+task.record.datasource.username=xx
+task.record.datasource.password=xx
diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/env/.escheduler_env.sh b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/env/.escheduler_env.sh
new file mode 100644
index 0000000000..75362d494d
--- /dev/null
+++ b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/env/.escheduler_env.sh
@@ -0,0 +1,3 @@
+export PYTHON_HOME=/usr/bin/python
+export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
+export PATH=$PYTHON_HOME:$JAVA_HOME/bin:$PATH
\ No newline at end of file
diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/i18n/messages.properties b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/i18n/messages.properties
new file mode 100644
index 0000000000..a663c71013
--- /dev/null
+++ b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/i18n/messages.properties
@@ -0,0 +1,229 @@
+QUERY_SCHEDULE_LIST_NOTES=query schedule list
+EXECUTE_PROCESS_TAG=execute process related operation
+PROCESS_INSTANCE_EXECUTOR_TAG=process instance executor related operation
+RUN_PROCESS_INSTANCE_NOTES=run process instance
+START_NODE_LIST=start node list(node name)
+TASK_DEPEND_TYPE=task depend type
+COMMAND_TYPE=command type
+RUN_MODE=run mode
+TIMEOUT=timeout
+EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=execute action to process instance
+EXECUTE_TYPE=execute type
+START_CHECK_PROCESS_DEFINITION_NOTES=start check process definition
+GET_RECEIVER_CC_NOTES=query receiver cc
+DESC=description
+GROUP_NAME=group name
+GROUP_TYPE=group type
+QUERY_ALERT_GROUP_LIST_NOTES=query alert group list
+UPDATE_ALERT_GROUP_NOTES=update alert group
+DELETE_ALERT_GROUP_BY_ID_NOTES=delete alert group by id
+VERIFY_ALERT_GROUP_NAME_NOTES=verify alert group name, check alert group exist or not
+GRANT_ALERT_GROUP_NOTES=grant alert group
+USER_IDS=user id list
+ALERT_GROUP_TAG=alert group related operation
+CREATE_ALERT_GROUP_NOTES=create alert group
+WORKER_GROUP_TAG=worker group related operation
+SAVE_WORKER_GROUP_NOTES=create worker group
+WORKER_GROUP_NAME=worker group name
+WORKER_IP_LIST=worker ip list, eg. 192.168.1.1,192.168.1.2
+QUERY_WORKER_GROUP_PAGING_NOTES=query worker group paging
+QUERY_WORKER_GROUP_LIST_NOTES=query worker group list
+DELETE_WORKER_GROUP_BY_ID_NOTES=delete worker group by id
+DATA_ANALYSIS_TAG=analysis related operation of task state
+COUNT_TASK_STATE_NOTES=count task state
+COUNT_PROCESS_INSTANCE_NOTES=count process instance state
+COUNT_PROCESS_DEFINITION_BY_USER_NOTES=count process definition by user
+COUNT_COMMAND_STATE_NOTES=count command state
+COUNT_QUEUE_STATE_NOTES=count the running status of the task in the queue
+
+ACCESS_TOKEN_TAG=access token related operation
+MONITOR_TAG=monitor related operation
+MASTER_LIST_NOTES=master server list
+WORKER_LIST_NOTES=worker server list
+QUERY_DATABASE_STATE_NOTES=query database state
+QUERY_ZOOKEEPER_STATE_NOTES=QUERY ZOOKEEPER STATE
+TASK_STATE=task instance state
+SOURCE_TABLE=SOURCE TABLE
+DEST_TABLE=dest table
+TASK_DATE=task date
+QUERY_HISTORY_TASK_RECORD_LIST_PAGING_NOTES=query history task record list paging
+DATA_SOURCE_TAG=data source related operation
+CREATE_DATA_SOURCE_NOTES=create data source
+DATA_SOURCE_NAME=data source name
+DATA_SOURCE_NOTE=data source desc
+DB_TYPE=database type
+DATA_SOURCE_HOST=DATA SOURCE HOST
+DATA_SOURCE_PORT=data source port
+DATABASE_NAME=database name
+QUEUE_TAG=queue related operation
+QUERY_QUEUE_LIST_NOTES=query queue list
+QUERY_QUEUE_LIST_PAGING_NOTES=query queue list paging
+CREATE_QUEUE_NOTES=create queue
+YARN_QUEUE_NAME=yarn(hadoop) queue name
+QUEUE_ID=queue id
+TENANT_DESC=tenant desc
+QUERY_TENANT_LIST_PAGING_NOTES=query tenant list paging
+QUERY_TENANT_LIST_NOTES=query tenant list
+UPDATE_TENANT_NOTES=update tenant
+DELETE_TENANT_NOTES=delete tenant
+RESOURCES_TAG=resource center related operation
+CREATE_RESOURCE_NOTES=create resource
+RESOURCE_TYPE=resource file type
+RESOURCE_NAME=resource name
+RESOURCE_DESC=resource file desc
+RESOURCE_FILE=resource file
+RESOURCE_ID=resource id
+QUERY_RESOURCE_LIST_NOTES=query resource list
+DELETE_RESOURCE_BY_ID_NOTES=delete resource by id
+VIEW_RESOURCE_BY_ID_NOTES=view resource by id
+ONLINE_CREATE_RESOURCE_NOTES=online create resource
+SUFFIX=resource file suffix
+CONTENT=resource file content
+UPDATE_RESOURCE_NOTES=edit resource file online
+DOWNLOAD_RESOURCE_NOTES=download resource file
+CREATE_UDF_FUNCTION_NOTES=create udf function
+UDF_TYPE=UDF type
+FUNC_NAME=function name
+CLASS_NAME=package and class name
+ARG_TYPES=arguments
+UDF_DESC=udf desc
+VIEW_UDF_FUNCTION_NOTES=view udf function
+UPDATE_UDF_FUNCTION_NOTES=update udf function
+QUERY_UDF_FUNCTION_LIST_PAGING_NOTES=query udf function list paging
+VERIFY_UDF_FUNCTION_NAME_NOTES=verify udf function name
+DELETE_UDF_FUNCTION_NOTES=delete udf function
+AUTHORIZED_FILE_NOTES=authorized file
+UNAUTHORIZED_FILE_NOTES=unauthorized file
+AUTHORIZED_UDF_FUNC_NOTES=authorized udf func
+UNAUTHORIZED_UDF_FUNC_NOTES=unauthorized udf func
+VERIFY_QUEUE_NOTES=verify queue
+TENANT_TAG=tenant related operation
+CREATE_TENANT_NOTES=create tenant
+TENANT_CODE=tenant code
+TENANT_NAME=tenant name
+QUEUE_NAME=queue name
+PASSWORD=password
+DATA_SOURCE_OTHER=jdbc connection params, format:{"key1":"value1",...}
+PROJECT_TAG=project related operation
+CREATE_PROJECT_NOTES=create project
+PROJECT_DESC=project description
+UPDATE_PROJECT_NOTES=update project
+PROJECT_ID=project id
+QUERY_PROJECT_BY_ID_NOTES=query project info by project id
+QUERY_PROJECT_LIST_PAGING_NOTES=QUERY PROJECT LIST PAGING
+DELETE_PROJECT_BY_ID_NOTES=delete project by id
+QUERY_UNAUTHORIZED_PROJECT_NOTES=query unauthorized project
+QUERY_AUTHORIZED_PROJECT_NOTES=query authorized project
+TASK_RECORD_TAG=task record related operation
+QUERY_TASK_RECORD_LIST_PAGING_NOTES=query task record list paging
+CREATE_TOKEN_NOTES=create token ,note: please login first
+QUERY_ACCESS_TOKEN_LIST_NOTES=query access token list paging
+SCHEDULE=schedule
+WARNING_TYPE=warning type(sending strategy)
+WARNING_GROUP_ID=warning group id
+FAILURE_STRATEGY=failure strategy
+RECEIVERS=receivers
+RECEIVERS_CC=receivers cc
+WORKER_GROUP_ID=worker server group id
+PROCESS_INSTANCE_PRIORITY=process instance priority
+UPDATE_SCHEDULE_NOTES=update schedule
+SCHEDULE_ID=schedule id
+ONLINE_SCHEDULE_NOTES=online schedule
+OFFLINE_SCHEDULE_NOTES=offline schedule
+QUERY_SCHEDULE_NOTES=query schedule
+QUERY_SCHEDULE_LIST_PAGING_NOTES=query schedule list paging
+LOGIN_TAG=User login related operations
+USER_NAME=user name
+PROJECT_NAME=project name
+CREATE_PROCESS_DEFINITION_NOTES=create process definition
+PROCESS_DEFINITION_NAME=process definition name
+PROCESS_DEFINITION_JSON=process definition detail info (json format)
+PROCESS_DEFINITION_LOCATIONS=process definition node locations info (json format)
+PROCESS_INSTANCE_LOCATIONS=process instance node locations info (json format)
+PROCESS_DEFINITION_CONNECTS=process definition node connects info (json format)
+PROCESS_INSTANCE_CONNECTS=process instance node connects info (json format)
+PROCESS_DEFINITION_DESC=process definition desc
+PROCESS_DEFINITION_TAG=process definition related operation
+SIGNOUT_NOTES=logout
+USER_PASSWORD=user password
+UPDATE_PROCESS_INSTANCE_NOTES=update process instance
+QUERY_PROCESS_INSTANCE_LIST_NOTES=query process instance list
+VERIFY_PROCCESS_DEFINITION_NAME_NOTES=verify process definition name
+LOGIN_NOTES=user login
+UPDATE_PROCCESS_DEFINITION_NOTES=update process definition
+PROCESS_DEFINITION_ID=process definition id
+RELEASE_PROCCESS_DEFINITION_NOTES=release process definition
+QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=query process definition by id
+QUERY_PROCCESS_DEFINITION_LIST_NOTES=query process definition list
+QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=query process definition list paging
+PAGE_NO=page no
+PROCESS_INSTANCE_ID=process instance id
+PROCESS_INSTANCE_JSON=process instance info(json format)
+SCHEDULE_TIME=schedule time
+SYNC_DEFINE=update the information of the process instance to the process definition
+
+RECOVERY_PROCESS_INSTANCE_FLAG=whether to recovery process instance
+SEARCH_VAL=search val
+USER_ID=user id
+PAGE_SIZE=page size
+LIMIT=limit
+VIEW_TREE_NOTES=view tree
+GET_NODE_LIST_BY_DEFINITION_ID_NOTES=get task node list by process definition id
+PROCESS_DEFINITION_ID_LIST=process definition id list
+QUERY_PROCESS_INSTANCE_BY_ID_NOTES=query process instance by process instance id
+DELETE_PROCESS_INSTANCE_BY_ID_NOTES=delete process instance by process instance id
+TASK_ID=task instance id
+SKIP_LINE_NUM=skip line num
+QUERY_TASK_INSTANCE_LOG_NOTES=query task instance log
+DOWNLOAD_TASK_INSTANCE_LOG_NOTES=download task instance log
+USERS_TAG=users related operation
+SCHEDULER_TAG=scheduler related operation
+CREATE_SCHEDULE_NOTES=create schedule
+CREATE_USER_NOTES=create user
+TENANT_ID=tenant id
+QUEUE=queue
+EMAIL=email
+PHONE=phone
+QUERY_USER_LIST_NOTES=query user list
+UPDATE_USER_NOTES=update user
+DELETE_USER_BY_ID_NOTES=delete user by id
+GRANT_PROJECT_NOTES=GRANT PROJECT
+PROJECT_IDS=project ids(string format, multiple projects separated by ",")
+GRANT_RESOURCE_NOTES=grant resource file
+RESOURCE_IDS=resource ids(string format, multiple resources separated by ",")
+GET_USER_INFO_NOTES=get user info
+LIST_USER_NOTES=list user
+VERIFY_USER_NAME_NOTES=verify user name
+UNAUTHORIZED_USER_NOTES=cancel authorization
+ALERT_GROUP_ID=alert group id
+AUTHORIZED_USER_NOTES=authorized user
+GRANT_UDF_FUNC_NOTES=grant udf function
+UDF_IDS=udf ids(string format, multiple udf functions separated by ",")
+GRANT_DATASOURCE_NOTES=grant datasource
+DATASOURCE_IDS=datasource ids(string format, multiple datasources separated by ",")
+QUERY_SUBPROCESS_INSTANCE_BY_TASK_ID_NOTES=query subprocess instance by task instance id
+QUERY_PARENT_PROCESS_INSTANCE_BY_SUB_PROCESS_INSTANCE_ID_NOTES=query parent process instance info by sub process instance id
+QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES=query process instance global variables and local variables
+VIEW_GANTT_NOTES=view gantt
+SUB_PROCESS_INSTANCE_ID=sub process instance id
+TASK_NAME=task instance name
+TASK_INSTANCE_TAG=task instance related operation
+LOGGER_TAG=log related operation
+PROCESS_INSTANCE_TAG=process instance related operation
+EXECUTION_STATUS=running status for workflow and task nodes
+HOST=ip address of running task
+START_DATE=start date
+END_DATE=end date
+QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES=query task list by process instance id
+UPDATE_DATA_SOURCE_NOTES=update data source
+DATA_SOURCE_ID=DATA SOURCE ID
+QUERY_DATA_SOURCE_NOTES=query data source by id
+QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES=query data source list by database type
+QUERY_DATA_SOURCE_LIST_PAGING_NOTES=query data source list paging
+CONNECT_DATA_SOURCE_NOTES=CONNECT DATA SOURCE
+CONNECT_DATA_SOURCE_TEST_NOTES=connect data source test
+DELETE_DATA_SOURCE_NOTES=delete data source
+VERIFY_DATA_SOURCE_NOTES=verify data source
+UNAUTHORIZED_DATA_SOURCE_NOTES=unauthorized data source
+AUTHORIZED_DATA_SOURCE_NOTES=authorized data source
+DELETE_SCHEDULER_BY_ID_NOTES=delete scheduler by id
diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/i18n/messages_en_US.properties b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/i18n/messages_en_US.properties
new file mode 100644
index 0000000000..a663c71013
--- /dev/null
+++ b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/i18n/messages_en_US.properties
@@ -0,0 +1,229 @@
+QUERY_SCHEDULE_LIST_NOTES=query schedule list
+EXECUTE_PROCESS_TAG=execute process related operation
+PROCESS_INSTANCE_EXECUTOR_TAG=process instance executor related operation
+RUN_PROCESS_INSTANCE_NOTES=run process instance
+START_NODE_LIST=start node list(node name)
+TASK_DEPEND_TYPE=task depend type
+COMMAND_TYPE=command type
+RUN_MODE=run mode
+TIMEOUT=timeout
+EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=execute action to process instance
+EXECUTE_TYPE=execute type
+START_CHECK_PROCESS_DEFINITION_NOTES=start check process definition
+GET_RECEIVER_CC_NOTES=query receiver cc
+DESC=description
+GROUP_NAME=group name
+GROUP_TYPE=group type
+QUERY_ALERT_GROUP_LIST_NOTES=query alert group list
+UPDATE_ALERT_GROUP_NOTES=update alert group
+DELETE_ALERT_GROUP_BY_ID_NOTES=delete alert group by id
+VERIFY_ALERT_GROUP_NAME_NOTES=verify alert group name, check alert group exist or not
+GRANT_ALERT_GROUP_NOTES=grant alert group
+USER_IDS=user id list
+ALERT_GROUP_TAG=alert group related operation
+CREATE_ALERT_GROUP_NOTES=create alert group
+WORKER_GROUP_TAG=worker group related operation
+SAVE_WORKER_GROUP_NOTES=create worker group
+WORKER_GROUP_NAME=worker group name
+WORKER_IP_LIST=worker ip list, eg. 192.168.1.1,192.168.1.2
+QUERY_WORKER_GROUP_PAGING_NOTES=query worker group paging
+QUERY_WORKER_GROUP_LIST_NOTES=query worker group list
+DELETE_WORKER_GROUP_BY_ID_NOTES=delete worker group by id
+DATA_ANALYSIS_TAG=analysis related operation of task state
+COUNT_TASK_STATE_NOTES=count task state
+COUNT_PROCESS_INSTANCE_NOTES=count process instance state
+COUNT_PROCESS_DEFINITION_BY_USER_NOTES=count process definition by user
+COUNT_COMMAND_STATE_NOTES=count command state
+COUNT_QUEUE_STATE_NOTES=count the running status of the task in the queue
+
+ACCESS_TOKEN_TAG=access token related operation
+MONITOR_TAG=monitor related operation
+MASTER_LIST_NOTES=master server list
+WORKER_LIST_NOTES=worker server list
+QUERY_DATABASE_STATE_NOTES=query database state
+QUERY_ZOOKEEPER_STATE_NOTES=QUERY ZOOKEEPER STATE
+TASK_STATE=task instance state
+SOURCE_TABLE=SOURCE TABLE
+DEST_TABLE=dest table
+TASK_DATE=task date
+QUERY_HISTORY_TASK_RECORD_LIST_PAGING_NOTES=query history task record list paging
+DATA_SOURCE_TAG=data source related operation
+CREATE_DATA_SOURCE_NOTES=create data source
+DATA_SOURCE_NAME=data source name
+DATA_SOURCE_NOTE=data source desc
+DB_TYPE=database type
+DATA_SOURCE_HOST=DATA SOURCE HOST
+DATA_SOURCE_PORT=data source port
+DATABASE_NAME=database name
+QUEUE_TAG=queue related operation
+QUERY_QUEUE_LIST_NOTES=query queue list
+QUERY_QUEUE_LIST_PAGING_NOTES=query queue list paging
+CREATE_QUEUE_NOTES=create queue
+YARN_QUEUE_NAME=yarn(hadoop) queue name
+QUEUE_ID=queue id
+TENANT_DESC=tenant desc
+QUERY_TENANT_LIST_PAGING_NOTES=query tenant list paging
+QUERY_TENANT_LIST_NOTES=query tenant list
+UPDATE_TENANT_NOTES=update tenant
+DELETE_TENANT_NOTES=delete tenant
+RESOURCES_TAG=resource center related operation
+CREATE_RESOURCE_NOTES=create resource
+RESOURCE_TYPE=resource file type
+RESOURCE_NAME=resource name
+RESOURCE_DESC=resource file desc
+RESOURCE_FILE=resource file
+RESOURCE_ID=resource id
+QUERY_RESOURCE_LIST_NOTES=query resource list
+DELETE_RESOURCE_BY_ID_NOTES=delete resource by id
+VIEW_RESOURCE_BY_ID_NOTES=view resource by id
+ONLINE_CREATE_RESOURCE_NOTES=online create resource
+SUFFIX=resource file suffix
+CONTENT=resource file content
+UPDATE_RESOURCE_NOTES=edit resource file online
+DOWNLOAD_RESOURCE_NOTES=download resource file
+CREATE_UDF_FUNCTION_NOTES=create udf function
+UDF_TYPE=UDF type
+FUNC_NAME=function name
+CLASS_NAME=package and class name
+ARG_TYPES=arguments
+UDF_DESC=udf desc
+VIEW_UDF_FUNCTION_NOTES=view udf function
+UPDATE_UDF_FUNCTION_NOTES=update udf function
+QUERY_UDF_FUNCTION_LIST_PAGING_NOTES=query udf function list paging
+VERIFY_UDF_FUNCTION_NAME_NOTES=verify udf function name
+DELETE_UDF_FUNCTION_NOTES=delete udf function
+AUTHORIZED_FILE_NOTES=authorized file
+UNAUTHORIZED_FILE_NOTES=unauthorized file
+AUTHORIZED_UDF_FUNC_NOTES=authorized udf func
+UNAUTHORIZED_UDF_FUNC_NOTES=unauthorized udf func
+VERIFY_QUEUE_NOTES=verify queue
+TENANT_TAG=tenant related operation
+CREATE_TENANT_NOTES=create tenant
+TENANT_CODE=tenant code
+TENANT_NAME=tenant name
+QUEUE_NAME=queue name
+PASSWORD=password
+DATA_SOURCE_OTHER=jdbc connection params, format:{"key1":"value1",...}
+PROJECT_TAG=project related operation
+CREATE_PROJECT_NOTES=create project
+PROJECT_DESC=project description
+UPDATE_PROJECT_NOTES=update project
+PROJECT_ID=project id
+QUERY_PROJECT_BY_ID_NOTES=query project info by project id
+QUERY_PROJECT_LIST_PAGING_NOTES=QUERY PROJECT LIST PAGING
+DELETE_PROJECT_BY_ID_NOTES=delete project by id
+QUERY_UNAUTHORIZED_PROJECT_NOTES=query unauthorized project
+QUERY_AUTHORIZED_PROJECT_NOTES=query authorized project
+TASK_RECORD_TAG=task record related operation
+QUERY_TASK_RECORD_LIST_PAGING_NOTES=query task record list paging
+CREATE_TOKEN_NOTES=create token ,note: please login first
+QUERY_ACCESS_TOKEN_LIST_NOTES=query access token list paging
+SCHEDULE=schedule
+WARNING_TYPE=warning type(sending strategy)
+WARNING_GROUP_ID=warning group id
+FAILURE_STRATEGY=failure strategy
+RECEIVERS=receivers
+RECEIVERS_CC=receivers cc
+WORKER_GROUP_ID=worker server group id
+PROCESS_INSTANCE_PRIORITY=process instance priority
+UPDATE_SCHEDULE_NOTES=update schedule
+SCHEDULE_ID=schedule id
+ONLINE_SCHEDULE_NOTES=online schedule
+OFFLINE_SCHEDULE_NOTES=offline schedule
+QUERY_SCHEDULE_NOTES=query schedule
+QUERY_SCHEDULE_LIST_PAGING_NOTES=query schedule list paging
+LOGIN_TAG=User login related operations
+USER_NAME=user name
+PROJECT_NAME=project name
+CREATE_PROCESS_DEFINITION_NOTES=create process definition
+PROCESS_DEFINITION_NAME=process definition name
+PROCESS_DEFINITION_JSON=process definition detail info (json format)
+PROCESS_DEFINITION_LOCATIONS=process definition node locations info (json format)
+PROCESS_INSTANCE_LOCATIONS=process instance node locations info (json format)
+PROCESS_DEFINITION_CONNECTS=process definition node connects info (json format)
+PROCESS_INSTANCE_CONNECTS=process instance node connects info (json format)
+PROCESS_DEFINITION_DESC=process definition desc
+PROCESS_DEFINITION_TAG=process definition related operation
+SIGNOUT_NOTES=logout
+USER_PASSWORD=user password
+UPDATE_PROCESS_INSTANCE_NOTES=update process instance
+QUERY_PROCESS_INSTANCE_LIST_NOTES=query process instance list
+VERIFY_PROCCESS_DEFINITION_NAME_NOTES=verify process definition name
+LOGIN_NOTES=user login
+UPDATE_PROCCESS_DEFINITION_NOTES=update process definition
+PROCESS_DEFINITION_ID=process definition id
+RELEASE_PROCCESS_DEFINITION_NOTES=release process definition
+QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=query process definition by id
+QUERY_PROCCESS_DEFINITION_LIST_NOTES=query process definition list
+QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=query process definition list paging
+PAGE_NO=page no
+PROCESS_INSTANCE_ID=process instance id
+PROCESS_INSTANCE_JSON=process instance info(json format)
+SCHEDULE_TIME=schedule time
+SYNC_DEFINE=update the information of the process instance to the process definition
+
+RECOVERY_PROCESS_INSTANCE_FLAG=whether to recovery process instance
+SEARCH_VAL=search val
+USER_ID=user id
+PAGE_SIZE=page size
+LIMIT=limit
+VIEW_TREE_NOTES=view tree
+GET_NODE_LIST_BY_DEFINITION_ID_NOTES=get task node list by process definition id
+PROCESS_DEFINITION_ID_LIST=process definition id list
+QUERY_PROCESS_INSTANCE_BY_ID_NOTES=query process instance by process instance id
+DELETE_PROCESS_INSTANCE_BY_ID_NOTES=delete process instance by process instance id
+TASK_ID=task instance id
+SKIP_LINE_NUM=skip line num
+QUERY_TASK_INSTANCE_LOG_NOTES=query task instance log
+DOWNLOAD_TASK_INSTANCE_LOG_NOTES=download task instance log
+USERS_TAG=users related operation
+SCHEDULER_TAG=scheduler related operation
+CREATE_SCHEDULE_NOTES=create schedule
+CREATE_USER_NOTES=create user
+TENANT_ID=tenant id
+QUEUE=queue
+EMAIL=email
+PHONE=phone
+QUERY_USER_LIST_NOTES=query user list
+UPDATE_USER_NOTES=update user
+DELETE_USER_BY_ID_NOTES=delete user by id
+GRANT_PROJECT_NOTES=GRANT PROJECT
+PROJECT_IDS=project ids(string format, multiple projects separated by ",")
+GRANT_RESOURCE_NOTES=grant resource file
+RESOURCE_IDS=resource ids(string format, multiple resources separated by ",")
+GET_USER_INFO_NOTES=get user info
+LIST_USER_NOTES=list user
+VERIFY_USER_NAME_NOTES=verify user name
+UNAUTHORIZED_USER_NOTES=cancel authorization
+ALERT_GROUP_ID=alert group id
+AUTHORIZED_USER_NOTES=authorized user
+GRANT_UDF_FUNC_NOTES=grant udf function
+UDF_IDS=udf ids(string format, multiple udf functions separated by ",")
+GRANT_DATASOURCE_NOTES=grant datasource
+DATASOURCE_IDS=datasource ids(string format, multiple datasources separated by ",")
+QUERY_SUBPROCESS_INSTANCE_BY_TASK_ID_NOTES=query subprocess instance by task instance id
+QUERY_PARENT_PROCESS_INSTANCE_BY_SUB_PROCESS_INSTANCE_ID_NOTES=query parent process instance info by sub process instance id
+QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES=query process instance global variables and local variables
+VIEW_GANTT_NOTES=view gantt
+SUB_PROCESS_INSTANCE_ID=sub process instance id
+TASK_NAME=task instance name
+TASK_INSTANCE_TAG=task instance related operation
+LOGGER_TAG=log related operation
+PROCESS_INSTANCE_TAG=process instance related operation
+EXECUTION_STATUS=running status for workflow and task nodes
+HOST=ip address of running task
+START_DATE=start date
+END_DATE=end date
+QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES=query task list by process instance id
+UPDATE_DATA_SOURCE_NOTES=update data source
+DATA_SOURCE_ID=DATA SOURCE ID
+QUERY_DATA_SOURCE_NOTES=query data source by id
+QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES=query data source list by database type
+QUERY_DATA_SOURCE_LIST_PAGING_NOTES=query data source list paging
+CONNECT_DATA_SOURCE_NOTES=CONNECT DATA SOURCE
+CONNECT_DATA_SOURCE_TEST_NOTES=connect data source test
+DELETE_DATA_SOURCE_NOTES=delete data source
+VERIFY_DATA_SOURCE_NOTES=verify data source
+UNAUTHORIZED_DATA_SOURCE_NOTES=unauthorized data source
+AUTHORIZED_DATA_SOURCE_NOTES=authorized data source
+DELETE_SCHEDULER_BY_ID_NOTES=delete scheduler by id
diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/i18n/messages_zh_CN.properties b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/i18n/messages_zh_CN.properties
new file mode 100644
index 0000000000..b0d6694d2b
--- /dev/null
+++ b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/i18n/messages_zh_CN.properties
@@ -0,0 +1,227 @@
+QUERY_SCHEDULE_LIST_NOTES=查询定时列表
+PROCESS_INSTANCE_EXECUTOR_TAG=流程实例执行相关操作
+RUN_PROCESS_INSTANCE_NOTES=运行流程实例
+START_NODE_LIST=开始节点列表(节点name)
+TASK_DEPEND_TYPE=任务依赖类型
+COMMAND_TYPE=指令类型
+RUN_MODE=运行模式
+TIMEOUT=超时时间
+EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=执行流程实例的各种操作(暂停、停止、重跑、恢复等)
+EXECUTE_TYPE=执行类型
+START_CHECK_PROCESS_DEFINITION_NOTES=检查流程定义
+DESC=备注(描述)
+GROUP_NAME=组名称
+GROUP_TYPE=组类型
+QUERY_ALERT_GROUP_LIST_NOTES=告警组列表
+
+UPDATE_ALERT_GROUP_NOTES=编辑(更新)告警组
+DELETE_ALERT_GROUP_BY_ID_NOTES=删除告警组通过ID
+VERIFY_ALERT_GROUP_NAME_NOTES=检查告警组是否存在
+GRANT_ALERT_GROUP_NOTES=授权告警组
+USER_IDS=用户ID列表
+ALERT_GROUP_TAG=告警组相关操作
+WORKER_GROUP_TAG=Worker分组管理
+SAVE_WORKER_GROUP_NOTES=创建Worker分组
+
+WORKER_GROUP_NAME=Worker分组名称
+WORKER_IP_LIST=Worker ip列表,注意:多个IP地址以逗号分割
+
+QUERY_WORKER_GROUP_PAGING_NOTES=Worker分组管理
+QUERY_WORKER_GROUP_LIST_NOTES=查询worker group分组
+DELETE_WORKER_GROUP_BY_ID_NOTES=删除worker group通过ID
+DATA_ANALYSIS_TAG=任务状态分析相关操作
+COUNT_TASK_STATE_NOTES=任务状态统计
+COUNT_PROCESS_INSTANCE_NOTES=统计流程实例状态
+COUNT_PROCESS_DEFINITION_BY_USER_NOTES=统计用户创建的流程定义
+COUNT_COMMAND_STATE_NOTES=统计命令状态
+COUNT_QUEUE_STATE_NOTES=统计队列里任务状态
+ACCESS_TOKEN_TAG=access token相关操作,需要先登录
+MONITOR_TAG=监控相关操作
+MASTER_LIST_NOTES=master服务列表
+WORKER_LIST_NOTES=worker服务列表
+QUERY_DATABASE_STATE_NOTES=查询数据库状态
+QUERY_ZOOKEEPER_STATE_NOTES=查询Zookeeper状态
+TASK_STATE=任务实例状态
+SOURCE_TABLE=源表
+DEST_TABLE=目标表
+TASK_DATE=任务时间
+QUERY_HISTORY_TASK_RECORD_LIST_PAGING_NOTES=分页查询历史任务记录列表
+DATA_SOURCE_TAG=数据源相关操作
+CREATE_DATA_SOURCE_NOTES=创建数据源
+DATA_SOURCE_NAME=数据源名称
+DATA_SOURCE_NOTE=数据源描述
+DB_TYPE=数据源类型
+DATA_SOURCE_HOST=IP主机名
+DATA_SOURCE_PORT=数据源端口
+DATABASE_NAME=数据库名
+QUEUE_TAG=队列相关操作
+QUERY_QUEUE_LIST_NOTES=查询队列列表
+QUERY_QUEUE_LIST_PAGING_NOTES=分页查询队列列表
+CREATE_QUEUE_NOTES=创建队列
+YARN_QUEUE_NAME=hadoop yarn队列名
+QUEUE_ID=队列ID
+TENANT_DESC=租户描述
+QUERY_TENANT_LIST_PAGING_NOTES=分页查询租户列表
+QUERY_TENANT_LIST_NOTES=查询租户列表
+UPDATE_TENANT_NOTES=更新租户
+DELETE_TENANT_NOTES=删除租户
+RESOURCES_TAG=资源中心相关操作
+CREATE_RESOURCE_NOTES=创建资源
+RESOURCE_TYPE=资源文件类型
+RESOURCE_NAME=资源文件名称
+RESOURCE_DESC=资源文件描述
+RESOURCE_FILE=资源文件
+RESOURCE_ID=资源ID
+QUERY_RESOURCE_LIST_NOTES=查询资源列表
+DELETE_RESOURCE_BY_ID_NOTES=删除资源通过ID
+VIEW_RESOURCE_BY_ID_NOTES=浏览资源通过ID
+ONLINE_CREATE_RESOURCE_NOTES=在线创建资源
+SUFFIX=资源文件后缀
+CONTENT=资源文件内容
+UPDATE_RESOURCE_NOTES=在线更新资源文件
+DOWNLOAD_RESOURCE_NOTES=下载资源文件
+CREATE_UDF_FUNCTION_NOTES=创建UDF函数
+UDF_TYPE=UDF类型
+FUNC_NAME=函数名称
+CLASS_NAME=包名类名
+ARG_TYPES=参数
+UDF_DESC=udf描述,使用说明
+VIEW_UDF_FUNCTION_NOTES=查看udf函数
+UPDATE_UDF_FUNCTION_NOTES=更新udf函数
+QUERY_UDF_FUNCTION_LIST_PAGING_NOTES=分页查询udf函数列表
+VERIFY_UDF_FUNCTION_NAME_NOTES=验证udf函数名
+DELETE_UDF_FUNCTION_NOTES=删除UDF函数
+AUTHORIZED_FILE_NOTES=授权文件
+UNAUTHORIZED_FILE_NOTES=取消授权文件
+AUTHORIZED_UDF_FUNC_NOTES=授权udf函数
+UNAUTHORIZED_UDF_FUNC_NOTES=取消udf函数授权
+VERIFY_QUEUE_NOTES=验证队列
+TENANT_TAG=租户相关操作
+CREATE_TENANT_NOTES=创建租户
+TENANT_CODE=租户编码
+TENANT_NAME=租户名称
+QUEUE_NAME=队列名
+PASSWORD=密码
+DATA_SOURCE_OTHER=jdbc连接参数,格式为:{"key1":"value1",...}
+PROJECT_TAG=项目相关操作
+CREATE_PROJECT_NOTES=创建项目
+PROJECT_DESC=项目描述
+UPDATE_PROJECT_NOTES=更新项目
+PROJECT_ID=项目ID
+QUERY_PROJECT_BY_ID_NOTES=通过项目ID查询项目信息
+QUERY_PROJECT_LIST_PAGING_NOTES=分页查询项目列表
+DELETE_PROJECT_BY_ID_NOTES=删除项目通过ID
+QUERY_UNAUTHORIZED_PROJECT_NOTES=查询未授权的项目
+QUERY_AUTHORIZED_PROJECT_NOTES=查询授权项目
+TASK_RECORD_TAG=任务记录相关操作
+QUERY_TASK_RECORD_LIST_PAGING_NOTES=分页查询任务记录列表
+CREATE_TOKEN_NOTES=创建token,注意需要先登录
+QUERY_ACCESS_TOKEN_LIST_NOTES=分页查询access token列表
+SCHEDULE=定时
+WARNING_TYPE=发送策略
+WARNING_GROUP_ID=发送组ID
+FAILURE_STRATEGY=失败策略
+RECEIVERS=收件人
+RECEIVERS_CC=收件人(抄送)
+WORKER_GROUP_ID=Worker Server分组ID
+PROCESS_INSTANCE_PRIORITY=流程实例优先级
+UPDATE_SCHEDULE_NOTES=更新定时
+SCHEDULE_ID=定时ID
+ONLINE_SCHEDULE_NOTES=定时上线
+OFFLINE_SCHEDULE_NOTES=定时下线
+QUERY_SCHEDULE_NOTES=查询定时
+QUERY_SCHEDULE_LIST_PAGING_NOTES=分页查询定时
+LOGIN_TAG=用户登录相关操作
+USER_NAME=用户名
+PROJECT_NAME=项目名称
+CREATE_PROCESS_DEFINITION_NOTES=创建流程定义
+PROCESS_DEFINITION_NAME=流程定义名称
+PROCESS_DEFINITION_JSON=流程定义详细信息(json格式)
+PROCESS_DEFINITION_LOCATIONS=流程定义节点坐标位置信息(json格式)
+PROCESS_INSTANCE_LOCATIONS=流程实例节点坐标位置信息(json格式)
+PROCESS_DEFINITION_CONNECTS=流程定义节点图标连接信息(json格式)
+PROCESS_INSTANCE_CONNECTS=流程实例节点图标连接信息(json格式)
+PROCESS_DEFINITION_DESC=流程定义描述信息
+PROCESS_DEFINITION_TAG=流程定义相关操作
+SIGNOUT_NOTES=退出登录
+USER_PASSWORD=用户密码
+UPDATE_PROCESS_INSTANCE_NOTES=更新流程实例
+QUERY_PROCESS_INSTANCE_LIST_NOTES=查询流程实例列表
+VERIFY_PROCCESS_DEFINITION_NAME_NOTES=验证流程定义名字
+LOGIN_NOTES=用户登录
+UPDATE_PROCCESS_DEFINITION_NOTES=更新流程定义
+PROCESS_DEFINITION_ID=流程定义ID
+RELEASE_PROCCESS_DEFINITION_NOTES=发布流程定义
+QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=查询流程定义通过流程定义ID
+QUERY_PROCCESS_DEFINITION_LIST_NOTES=查询流程定义列表
+QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=分页查询流程定义列表
+PAGE_NO=页码号
+PROCESS_INSTANCE_ID=流程实例ID
+PROCESS_INSTANCE_JSON=流程实例信息(json格式)
+SCHEDULE_TIME=定时时间
+SYNC_DEFINE=更新流程实例的信息是否同步到流程定义
+RECOVERY_PROCESS_INSTANCE_FLAG=是否恢复流程实例
+SEARCH_VAL=搜索值
+USER_ID=用户ID
+PAGE_SIZE=页大小
+LIMIT=显示多少条
+VIEW_TREE_NOTES=树状图
+GET_NODE_LIST_BY_DEFINITION_ID_NOTES=获得任务节点列表通过流程定义ID
+PROCESS_DEFINITION_ID_LIST=流程定义id列表
+QUERY_PROCESS_INSTANCE_BY_ID_NOTES=查询流程实例通过流程实例ID
+DELETE_PROCESS_INSTANCE_BY_ID_NOTES=删除流程实例通过流程实例ID
+TASK_ID=任务实例ID
+SKIP_LINE_NUM=忽略行数
+QUERY_TASK_INSTANCE_LOG_NOTES=查询任务实例日志
+DOWNLOAD_TASK_INSTANCE_LOG_NOTES=下载任务实例日志
+USERS_TAG=用户相关操作
+SCHEDULER_TAG=定时相关操作
+CREATE_SCHEDULE_NOTES=创建定时
+CREATE_USER_NOTES=创建用户
+TENANT_ID=租户ID
+QUEUE=使用的队列
+EMAIL=邮箱
+PHONE=手机号
+QUERY_USER_LIST_NOTES=查询用户列表
+UPDATE_USER_NOTES=更新用户
+DELETE_USER_BY_ID_NOTES=删除用户通过ID
+GRANT_PROJECT_NOTES=授权项目
+PROJECT_IDS=项目IDS(字符串格式,多个项目以","分割)
+GRANT_RESOURCE_NOTES=授权资源文件
+RESOURCE_IDS=资源ID列表(字符串格式,多个资源ID以","分割)
+GET_USER_INFO_NOTES=获取用户信息
+LIST_USER_NOTES=用户列表
+VERIFY_USER_NAME_NOTES=验证用户名
+UNAUTHORIZED_USER_NOTES=取消授权
+ALERT_GROUP_ID=报警组ID
+AUTHORIZED_USER_NOTES=授权用户
+GRANT_UDF_FUNC_NOTES=授权udf函数
+UDF_IDS=udf函数id列表(字符串格式,多个udf函数ID以","分割)
+GRANT_DATASOURCE_NOTES=授权数据源
+DATASOURCE_IDS=数据源ID列表(字符串格式,多个数据源ID以","分割)
+QUERY_SUBPROCESS_INSTANCE_BY_TASK_ID_NOTES=查询子流程实例通过任务实例ID
+QUERY_PARENT_PROCESS_INSTANCE_BY_SUB_PROCESS_INSTANCE_ID_NOTES=查询父流程实例信息通过子流程实例ID
+QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES=查询流程实例全局变量和局部变量
+VIEW_GANTT_NOTES=浏览Gantt图
+SUB_PROCESS_INSTANCE_ID=子流程实例ID
+TASK_NAME=任务实例名
+TASK_INSTANCE_TAG=任务实例相关操作
+LOGGER_TAG=日志相关操作
+PROCESS_INSTANCE_TAG=流程实例相关操作
+EXECUTION_STATUS=工作流和任务节点的运行状态
+HOST=运行任务的主机IP地址
+START_DATE=开始时间
+END_DATE=结束时间
+QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES=通过流程实例ID查询任务列表
+UPDATE_DATA_SOURCE_NOTES=更新数据源
+DATA_SOURCE_ID=数据源ID
+QUERY_DATA_SOURCE_NOTES=查询数据源通过ID
+QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES=查询数据源列表通过数据源类型
+QUERY_DATA_SOURCE_LIST_PAGING_NOTES=分页查询数据源列表
+CONNECT_DATA_SOURCE_NOTES=连接数据源
+CONNECT_DATA_SOURCE_TEST_NOTES=连接数据源测试
+DELETE_DATA_SOURCE_NOTES=删除数据源
+VERIFY_DATA_SOURCE_NOTES=验证数据源
+UNAUTHORIZED_DATA_SOURCE_NOTES=未授权的数据源
+AUTHORIZED_DATA_SOURCE_NOTES=授权的数据源
+DELETE_SCHEDULER_BY_ID_NOTES=根据定时id删除定时数据
diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/mail_templates/alert_mail_template.ftl b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/mail_templates/alert_mail_template.ftl
new file mode 100644
index 0000000000..0ff763fa28
--- /dev/null
+++ b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/mail_templates/alert_mail_template.ftl
@@ -0,0 +1 @@
+ easyscheduler<#if title??> ${title}</#if><#if content??> ${content}</#if>
\ No newline at end of file
diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/master.properties b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/master.properties
new file mode 100644
index 0000000000..9080defc7b
--- /dev/null
+++ b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/master.properties
@@ -0,0 +1,21 @@
+# master execute thread num
+master.exec.threads=100
+
+# master execute task number in parallel
+master.exec.task.number=20
+
+# master heartbeat interval
+master.heartbeat.interval=10
+
+# master commit task retry times
+master.task.commit.retryTimes=5
+
+# master commit task interval
+master.task.commit.interval=100
+
+
+# only less than cpu avg load, master server can work. default value : the number of cpu cores * 2
+master.max.cpuload.avg=10
+
+# only larger than reserved memory, master server can work. default value : physical memory * 1/10, unit is G.
+master.reserved.memory=1
diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/master_logback.xml b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/master_logback.xml
new file mode 100644
index 0000000000..d93878218e
--- /dev/null
+++ b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/master_logback.xml
@@ -0,0 +1,34 @@
+
+
+
+
+
+
+ [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
+
+ UTF-8
+
+
+
+
+ ${log.base}/escheduler-master.log
+
+ INFO
+
+
+ ${log.base}/escheduler-master.%d{yyyy-MM-dd_HH}.%i.log
+ 168
+ 200MB
+
+
+
+ [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
+
+ UTF-8
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/quartz.properties b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/quartz.properties
new file mode 100644
index 0000000000..21c5feb321
--- /dev/null
+++ b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/quartz.properties
@@ -0,0 +1,39 @@
+#============================================================================
+# Configure Main Scheduler Properties
+#============================================================================
+org.quartz.scheduler.instanceName = EasyScheduler
+org.quartz.scheduler.instanceId = AUTO
+org.quartz.scheduler.makeSchedulerThreadDaemon = true
+org.quartz.jobStore.useProperties = false
+
+#============================================================================
+# Configure ThreadPool
+#============================================================================
+
+org.quartz.threadPool.class = org.quartz.simpl.SimpleThreadPool
+org.quartz.threadPool.makeThreadsDaemons = true
+org.quartz.threadPool.threadCount = 25
+org.quartz.threadPool.threadPriority = 5
+
+#============================================================================
+# Configure JobStore
+#============================================================================
+
+org.quartz.jobStore.class = org.quartz.impl.jdbcjobstore.JobStoreTX
+org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.StdJDBCDelegate
+org.quartz.jobStore.tablePrefix = QRTZ_
+org.quartz.jobStore.isClustered = true
+org.quartz.jobStore.misfireThreshold = 60000
+org.quartz.jobStore.clusterCheckinInterval = 5000
+org.quartz.jobStore.dataSource = myDs
+
+#============================================================================
+# Configure Datasources
+#============================================================================
+
+org.quartz.dataSource.myDs.driver = com.mysql.jdbc.Driver
+org.quartz.dataSource.myDs.URL=jdbc:mysql://127.0.0.1:3306/escheduler?characterEncoding=utf8
+org.quartz.dataSource.myDs.user=root
+org.quartz.dataSource.myDs.password=root@123
+org.quartz.dataSource.myDs.maxConnections = 10
+org.quartz.dataSource.myDs.validationQuery = select 1
\ No newline at end of file
diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/worker.properties b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/worker.properties
new file mode 100644
index 0000000000..e58bd86dcf
--- /dev/null
+++ b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/worker.properties
@@ -0,0 +1,15 @@
+# worker execute thread num
+worker.exec.threads=100
+
+# worker heartbeat interval
+worker.heartbeat.interval=10
+
+# submit the number of tasks at a time
+worker.fetch.task.num = 3
+
+
+# only less than cpu avg load, worker server can work. default value : the number of cpu cores * 2
+#worker.max.cpuload.avg=10
+
+# only larger than reserved memory, worker server can work. default value : physical memory * 1/6, unit is G.
+worker.reserved.memory=1
\ No newline at end of file
diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/worker_logback.xml b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/worker_logback.xml
new file mode 100644
index 0000000000..32914ec84f
--- /dev/null
+++ b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/worker_logback.xml
@@ -0,0 +1,53 @@
+
+
+
+
+
+
+ [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
+
+ UTF-8
+
+
+
+
+ INFO
+
+
+ ${log.base}/{processDefinitionId}/{processInstanceId}/{taskInstanceId}.log
+
+
+ [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
+
+ UTF-8
+
+ true
+
+
+
+ ${log.base}/escheduler-worker.log
+
+ INFO
+
+
+
+ ${log.base}/escheduler-worker.%d{yyyy-MM-dd_HH}.%i.log
+ 168
+ 200MB
+
+
+
+
+ [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
+
+ UTF-8
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/zookeeper.properties b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/zookeeper.properties
new file mode 100644
index 0000000000..5f14df49b7
--- /dev/null
+++ b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/zookeeper.properties
@@ -0,0 +1,25 @@
+#zookeeper cluster
+zookeeper.quorum=127.0.0.1:2181
+
+#escheduler root directory
+zookeeper.escheduler.root=/escheduler
+
+#zookeeper server directory
+zookeeper.escheduler.dead.servers=/escheduler/dead-servers
+zookeeper.escheduler.masters=/escheduler/masters
+zookeeper.escheduler.workers=/escheduler/workers
+
+#zookeeper lock directory
+zookeeper.escheduler.lock.masters=/escheduler/lock/masters
+zookeeper.escheduler.lock.workers=/escheduler/lock/workers
+
+#escheduler failover directory
+zookeeper.escheduler.lock.failover.masters=/escheduler/lock/failover/masters
+zookeeper.escheduler.lock.failover.workers=/escheduler/lock/failover/workers
+zookeeper.escheduler.lock.failover.startup.masters=/escheduler/lock/failover/startup-masters
+
+#zookeeper session/connection/retry configuration
+zookeeper.session.timeout=300
+zookeeper.connection.timeout=300
+zookeeper.retry.sleep=1000
+zookeeper.retry.maxtime=5
\ No newline at end of file
diff --git a/dockerfile/dockerfile-1.1.x/conf/maven/settings.xml b/dockerfile/dockerfile-1.1.x/conf/maven/settings.xml
new file mode 100644
index 0000000000..6bdea4a1bf
--- /dev/null
+++ b/dockerfile/dockerfile-1.1.x/conf/maven/settings.xml
@@ -0,0 +1,263 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ nexus-aliyun
+ central
+ Nexus aliyun
+ http://maven.aliyun.com/nexus/content/groups/public
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/dockerfile/dockerfile-1.1.x/conf/nginx/default.conf b/dockerfile/dockerfile-1.1.x/conf/nginx/default.conf
new file mode 100644
index 0000000000..2d43c32b63
--- /dev/null
+++ b/dockerfile/dockerfile-1.1.x/conf/nginx/default.conf
@@ -0,0 +1,35 @@
+# Nginx front-end for EasyScheduler: serves the built UI and proxies the
+# /escheduler REST API to the api-server listening on 127.0.0.1:12345.
+server {
+ listen 8888;
+ server_name localhost;
+ #charset koi8-r;
+ #access_log /var/log/nginx/host.access.log main;
+ location / {
+ root /opt/easyscheduler_source/escheduler-ui/dist;
+ # fix: the second fallback was a duplicate "index.html"; use index.htm
+ index index.html index.htm;
+ }
+ location /escheduler {
+ proxy_pass http://127.0.0.1:12345;
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header x_real_ipP $remote_addr;
+ proxy_set_header remote_addr $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_http_version 1.1;
+ # generous timeouts: some API calls (e.g. log download) can be slow
+ proxy_connect_timeout 300s;
+ proxy_read_timeout 300s;
+ proxy_send_timeout 300s;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection "upgrade";
+ }
+ #error_page 404 /404.html;
+ # redirect server error pages to the static page /50x.html
+ #
+ error_page 500 502 503 504 /50x.html;
+ location = /50x.html {
+ root /usr/share/nginx/html;
+ }
+}
diff --git a/dockerfile/dockerfile-1.1.x/conf/zookeeper/zoo.cfg b/dockerfile/dockerfile-1.1.x/conf/zookeeper/zoo.cfg
new file mode 100644
index 0000000000..a5a2c0bbe3
--- /dev/null
+++ b/dockerfile/dockerfile-1.1.x/conf/zookeeper/zoo.cfg
@@ -0,0 +1,28 @@
+# The number of milliseconds of each tick
+tickTime=2000
+# The number of ticks that the initial
+# synchronization phase can take
+initLimit=10
+# The number of ticks that can pass between
+# sending a request and getting an acknowledgement
+syncLimit=5
+# the directory where the snapshot is stored.
+# do not use /tmp for storage, /tmp here is just
+# example sakes.
+dataDir=/tmp/zookeeper
+# the port at which the clients will connect
+clientPort=2181
+# the maximum number of client connections.
+# increase this if you need to handle more clients
+#maxClientCnxns=60
+#
+# Be sure to read the maintenance section of the
+# administrator guide before turning on autopurge.
+#
+# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
+#
+# The number of snapshots to retain in dataDir
+#autopurge.snapRetainCount=3
+# Purge task interval in hours
+# Set to "0" to disable auto purge feature
+#autopurge.purgeInterval=1
diff --git a/dockerfile/dockerfile-1.1.x/hooks/build b/dockerfile/dockerfile-1.1.x/hooks/build
new file mode 100644
index 0000000000..779c38e66f
--- /dev/null
+++ b/dockerfile/dockerfile-1.1.x/hooks/build
@@ -0,0 +1,11 @@
+#!/bin/bash
+# Docker Hub automated-build hook: builds the escheduler image for one release.
+# Expects $version, $tar_version and $DOCKER_REPO in the environment.
+
+echo "------ escheduler start - build -------"
+printenv
+
+# Quote expansions so empty or space-containing values cannot split the command line.
+docker build --build-arg "version=$version" --build-arg "tar_version=$tar_version" -t "$DOCKER_REPO:$version" .
+
+echo "------ escheduler end - build -------"
diff --git a/dockerfile/dockerfile-1.1.x/hooks/push b/dockerfile/dockerfile-1.1.x/hooks/push
new file mode 100644
index 0000000000..7b98da1a8d
--- /dev/null
+++ b/dockerfile/dockerfile-1.1.x/hooks/push
@@ -0,0 +1,11 @@
+#!/bin/bash
+# Docker Hub automated-build hook: pushes the image produced by hooks/build.
+# Expects $DOCKER_REPO and $version in the environment.
+
+echo "------ push start -------"
+printenv
+
+# Quote the tag so word splitting cannot mangle the repo:tag argument.
+docker push "$DOCKER_REPO:$version"
+
+echo "------ push end -------"
diff --git a/dockerfile/dockerfile-1.1.x/startup.sh b/dockerfile/dockerfile-1.1.x/startup.sh
new file mode 100644
index 0000000000..2d40fb5071
--- /dev/null
+++ b/dockerfile/dockerfile-1.1.x/startup.sh
@@ -0,0 +1,80 @@
+#! /bin/bash
+
+set -e
+if [ `netstat -anop|grep mysql|wc -l` -gt 0 ];then
+ echo "MySQL is Running."
+else
+ MYSQL_ROOT_PWD="root@123"
+ ESZ_DB="escheduler"
+ echo "启动mysql服务"
+ chown -R mysql:mysql /var/lib/mysql /var/run/mysqld
+ find /var/lib/mysql -type f -exec touch {} \; && service mysql restart $ sleep 10
+ if [ ! -f /nohup.out ];then
+ echo "设置mysql密码"
+ mysql --user=root --password=root -e "UPDATE mysql.user set authentication_string=password('$MYSQL_ROOT_PWD') where user='root'; FLUSH PRIVILEGES;"
+
+ echo "设置mysql权限"
+ mysql --user=root --password=$MYSQL_ROOT_PWD -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY '$MYSQL_ROOT_PWD' WITH GRANT OPTION; FLUSH PRIVILEGES;"
+ echo "创建escheduler数据库"
+ mysql --user=root --password=$MYSQL_ROOT_PWD -e "CREATE DATABASE IF NOT EXISTS \`$ESZ_DB\` CHARACTER SET utf8 COLLATE utf8_general_ci; FLUSH PRIVILEGES;"
+ echo "导入mysql数据"
+ nohup /opt/escheduler/script/create_escheduler.sh &
+ sleep 90
+ fi
+
+ if [ `mysql --user=root --password=$MYSQL_ROOT_PWD -s -r -e "SELECT count(TABLE_NAME) FROM information_schema.TABLES WHERE TABLE_SCHEMA='escheduler';" | grep -v count` -eq 38 ];then
+ echo "\`$ESZ_DB\` 表个数正确"
+ else
+ echo "\`$ESZ_DB\` 表个数不正确"
+ mysql --user=root --password=$MYSQL_ROOT_PWD -e "DROP DATABASE \`$ESZ_DB\`;"
+ echo "创建escheduler数据库"
+ mysql --user=root --password=$MYSQL_ROOT_PWD -e "CREATE DATABASE IF NOT EXISTS \`$ESZ_DB\` CHARACTER SET utf8 COLLATE utf8_general_ci; FLUSH PRIVILEGES;"
+ echo "导入mysql数据"
+ nohup /opt/escheduler/script/create_escheduler.sh &
+ fi
+fi
+
+/opt/zookeeper/bin/zkServer.sh restart
+
+sleep 10
+
+echo "启动api-server"
+/opt/escheduler/bin/escheduler-daemon.sh stop api-server
+/opt/escheduler/bin/escheduler-daemon.sh start api-server
+
+
+
+echo "启动master-server"
+/opt/escheduler/bin/escheduler-daemon.sh stop master-server
+python /opt/escheduler/script/del_zk_node.py 127.0.0.1 /escheduler/masters
+/opt/escheduler/bin/escheduler-daemon.sh start master-server
+
+echo "启动worker-server"
+/opt/escheduler/bin/escheduler-daemon.sh stop worker-server
+python /opt/escheduler/script/del_zk_node.py 127.0.0.1 /escheduler/workers
+/opt/escheduler/bin/escheduler-daemon.sh start worker-server
+
+
+echo "启动logger-server"
+/opt/escheduler/bin/escheduler-daemon.sh stop logger-server
+/opt/escheduler/bin/escheduler-daemon.sh start logger-server
+
+
+echo "启动alert-server"
+/opt/escheduler/bin/escheduler-daemon.sh stop alert-server
+/opt/escheduler/bin/escheduler-daemon.sh start alert-server
+
+
+
+
+
+echo "启动nginx"
+/etc/init.d/nginx stop
+nginx &
+
+
+while true
+do
+ sleep 101
+done
+exec "$@"