
Merge pull request #608 from qiaozhanwei/dev

docker file commit
乔占卫 authored 6 years ago, committed by GitHub
commit 7d0bf7aa92
  1. dockerfile/dockerfile-1.0.x/Dockerfile (+145)
  2. dockerfile/dockerfile-1.0.x/conf/escheduler/conf/alert.properties (+21)
  3. dockerfile/dockerfile-1.0.x/conf/escheduler/conf/alert_logback.xml (+31)
  4. dockerfile/dockerfile-1.0.x/conf/escheduler/conf/apiserver_logback.xml (+42)
  5. dockerfile/dockerfile-1.0.x/conf/escheduler/conf/application.properties (+16)
  6. dockerfile/dockerfile-1.0.x/conf/escheduler/conf/application_master.properties (+1)
  7. dockerfile/dockerfile-1.0.x/conf/escheduler/conf/common/common.properties (+27)
  8. dockerfile/dockerfile-1.0.x/conf/escheduler/conf/common/hadoop/hadoop.properties (+8)
  9. dockerfile/dockerfile-1.0.x/conf/escheduler/conf/config/install_config.conf (+3)
  10. dockerfile/dockerfile-1.0.x/conf/escheduler/conf/config/run_config.conf (+4)
  11. dockerfile/dockerfile-1.0.x/conf/escheduler/conf/dao/data_source.properties (+53)
  12. dockerfile/dockerfile-1.0.x/conf/escheduler/conf/env/.escheduler_env.sh (+4)
  13. dockerfile/dockerfile-1.0.x/conf/escheduler/conf/mail_templates/alert_mail_template.ftl (+1)
  14. dockerfile/dockerfile-1.0.x/conf/escheduler/conf/master.properties (+21)
  15. dockerfile/dockerfile-1.0.x/conf/escheduler/conf/master_logback.xml (+34)
  16. dockerfile/dockerfile-1.0.x/conf/escheduler/conf/quartz.properties (+39)
  17. dockerfile/dockerfile-1.0.x/conf/escheduler/conf/worker.properties (+15)
  18. dockerfile/dockerfile-1.0.x/conf/escheduler/conf/worker_logback.xml (+53)
  19. dockerfile/dockerfile-1.0.x/conf/escheduler/conf/zookeeper.properties (+25)
  20. dockerfile/dockerfile-1.0.x/conf/maven/settings.xml (+263)
  21. dockerfile/dockerfile-1.0.x/conf/nginx/default.conf (+31)
  22. dockerfile/dockerfile-1.0.x/conf/zookeeper/zoo.cfg (+28)
  23. dockerfile/dockerfile-1.0.x/hooks/build (+8)
  24. dockerfile/dockerfile-1.0.x/hooks/push (+8)
  25. dockerfile/dockerfile-1.0.x/startup.sh (+79)
  26. dockerfile/dockerfile-1.1.x/Dockerfile (+146)
  27. dockerfile/dockerfile-1.1.x/conf/escheduler/conf/alert.properties (+30)
  28. dockerfile/dockerfile-1.1.x/conf/escheduler/conf/alert_logback.xml (+31)
  29. dockerfile/dockerfile-1.1.x/conf/escheduler/conf/apiserver_logback.xml (+42)
  30. dockerfile/dockerfile-1.1.x/conf/escheduler/conf/application.properties (+19)
  31. dockerfile/dockerfile-1.1.x/conf/escheduler/conf/application_master.properties (+1)
  32. dockerfile/dockerfile-1.1.x/conf/escheduler/conf/common/common.properties (+42)
  33. dockerfile/dockerfile-1.1.x/conf/escheduler/conf/common/hadoop/hadoop.properties (+18)
  34. dockerfile/dockerfile-1.1.x/conf/escheduler/conf/config/install_config.conf (+3)
  35. dockerfile/dockerfile-1.1.x/conf/escheduler/conf/config/run_config.conf (+4)
  36. dockerfile/dockerfile-1.1.x/conf/escheduler/conf/dao/data_source.properties (+53)
  37. dockerfile/dockerfile-1.1.x/conf/escheduler/conf/env/.escheduler_env.sh (+3)
  38. dockerfile/dockerfile-1.1.x/conf/escheduler/conf/i18n/messages.properties (+229)
  39. dockerfile/dockerfile-1.1.x/conf/escheduler/conf/i18n/messages_en_US.properties (+229)
  40. dockerfile/dockerfile-1.1.x/conf/escheduler/conf/i18n/messages_zh_CN.properties (+227)
  41. dockerfile/dockerfile-1.1.x/conf/escheduler/conf/mail_templates/alert_mail_template.ftl (+1)
  42. dockerfile/dockerfile-1.1.x/conf/escheduler/conf/master.properties (+21)
  43. dockerfile/dockerfile-1.1.x/conf/escheduler/conf/master_logback.xml (+34)
  44. dockerfile/dockerfile-1.1.x/conf/escheduler/conf/quartz.properties (+39)
  45. dockerfile/dockerfile-1.1.x/conf/escheduler/conf/worker.properties (+15)
  46. dockerfile/dockerfile-1.1.x/conf/escheduler/conf/worker_logback.xml (+53)
  47. dockerfile/dockerfile-1.1.x/conf/escheduler/conf/zookeeper.properties (+25)
  48. dockerfile/dockerfile-1.1.x/conf/maven/settings.xml (+263)
  49. dockerfile/dockerfile-1.1.x/conf/nginx/default.conf (+31)
  50. dockerfile/dockerfile-1.1.x/conf/zookeeper/zoo.cfg (+28)
  51. dockerfile/dockerfile-1.1.x/hooks/build (+8)
  52. dockerfile/dockerfile-1.1.x/hooks/push (+8)
  53. dockerfile/dockerfile-1.1.x/startup.sh (+80)

dockerfile/dockerfile-1.0.x/Dockerfile (+145)

@@ -0,0 +1,145 @@
FROM ubuntu:18.04
MAINTAINER journey "825193156@qq.com"
ENV LANG=C.UTF-8
ARG version
#1, install JDK
RUN apt-get update \
&& apt-get -y install openjdk-8-jdk \
&& rm -rf /var/lib/apt/lists/*
ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64
ENV PATH $JAVA_HOME/bin:$PATH
# install wget
RUN apt-get update && \
apt-get -y install wget
#2, install ZooKeeper
#RUN cd /opt && \
# wget https://archive.apache.org/dist/zookeeper/zookeeper-3.4.6/zookeeper-3.4.6.tar.gz && \
# tar -zxvf zookeeper-3.4.6.tar.gz && \
# mv zookeeper-3.4.6 zookeeper && \
# rm -rf ./zookeeper-*tar.gz && \
# mkdir -p /tmp/zookeeper && \
# rm -rf /opt/zookeeper/conf/zoo_sample.cfg
RUN cd /opt && \
wget https://www-us.apache.org/dist/zookeeper/zookeeper-3.4.14/zookeeper-3.4.14.tar.gz && \
tar -zxvf zookeeper-3.4.14.tar.gz && \
mv zookeeper-3.4.14 zookeeper && \
rm -rf ./zookeeper-*tar.gz && \
mkdir -p /tmp/zookeeper && \
rm -rf /opt/zookeeper/conf/zoo_sample.cfg
ADD ./conf/zookeeper/zoo.cfg /opt/zookeeper/conf
ENV ZK_HOME=/opt/zookeeper
ENV PATH $PATH:$ZK_HOME/bin
#3, install Maven
RUN cd /opt && \
wget http://apache-mirror.rbc.ru/pub/apache/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz && \
tar -zxvf apache-maven-3.3.9-bin.tar.gz && \
mv apache-maven-3.3.9 maven && \
rm -rf ./apache-maven-*tar.gz && \
rm -rf /opt/maven/conf/settings.xml
ADD ./conf/maven/settings.xml /opt/maven/conf
ENV MAVEN_HOME=/opt/maven
ENV PATH $PATH:$MAVEN_HOME/bin
#4, install Node.js
RUN cd /opt && \
wget https://nodejs.org/download/release/v8.9.4/node-v8.9.4-linux-x64.tar.gz && \
tar -zxvf node-v8.9.4-linux-x64.tar.gz && \
mv node-v8.9.4-linux-x64 node && \
rm -rf ./node-v8.9.4-*tar.gz
ENV NODE_HOME=/opt/node
ENV PATH $PATH:$NODE_HOME/bin
#5, download escheduler
RUN cd /opt && \
wget https://github.com/analysys/EasyScheduler/archive/${version}.tar.gz && \
tar -zxvf ${version}.tar.gz && \
mv EasyScheduler-${version} easyscheduler_source && \
rm -rf ./${version}.tar.gz
#6, build the backend
RUN cd /opt/easyscheduler_source && \
mvn -U clean package assembly:assembly -Dmaven.test.skip=true
#7, build the frontend
RUN chmod -R 777 /opt/easyscheduler_source/escheduler-ui && \
cd /opt/easyscheduler_source/escheduler-ui && \
rm -rf /opt/easyscheduler_source/escheduler-ui/node_modules && \
npm install node-sass --unsafe-perm && \
npm install && \
npm run build
#8, install MySQL
RUN echo "deb http://cn.archive.ubuntu.com/ubuntu/ xenial main restricted universe multiverse" >> /etc/apt/sources.list
RUN echo "mysql-server mysql-server/root_password password root" | debconf-set-selections
RUN echo "mysql-server mysql-server/root_password_again password root" | debconf-set-selections
RUN apt-get update && \
apt-get -y install mysql-server-5.7 && \
mkdir -p /var/lib/mysql && \
mkdir -p /var/run/mysqld && \
mkdir -p /var/log/mysql && \
chown -R mysql:mysql /var/lib/mysql && \
chown -R mysql:mysql /var/run/mysqld && \
chown -R mysql:mysql /var/log/mysql
# UTF-8 and bind-address
RUN sed -i -e "$ a [client]\n\n[mysql]\n\n[mysqld]" /etc/mysql/my.cnf && \
sed -i -e "s/\(\[client\]\)/\1\ndefault-character-set = utf8/g" /etc/mysql/my.cnf && \
sed -i -e "s/\(\[mysql\]\)/\1\ndefault-character-set = utf8/g" /etc/mysql/my.cnf && \
sed -i -e "s/\(\[mysqld\]\)/\1\ninit_connect='SET NAMES utf8'\ncharacter-set-server = utf8\ncollation-server=utf8_general_ci\nbind-address = 0.0.0.0/g" /etc/mysql/my.cnf
#9, install nginx
RUN apt-get update && \
apt-get install -y nginx && \
rm -rf /var/lib/apt/lists/* && \
echo "\ndaemon off;" >> /etc/nginx/nginx.conf && \
chown -R www-data:www-data /var/lib/nginx
#10, modify escheduler configuration files
# backend configuration
RUN mkdir -p /opt/escheduler && \
tar -zxvf /opt/easyscheduler_source/target/escheduler-${version}.tar.gz -C /opt/escheduler && \
rm -rf /opt/escheduler/conf
ADD ./conf/escheduler/conf /opt/escheduler/conf
# frontend nginx configuration
ADD ./conf/nginx/default.conf /etc/nginx/conf.d
#11, expose ports
EXPOSE 2181 2888 3888 3306 80 12345 8888
#12, install sudo, python, vim, ping and ssh
RUN apt-get update && \
apt-get -y install sudo && \
apt-get -y install python && \
apt-get -y install vim && \
apt-get -y install iputils-ping && \
apt-get -y install net-tools && \
apt-get -y install openssh-server && \
apt-get -y install python-pip && \
pip install kazoo
COPY ./startup.sh /root/startup.sh
#13, fix permissions and set up the shell symlink
RUN chmod +x /root/startup.sh && \
chmod +x /opt/escheduler/script/create_escheduler.sh && \
chmod +x /opt/zookeeper/bin/zkServer.sh && \
chmod +x /opt/escheduler/bin/escheduler-daemon.sh && \
rm -rf /bin/sh && \
ln -s /bin/bash /bin/sh && \
mkdir -p /tmp/xls
ENTRYPOINT ["/root/startup.sh"]
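The version build argument above selects which EasyScheduler tag is downloaded and which assembled tarball is unpacked, so it has to be supplied at build time. A hand build outside the Docker Hub hooks might look like this sketch, where the version value and image tag are hypothetical:

    docker build --build-arg version=1.0.5 -t easyscheduler:1.0.5 dockerfile/dockerfile-1.0.x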

dockerfile/dockerfile-1.0.x/conf/escheduler/conf/alert.properties (+21)

@@ -0,0 +1,21 @@
#alert type is EMAIL/SMS
alert.type=EMAIL
# mail server configuration
mail.protocol=SMTP
mail.server.host=smtp.office365.com
mail.server.port=587
mail.sender=qiaozhanwei@outlook.com
mail.passwd=eschedulerBJEG
# TLS
mail.smtp.starttls.enable=true
# SSL
mail.smtp.ssl.enable=false
#xls file path, need to create it if it does not exist
xls.file.path=/tmp/xls

dockerfile/dockerfile-1.0.x/conf/escheduler/conf/alert_logback.xml (+31)

@@ -0,0 +1,31 @@
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
<property name="log.base" value="logs" />
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="ALERTLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.base}/escheduler-alert.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/escheduler-alert.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>20</maxHistory>
<maxFileSize>64MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="ALERTLOGFILE"/>
</root>
</configuration>

dockerfile/dockerfile-1.0.x/conf/escheduler/conf/apiserver_logback.xml (+42)

@@ -0,0 +1,42 @@
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds">
<logger name="org.apache.zookeeper" level="WARN"/>
<logger name="org.apache.hbase" level="WARN"/>
<logger name="org.apache.hadoop" level="WARN"/>
<property name="log.base" value="logs" />
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="APISERVERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- Log level filter -->
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<file>${log.base}/escheduler-api-server.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/escheduler-api-server.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>168</maxHistory>
<maxFileSize>64MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="APISERVERLOGFILE" />
</root>
</configuration>

dockerfile/dockerfile-1.0.x/conf/escheduler/conf/application.properties (+16)

@@ -0,0 +1,16 @@
# server port
server.port=12345
# session config
server.session.timeout=7200
server.context-path=/escheduler/
# file size limit for upload
spring.http.multipart.max-file-size=1024MB
spring.http.multipart.max-request-size=1024MB
#post content
server.max-http-post-size=5000000

dockerfile/dockerfile-1.0.x/conf/escheduler/conf/application_master.properties (+1)

@@ -0,0 +1 @@
logging.config=classpath:master_logback.xml

dockerfile/dockerfile-1.0.x/conf/escheduler/conf/common/common.properties (+27)

@@ -0,0 +1,27 @@
#task queue implementation, default "zookeeper"
escheduler.queue.impl=zookeeper
# user data directory path, self configuration, please make sure the directory exists and has read/write permissions
data.basedir.path=/tmp/escheduler
# directory path for user data download. self configuration, please make sure the directory exists and has read/write permissions
data.download.basedir.path=/tmp/escheduler/download
# process execute directory. self configuration, please make sure the directory exists and has read/write permissions
process.exec.basepath=/tmp/escheduler/exec
# data base dir, resource files will be stored under this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and has read/write permissions. "/escheduler" is recommended
data.store2hdfs.basepath=/escheduler
# whether hdfs starts
hdfs.startup.state=false
# system env path. self configuration, please make sure the directory and file exist and have read/write/execute permissions
escheduler.env.path=/opt/escheduler/conf/env/.escheduler_env.sh
#resource.view.suffixs
resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml
# is development state? default "false"
development.state=false

dockerfile/dockerfile-1.0.x/conf/escheduler/conf/common/hadoop/hadoop.properties (+8)

@@ -0,0 +1,8 @@
# HA or single namenode. If namenode is HA, core-site.xml and hdfs-site.xml need to be copied to the conf directory
fs.defaultFS=hdfs://mycluster:8020
# resourcemanager HA: note this needs IPs; leave this empty if single
yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx
# If it is a single resourcemanager, you only need to configure one host name. If it is resourcemanager HA, the default configuration is fine
yarn.application.status.address=http://ark1:8088/ws/v1/cluster/apps/%s

dockerfile/dockerfile-1.0.x/conf/escheduler/conf/config/install_config.conf (+3)

@@ -0,0 +1,3 @@
installPath=/data1_1T/escheduler
deployUser=escheduler
ips=ark0,ark1,ark2,ark3,ark4

dockerfile/dockerfile-1.0.x/conf/escheduler/conf/config/run_config.conf (+4)

@@ -0,0 +1,4 @@
masters=ark0,ark1
workers=ark2,ark3,ark4
alertServer=ark3
apiServers=ark1

dockerfile/dockerfile-1.0.x/conf/escheduler/conf/dao/data_source.properties (+53)

@@ -0,0 +1,53 @@
# base spring data source configuration
spring.datasource.type=com.alibaba.druid.pool.DruidDataSource
spring.datasource.driver-class-name=com.mysql.jdbc.Driver
spring.datasource.url=jdbc:mysql://127.0.0.1:3306/escheduler?characterEncoding=UTF-8
spring.datasource.username=root
spring.datasource.password=root@123
# connection configuration
spring.datasource.initialSize=5
# min connection number
spring.datasource.minIdle=5
# max connection number
spring.datasource.maxActive=50
# max wait time for get a connection in milliseconds. if configuring maxWait, fair locks are enabled by default and concurrency efficiency decreases.
# If necessary, unfair locks can be used by configuring the useUnfairLock attribute to true.
spring.datasource.maxWait=60000
# milliseconds for check to close free connections
spring.datasource.timeBetweenEvictionRunsMillis=60000
# interval in milliseconds at which the Destroy thread checks connections; the physical connection is closed if the connection idle time is greater than or equal to minEvictableIdleTimeMillis
spring.datasource.timeBetweenConnectErrorMillis=60000
# the longest time a connection remains idle without being evicted, in milliseconds
spring.datasource.minEvictableIdleTimeMillis=300000
# the SQL used to check whether the connection is valid; a query statement is required. If validationQuery is null, testOnBorrow, testOnReturn, and testWhileIdle will not work.
spring.datasource.validationQuery=SELECT 1
# timeout in seconds for the connection validity check
spring.datasource.validationQueryTimeout=3
# when applying for a connection, if it is detected that the connection has been idle longer than timeBetweenEvictionRunsMillis,
# validationQuery is performed to check whether the connection is valid
spring.datasource.testWhileIdle=true
# execute validation to check if the connection is valid when applying for a connection
spring.datasource.testOnBorrow=true
# execute validation to check if the connection is valid when the connection is returned
spring.datasource.testOnReturn=false
#execute validation to check if the connection is valid when the connection is returned
spring.datasource.testOnReturn=false
spring.datasource.defaultAutoCommit=true
spring.datasource.keepAlive=true
# open PSCache, specify count PSCache for every connection
spring.datasource.poolPreparedStatements=true
spring.datasource.maxPoolPreparedStatementPerConnectionSize=20
# data quality analysis is not currently in use. please ignore the following configuration
# task record flag
task.record.flag=false
task.record.datasource.url=jdbc:mysql://192.168.xx.xx:3306/etl?characterEncoding=UTF-8
task.record.datasource.username=xx
task.record.datasource.password=xx

dockerfile/dockerfile-1.0.x/conf/escheduler/conf/env/.escheduler_env.sh (+4)

@@ -0,0 +1,4 @@
export PYTHON_HOME=/usr/bin/python
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
export PATH=$PYTHON_HOME:$JAVA_HOME/bin:$PATH

dockerfile/dockerfile-1.0.x/conf/escheduler/conf/mail_templates/alert_mail_template.ftl (+1)

@@ -0,0 +1 @@
<!DOCTYPE HTML PUBLIC '-//W3C//DTD HTML 4.01 Transitional//EN' 'http://www.w3.org/TR/html4/loose.dtd'><html><head><title> easyscheduler</title><meta name='Keywords' content=''><meta name='Description' content=''><style type="text/css">table { margin-top:0px; padding-top:0px; border:1px solid; font-size: 14px; color: #333333; border-width: 1px; border-color: #666666; border-collapse: collapse; } table th { border-width: 1px; padding: 8px; border-style: solid; border-color: #666666; background-color: #dedede; } table td { border-width: 1px; padding: 8px; border-style: solid; border-color: #666666; background-color: #ffffff; }</style></head><body style="margin:0;padding:0"><table border="1px" cellpadding="5px" cellspacing="-10px"><thead><#if title??> ${title}</#if></thead><#if content??> ${content}</#if></table></body></html>

dockerfile/dockerfile-1.0.x/conf/escheduler/conf/master.properties (+21)

@@ -0,0 +1,21 @@
# master execute thread num
master.exec.threads=100
# master execute task number in parallel
master.exec.task.number=20
# master heartbeat interval
master.heartbeat.interval=10
# master commit task retry times
master.task.commit.retryTimes=5
# master commit task interval
master.task.commit.interval=100
# master can run only when the cpu load average is less than this value. default value : the number of cpu cores * 2
master.max.cpuload.avg=10
# master can run only when the available memory is larger than this reserved value. default value : physical memory * 1/10, unit is G.
master.reserved.memory=1

dockerfile/dockerfile-1.0.x/conf/escheduler/conf/master_logback.xml (+34)

@@ -0,0 +1,34 @@
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
<property name="log.base" value="logs" />
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="MASTERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.base}/escheduler-master.log</file>
<filter class="cn.escheduler.server.master.log.MasterLogFilter">
<level>INFO</level>
</filter>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/escheduler-master.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>168</maxHistory>
<maxFileSize>200MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="MASTERLOGFILE"/>
</root>
</configuration>

dockerfile/dockerfile-1.0.x/conf/escheduler/conf/quartz.properties (+39)

@@ -0,0 +1,39 @@
#============================================================================
# Configure Main Scheduler Properties
#============================================================================
org.quartz.scheduler.instanceName = EasyScheduler
org.quartz.scheduler.instanceId = AUTO
org.quartz.scheduler.makeSchedulerThreadDaemon = true
org.quartz.jobStore.useProperties = false
#============================================================================
# Configure ThreadPool
#============================================================================
org.quartz.threadPool.class = org.quartz.simpl.SimpleThreadPool
org.quartz.threadPool.makeThreadsDaemons = true
org.quartz.threadPool.threadCount = 25
org.quartz.threadPool.threadPriority = 5
#============================================================================
# Configure JobStore
#============================================================================
org.quartz.jobStore.class = org.quartz.impl.jdbcjobstore.JobStoreTX
org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.StdJDBCDelegate
org.quartz.jobStore.tablePrefix = QRTZ_
org.quartz.jobStore.isClustered = true
org.quartz.jobStore.misfireThreshold = 60000
org.quartz.jobStore.clusterCheckinInterval = 5000
org.quartz.jobStore.dataSource = myDs
#============================================================================
# Configure Datasources
#============================================================================
org.quartz.dataSource.myDs.driver = com.mysql.jdbc.Driver
org.quartz.dataSource.myDs.URL = jdbc:mysql://127.0.0.1:3306/escheduler?characterEncoding=utf8
org.quartz.dataSource.myDs.user = root
org.quartz.dataSource.myDs.password = root@123
org.quartz.dataSource.myDs.maxConnections = 10
org.quartz.dataSource.myDs.validationQuery = select 1

dockerfile/dockerfile-1.0.x/conf/escheduler/conf/worker.properties (+15)

@@ -0,0 +1,15 @@
# worker execute thread num
worker.exec.threads=100
# worker heartbeat interval
worker.heartbeat.interval=10
# number of tasks to fetch and submit at a time
worker.fetch.task.num = 3
# worker can run only when the cpu load average is less than this value. default value : the number of cpu cores * 2
#worker.max.cpuload.avg=10
# worker can run only when the available memory is larger than this reserved value. default value : physical memory * 1/6, unit is G.
worker.reserved.memory=1

dockerfile/dockerfile-1.0.x/conf/escheduler/conf/worker_logback.xml (+53)

@@ -0,0 +1,53 @@
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds">
<property name="log.base" value="logs"/>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="TASKLOGFILE" class="cn.escheduler.server.worker.log.TaskLogAppender">
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<filter class="cn.escheduler.server.worker.log.TaskLogFilter"></filter>
<file>${log.base}/{processDefinitionId}/{processInstanceId}/{taskInstanceId}.log</file>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
<append>true</append>
</appender>
<appender name="WORKERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.base}/escheduler-worker.log</file>
<filter class="cn.escheduler.server.worker.log.WorkerLogFilter">
<level>INFO</level>
</filter>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/escheduler-worker.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>168</maxHistory>
<maxFileSize>200MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="TASKLOGFILE"/>
<appender-ref ref="WORKERLOGFILE"/>
</root>
</configuration>

dockerfile/dockerfile-1.0.x/conf/escheduler/conf/zookeeper.properties (+25)

@@ -0,0 +1,25 @@
#zookeeper cluster
zookeeper.quorum=127.0.0.1:2181
#escheduler root directory
zookeeper.escheduler.root=/escheduler
#zookeeper server directory
zookeeper.escheduler.dead.servers=/escheduler/dead-servers
zookeeper.escheduler.masters=/escheduler/masters
zookeeper.escheduler.workers=/escheduler/workers
#zookeeper lock directory
zookeeper.escheduler.lock.masters=/escheduler/lock/masters
zookeeper.escheduler.lock.workers=/escheduler/lock/workers
#escheduler failover directory
zookeeper.escheduler.lock.failover.masters=/escheduler/lock/failover/masters
zookeeper.escheduler.lock.failover.workers=/escheduler/lock/failover/workers
zookeeper.escheduler.lock.failover.startup.masters=/escheduler/lock/failover/startup-masters
#zookeeper session and connection timeout, retry configuration
zookeeper.session.timeout=300
zookeeper.connection.timeout=300
zookeeper.retry.sleep=1000
zookeeper.retry.maxtime=5

dockerfile/dockerfile-1.0.x/conf/maven/settings.xml (+263)

@@ -0,0 +1,263 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<!--
| This is the configuration file for Maven. It can be specified at two levels:
|
| 1. User Level. This settings.xml file provides configuration for a single user,
| and is normally provided in ${user.home}/.m2/settings.xml.
|
| NOTE: This location can be overridden with the CLI option:
|
| -s /path/to/user/settings.xml
|
| 2. Global Level. This settings.xml file provides configuration for all Maven
| users on a machine (assuming they're all using the same Maven
| installation). It's normally provided in
| ${maven.home}/conf/settings.xml.
|
| NOTE: This location can be overridden with the CLI option:
|
| -gs /path/to/global/settings.xml
|
| The sections in this sample file are intended to give you a running start at
| getting the most out of your Maven installation. Where appropriate, the default
| values (values used when the setting is not specified) are provided.
|
|-->
<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">
<!-- localRepository
| The path to the local repository maven will use to store artifacts.
|
| Default: ${user.home}/.m2/repository
<localRepository>/path/to/local/repo</localRepository>
-->
<!-- interactiveMode
| This will determine whether maven prompts you when it needs input. If set to false,
| maven will use a sensible default value, perhaps based on some other setting, for
| the parameter in question.
|
| Default: true
<interactiveMode>true</interactiveMode>
-->
<!-- offline
| Determines whether maven should attempt to connect to the network when executing a build.
| This will have an effect on artifact downloads, artifact deployment, and others.
|
| Default: false
<offline>false</offline>
-->
<!-- pluginGroups
| This is a list of additional group identifiers that will be searched when resolving plugins by their prefix, i.e.
| when invoking a command line like "mvn prefix:goal". Maven will automatically add the group identifiers
| "org.apache.maven.plugins" and "org.codehaus.mojo" if these are not already contained in the list.
|-->
<pluginGroups>
<!-- pluginGroup
| Specifies a further group identifier to use for plugin lookup.
<pluginGroup>com.your.plugins</pluginGroup>
-->
</pluginGroups>
<!-- proxies
| This is a list of proxies which can be used on this machine to connect to the network.
| Unless otherwise specified (by system property or command-line switch), the first proxy
| specification in this list marked as active will be used.
|-->
<proxies>
<!-- proxy
| Specification for one proxy, to be used in connecting to the network.
|
<proxy>
<id>optional</id>
<active>true</active>
<protocol>http</protocol>
<username>proxyuser</username>
<password>proxypass</password>
<host>proxy.host.net</host>
<port>80</port>
<nonProxyHosts>local.net|some.host.com</nonProxyHosts>
</proxy>
-->
</proxies>
<!-- servers
| This is a list of authentication profiles, keyed by the server-id used within the system.
| Authentication profiles can be used whenever maven must make a connection to a remote server.
|-->
<servers>
<!-- server
| Specifies the authentication information to use when connecting to a particular server, identified by
| a unique name within the system (referred to by the 'id' attribute below).
|
| NOTE: You should either specify username/password OR privateKey/passphrase, since these pairings are
| used together.
|
<server>
<id>deploymentRepo</id>
<username>repouser</username>
<password>repopwd</password>
</server>
-->
<!-- Another sample, using keys to authenticate.
<server>
<id>siteServer</id>
<privateKey>/path/to/private/key</privateKey>
<passphrase>optional; leave empty if not used.</passphrase>
</server>
-->
</servers>
<!-- mirrors
| This is a list of mirrors to be used in downloading artifacts from remote repositories.
|
| It works like this: a POM may declare a repository to use in resolving certain artifacts.
| However, this repository may have problems with heavy traffic at times, so people have mirrored
| it to several places.
|
| That repository definition will have a unique id, so we can create a mirror reference for that
| repository, to be used as an alternate download site. The mirror site will be the preferred
| server for that repository.
|-->
<mirrors>
<!-- mirror
| Specifies a repository mirror site to use instead of a given repository. The repository that
| this mirror serves has an ID that matches the mirrorOf element of this mirror. IDs are used
| for inheritance and direct lookup purposes, and must be unique across the set of mirrors.
|
<mirror>
<id>mirrorId</id>
<mirrorOf>repositoryId</mirrorOf>
<name>Human Readable Name for this Mirror.</name>
<url>http://my.repository.com/repo/path</url>
</mirror>
-->
<mirror>
<id>nexus-aliyun</id>
<mirrorOf>central</mirrorOf>
<name>Nexus aliyun</name>
<url>http://maven.aliyun.com/nexus/content/groups/public</url>
</mirror>
</mirrors>
<!-- profiles
| This is a list of profiles which can be activated in a variety of ways, and which can modify
| the build process. Profiles provided in the settings.xml are intended to provide local machine-
| specific paths and repository locations which allow the build to work in the local environment.
|
| For example, if you have an integration testing plugin - like cactus - that needs to know where
| your Tomcat instance is installed, you can provide a variable here such that the variable is
| dereferenced during the build process to configure the cactus plugin.
|
| As noted above, profiles can be activated in a variety of ways. One way - the activeProfiles
| section of this document (settings.xml) - will be discussed later. Another way essentially
| relies on the detection of a system property, either matching a particular value for the property,
| or merely testing its existence. Profiles can also be activated by JDK version prefix, where a
| value of '1.4' might activate a profile when the build is executed on a JDK version of '1.4.2_07'.
| Finally, the list of active profiles can be specified directly from the command line.
|
| NOTE: For profiles defined in the settings.xml, you are restricted to specifying only artifact
| repositories, plugin repositories, and free-form properties to be used as configuration
| variables for plugins in the POM.
|
|-->
<profiles>
<!-- profile
| Specifies a set of introductions to the build process, to be activated using one or more of the
| mechanisms described above. For inheritance purposes, and to activate profiles via <activatedProfiles/>
| or the command line, profiles have to have an ID that is unique.
|
| An encouraged best practice for profile identification is to use a consistent naming convention
| for profiles, such as 'env-dev', 'env-test', 'env-production', 'user-jdcasey', 'user-brett', etc.
| This will make it more intuitive to understand what the set of introduced profiles is attempting
| to accomplish, particularly when you only have a list of profile id's for debug.
|
| This profile example uses the JDK version to trigger activation, and provides a JDK-specific repo.
<profile>
<id>jdk-1.4</id>
<activation>
<jdk>1.4</jdk>
</activation>
<repositories>
<repository>
<id>jdk14</id>
<name>Repository for JDK 1.4 builds</name>
<url>http://www.myhost.com/maven/jdk14</url>
<layout>default</layout>
<snapshotPolicy>always</snapshotPolicy>
</repository>
</repositories>
</profile>
-->
<!--
| Here is another profile, activated by the system property 'target-env' with a value of 'dev',
| which provides a specific path to the Tomcat instance. To use this, your plugin configuration
| might hypothetically look like:
|
| ...
| <plugin>
| <groupId>org.myco.myplugins</groupId>
| <artifactId>myplugin</artifactId>
|
| <configuration>
| <tomcatLocation>${tomcatPath}</tomcatLocation>
| </configuration>
| </plugin>
| ...
|
| NOTE: If you just wanted to inject this configuration whenever someone set 'target-env' to
| anything, you could just leave off the <value/> inside the activation-property.
|
<profile>
<id>env-dev</id>
<activation>
<property>
<name>target-env</name>
<value>dev</value>
</property>
</activation>
<properties>
<tomcatPath>/path/to/tomcat/instance</tomcatPath>
</properties>
</profile>
-->
</profiles>
<!-- activeProfiles
| List of profiles that are active for all builds.
|
<activeProfiles>
<activeProfile>alwaysActiveProfile</activeProfile>
<activeProfile>anotherAlwaysActiveProfile</activeProfile>
</activeProfiles>
-->
</settings>

dockerfile/dockerfile-1.0.x/conf/nginx/default.conf (+31)

@@ -0,0 +1,31 @@
server {
listen 8888;
server_name localhost;
#charset koi8-r;
#access_log /var/log/nginx/host.access.log main;
location / {
root /opt/easyscheduler_source/escheduler-ui/dist;
index index.html index.htm;
}
location /escheduler {
proxy_pass http://127.0.0.1:12345;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header x_real_ip $remote_addr;
proxy_set_header remote_addr $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_http_version 1.1;
proxy_connect_timeout 300s;
proxy_read_timeout 300s;
proxy_send_timeout 300s;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
#error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}
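This server block serves the compiled UI from escheduler-ui/dist on port 8888 and reverse-proxies /escheduler to the API server on 127.0.0.1:12345. A quick smoke test from inside the container, following directly from the config above, could be:

    curl -I http://127.0.0.1:8888/             # static UI from dist
    curl -I http://127.0.0.1:8888/escheduler/  # proxied through to the api-server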

dockerfile/dockerfile-1.0.x/conf/zookeeper/zoo.cfg (+28)

@@ -0,0 +1,28 @@
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/tmp/zookeeper
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1

dockerfile/dockerfile-1.0.x/hooks/build (+8)

@@ -0,0 +1,8 @@
#!/bin/bash
echo "------ escheduler start - build -------"
printenv
docker build --build-arg version=$version -t $DOCKER_REPO:$version .
echo "------ escheduler end - build -------"

dockerfile/dockerfile-1.0.x/hooks/push (+8)

@@ -0,0 +1,8 @@
#!/bin/bash
echo "------ push start -------"
printenv
docker push $DOCKER_REPO:$version
echo "------ push end -------"

dockerfile/dockerfile-1.0.x/startup.sh (+79)

@@ -0,0 +1,79 @@
#! /bin/bash
set -e
if [ `netstat -anop|grep mysql|wc -l` -gt 0 ];then
echo "MySQL is Running."
else
MYSQL_ROOT_PWD="root@123"
ESZ_DB="escheduler"
echo "启动mysql服务"
chown -R mysql:mysql /var/lib/mysql /var/run/mysqld
find /var/lib/mysql -type f -exec touch {} \; && service mysql restart $ sleep 10
if [ ! -f /nohup.out ];then
echo "设置mysql密码"
mysql --user=root --password=root -e "UPDATE mysql.user set authentication_string=password('$MYSQL_ROOT_PWD') where user='root'; FLUSH PRIVILEGES;"
echo "设置mysql权限"
mysql --user=root --password=$MYSQL_ROOT_PWD -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY '$MYSQL_ROOT_PWD' WITH GRANT OPTION; FLUSH PRIVILEGES;"
echo "创建escheduler数据库"
mysql --user=root --password=$MYSQL_ROOT_PWD -e "CREATE DATABASE IF NOT EXISTS \`$ESZ_DB\` CHARACTER SET utf8 COLLATE utf8_general_ci; FLUSH PRIVILEGES;"
echo "导入mysql数据"
nohup /opt/escheduler/script/create_escheduler.sh &
fi
if [ `mysql --user=root --password=$MYSQL_ROOT_PWD -s -r -e "SELECT count(TABLE_NAME) FROM information_schema.TABLES WHERE TABLE_SCHEMA='escheduler';" | grep -v count` -eq 38 ];then
echo "\`$ESZ_DB\` 表个数正确"
else
echo "\`$ESZ_DB\` 表个数不正确"
mysql --user=root --password=$MYSQL_ROOT_PWD -e "DROP DATABASE \`$ESZ_DB\`;"
echo "创建escheduler数据库"
mysql --user=root --password=$MYSQL_ROOT_PWD -e "CREATE DATABASE IF NOT EXISTS \`$ESZ_DB\` CHARACTER SET utf8 COLLATE utf8_general_ci; FLUSH PRIVILEGES;"
echo "导入mysql数据"
nohup /opt/escheduler/script/create_escheduler.sh &
fi
fi
/opt/zookeeper/bin/zkServer.sh restart
sleep 10
echo "启动api-server"
/opt/escheduler/bin/escheduler-daemon.sh stop api-server
/opt/escheduler/bin/escheduler-daemon.sh start api-server
echo "启动master-server"
/opt/escheduler/bin/escheduler-daemon.sh stop master-server
python /opt/escheduler/script/del_zk_node.py 127.0.0.1 /escheduler/masters
/opt/escheduler/bin/escheduler-daemon.sh start master-server
echo "启动worker-server"
/opt/escheduler/bin/escheduler-daemon.sh stop worker-server
python /opt/escheduler/script/del_zk_node.py 127.0.0.1 /escheduler/workers
/opt/escheduler/bin/escheduler-daemon.sh start worker-server
echo "启动logger-server"
/opt/escheduler/bin/escheduler-daemon.sh stop logger-server
/opt/escheduler/bin/escheduler-daemon.sh start logger-server
echo "启动alert-server"
/opt/escheduler/bin/escheduler-daemon.sh stop alert-server
/opt/escheduler/bin/escheduler-daemon.sh start alert-server
echo "启动nginx"
/etc/init.d/nginx stop
nginx &
while true
do
sleep 101
done
exec "$@"

dockerfile/dockerfile-1.1.x/Dockerfile (+146)

@@ -0,0 +1,146 @@
FROM ubuntu:18.04
MAINTAINER journey "825193156@qq.com"
ENV LANG=C.UTF-8
ARG version
ARG tar_version
#1, install JDK
RUN apt-get update \
&& apt-get -y install openjdk-8-jdk \
&& rm -rf /var/lib/apt/lists/*
ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64
ENV PATH $JAVA_HOME/bin:$PATH
# install wget
RUN apt-get update && \
apt-get -y install wget
#2, install ZooKeeper
#RUN cd /opt && \
# wget https://archive.apache.org/dist/zookeeper/zookeeper-3.4.6/zookeeper-3.4.6.tar.gz && \
# tar -zxvf zookeeper-3.4.6.tar.gz && \
# mv zookeeper-3.4.6 zookeeper && \
# rm -rf ./zookeeper-*tar.gz && \
# mkdir -p /tmp/zookeeper && \
# rm -rf /opt/zookeeper/conf/zoo_sample.cfg
RUN cd /opt && \
wget https://www-us.apache.org/dist/zookeeper/zookeeper-3.4.14/zookeeper-3.4.14.tar.gz && \
tar -zxvf zookeeper-3.4.14.tar.gz && \
mv zookeeper-3.4.14 zookeeper && \
rm -rf ./zookeeper-*tar.gz && \
mkdir -p /tmp/zookeeper && \
rm -rf /opt/zookeeper/conf/zoo_sample.cfg
ADD ./conf/zookeeper/zoo.cfg /opt/zookeeper/conf
ENV ZK_HOME=/opt/zookeeper
ENV PATH $PATH:$ZK_HOME/bin
#3, install Maven
RUN cd /opt && \
wget http://apache-mirror.rbc.ru/pub/apache/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz && \
tar -zxvf apache-maven-3.3.9-bin.tar.gz && \
mv apache-maven-3.3.9 maven && \
rm -rf ./apache-maven-*tar.gz && \
rm -rf /opt/maven/conf/settings.xml
ADD ./conf/maven/settings.xml /opt/maven/conf
ENV MAVEN_HOME=/opt/maven
ENV PATH $PATH:$MAVEN_HOME/bin
#4, install Node.js
RUN cd /opt && \
wget https://nodejs.org/download/release/v8.9.4/node-v8.9.4-linux-x64.tar.gz && \
tar -zxvf node-v8.9.4-linux-x64.tar.gz && \
mv node-v8.9.4-linux-x64 node && \
rm -rf ./node-v8.9.4-*tar.gz
ENV NODE_HOME=/opt/node
ENV PATH $PATH:$NODE_HOME/bin
#5, download escheduler
RUN cd /opt && \
wget https://github.com/analysys/EasyScheduler/archive/${version}.tar.gz && \
tar -zxvf ${version}.tar.gz && \
mv EasyScheduler-${version} easyscheduler_source && \
rm -rf ./${version}.tar.gz
#6, build the backend
RUN cd /opt/easyscheduler_source && \
mvn -U clean package assembly:assembly -Dmaven.test.skip=true
#7, build the frontend
RUN chmod -R 777 /opt/easyscheduler_source/escheduler-ui && \
cd /opt/easyscheduler_source/escheduler-ui && \
rm -rf /opt/easyscheduler_source/escheduler-ui/node_modules && \
npm install node-sass --unsafe-perm && \
npm install && \
npm run build
#8, install MySQL
RUN echo "deb http://cn.archive.ubuntu.com/ubuntu/ xenial main restricted universe multiverse" >> /etc/apt/sources.list
RUN echo "mysql-server mysql-server/root_password password root" | debconf-set-selections
RUN echo "mysql-server mysql-server/root_password_again password root" | debconf-set-selections
RUN apt-get update && \
apt-get -y install mysql-server-5.7 && \
mkdir -p /var/lib/mysql && \
mkdir -p /var/run/mysqld && \
mkdir -p /var/log/mysql && \
chown -R mysql:mysql /var/lib/mysql && \
chown -R mysql:mysql /var/run/mysqld && \
chown -R mysql:mysql /var/log/mysql
# UTF-8 and bind-address
RUN sed -i -e "$ a [client]\n\n[mysql]\n\n[mysqld]" /etc/mysql/my.cnf && \
sed -i -e "s/\(\[client\]\)/\1\ndefault-character-set = utf8/g" /etc/mysql/my.cnf && \
sed -i -e "s/\(\[mysql\]\)/\1\ndefault-character-set = utf8/g" /etc/mysql/my.cnf && \
sed -i -e "s/\(\[mysqld\]\)/\1\ninit_connect='SET NAMES utf8'\ncharacter-set-server = utf8\ncollation-server=utf8_general_ci\nbind-address = 0.0.0.0/g" /etc/mysql/my.cnf
#9, install nginx
RUN apt-get update && \
apt-get install -y nginx && \
rm -rf /var/lib/apt/lists/* && \
echo "\ndaemon off;" >> /etc/nginx/nginx.conf && \
chown -R www-data:www-data /var/lib/nginx
#10, modify escheduler configuration files
# backend configuration
RUN mkdir -p /opt/escheduler && \
tar -zxvf /opt/easyscheduler_source/target/escheduler-${tar_version}.tar.gz -C /opt/escheduler && \
rm -rf /opt/escheduler/conf
ADD ./conf/escheduler/conf /opt/escheduler/conf
# frontend nginx configuration
ADD ./conf/nginx/default.conf /etc/nginx/conf.d
#11, expose ports
EXPOSE 2181 2888 3888 3306 80 12345 8888
#12, install sudo, python, vim, ping and ssh
RUN apt-get update && \
apt-get -y install sudo && \
apt-get -y install python && \
apt-get -y install vim && \
apt-get -y install iputils-ping && \
apt-get -y install net-tools && \
apt-get -y install openssh-server && \
apt-get -y install python-pip && \
pip install kazoo
COPY ./startup.sh /root/startup.sh
#13, fix permissions and set up the shell symlink
RUN chmod +x /root/startup.sh && \
chmod +x /opt/escheduler/script/create_escheduler.sh && \
chmod +x /opt/zookeeper/bin/zkServer.sh && \
chmod +x /opt/escheduler/bin/escheduler-daemon.sh && \
rm -rf /bin/sh && \
ln -s /bin/bash /bin/sh && \
mkdir -p /tmp/xls
ENTRYPOINT ["/root/startup.sh"]
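Compared with the 1.0.x image, this Dockerfile takes a second build argument, tar_version, which has to match the version embedded in the assembled tarball name (escheduler-${tar_version}.tar.gz). A hand build would therefore pass both arguments; the values below are hypothetical:

    docker build --build-arg version=1.1.0 --build-arg tar_version=1.1.0-SNAPSHOT -t easyscheduler:1.1.0 dockerfile/dockerfile-1.1.x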

dockerfile/dockerfile-1.1.x/conf/escheduler/conf/alert.properties (+30)

@@ -0,0 +1,30 @@
#alert type is EMAIL/SMS
alert.type=EMAIL
# mail server configuration
mail.protocol=SMTP
mail.server.host=smtp.office365.com
mail.server.port=587
mail.sender=qiaozhanwei@outlook.com
mail.passwd=eschedulerBJEG
# TLS
mail.smtp.starttls.enable=true
# SSL
mail.smtp.ssl.enable=false
#xls file path, need to create it if it does not exist
xls.file.path=/tmp/xls
# Enterprise WeChat configuration
enterprise.wechat.corp.id=xxxxxxx
enterprise.wechat.secret=xxxxxxx
enterprise.wechat.agent.id=xxxxxxx
enterprise.wechat.users=xxxxxxx
enterprise.wechat.token.url=https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret
enterprise.wechat.push.url=https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token
enterprise.wechat.team.send.msg={\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}
enterprise.wechat.user.send.msg={\"touser\":\"$toUser\",\"agentid\":\"$agentId\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"$msg\"}}
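The $corpId, $secret, $token, $toParty, $agentId and $msg placeholders in these templates are filled in by the alert server at send time; a hand-rolled equivalent of the team message push, with every value hypothetical, would be:

    TOKEN=xxxxxxx   # would come from enterprise.wechat.token.url
    curl -s -X POST "https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$TOKEN" \
         -d '{"toparty":"1","agentid":"1000002","msgtype":"text","text":{"content":"test alert"},"safe":"0"}'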

dockerfile/dockerfile-1.1.x/conf/escheduler/conf/alert_logback.xml (+31)

@@ -0,0 +1,31 @@
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
<property name="log.base" value="logs" />
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="ALERTLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.base}/escheduler-alert.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/escheduler-alert.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>20</maxHistory>
<maxFileSize>64MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="ALERTLOGFILE"/>
</root>
</configuration>

dockerfile/dockerfile-1.1.x/conf/escheduler/conf/apiserver_logback.xml (+42)

@@ -0,0 +1,42 @@
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds">
<logger name="org.apache.zookeeper" level="WARN"/>
<logger name="org.apache.hbase" level="WARN"/>
<logger name="org.apache.hadoop" level="WARN"/>
<property name="log.base" value="logs" />
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="APISERVERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- Log level filter -->
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<file>${log.base}/escheduler-api-server.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/escheduler-api-server.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>168</maxHistory>
<maxFileSize>64MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="APISERVERLOGFILE" />
</root>
</configuration>

dockerfile/dockerfile-1.1.x/conf/escheduler/conf/application.properties (+19)

@@ -0,0 +1,19 @@
# server port
server.port=12345
# session config
server.servlet.session.timeout=7200
server.servlet.context-path=/escheduler/
# file size limit for upload
spring.servlet.multipart.max-file-size=1024MB
spring.servlet.multipart.max-request-size=1024MB
#post content
server.jetty.max-http-post-size=5000000
spring.messages.encoding=UTF-8
#i18n classpath folder, file prefix messages; if there are many files, use "," as the separator
spring.messages.basename=i18n/messages

dockerfile/dockerfile-1.1.x/conf/escheduler/conf/application_master.properties (+1)

@@ -0,0 +1 @@
logging.config=classpath:master_logback.xml

dockerfile/dockerfile-1.1.x/conf/escheduler/conf/common/common.properties (+42)

@@ -0,0 +1,42 @@
#task queue implementation, default "zookeeper"
escheduler.queue.impl=zookeeper
# user data directory path, self configuration, please make sure the directory exists and has read/write permissions
data.basedir.path=/tmp/escheduler
# directory path for user data download. self configuration, please make sure the directory exists and has read/write permissions
data.download.basedir.path=/tmp/escheduler/download
# process execute directory. self configuration, please make sure the directory exists and has read/write permissions
process.exec.basepath=/tmp/escheduler/exec
# Users who have permission to create directories under the HDFS root path
hdfs.root.user=hdfs
# data base dir, resource files will be stored under this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and has read/write permissions. "/escheduler" is recommended
data.store2hdfs.basepath=/escheduler
# resource upload startup type : HDFS,S3,NONE
res.upload.startup.type=NONE
# whether kerberos starts
hadoop.security.authentication.startup.state=false
# java.security.krb5.conf path
java.security.krb5.conf.path=/opt/krb5.conf
# loginUserFromKeytab user
login.user.keytab.username=hdfs-mycluster@ESZ.COM
# loginUserFromKeytab path
login.user.keytab.path=/opt/hdfs.headless.keytab
# system env path. self configuration, please make sure the directory and file exist and have read/write/execute permissions
escheduler.env.path=/opt/escheduler/conf/env/.escheduler_env.sh
#resource.view.suffixs
resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml
# is development state? default "false"
development.state=true

dockerfile/dockerfile-1.1.x/conf/escheduler/conf/common/hadoop/hadoop.properties (+18)

@@ -0,0 +1,18 @@
# HA or single namenode. If namenode is HA, core-site.xml and hdfs-site.xml
# need to be copied to the conf directory. S3 is supported, for example: s3a://escheduler
fs.defaultFS=hdfs://mycluster:8020
# needed for S3: s3 endpoint
fs.s3a.endpoint=http://192.168.199.91:9010
# needed for S3: s3 access key
fs.s3a.access.key=A3DXS30FO22544RE
# needed for S3: s3 secret key
fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK
# resourcemanager HA: note this needs IPs; leave this empty if single
yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx
# If it is a single resourcemanager, you only need to configure one host name. If it is resourcemanager HA, the default configuration is fine
yarn.application.status.address=http://ark1:8088/ws/v1/cluster/apps/%s

dockerfile/dockerfile-1.1.x/conf/escheduler/conf/config/install_config.conf (+3)

@@ -0,0 +1,3 @@
installPath=/data1_1T/escheduler
deployUser=escheduler
ips=ark0,ark1,ark2,ark3,ark4

dockerfile/dockerfile-1.1.x/conf/escheduler/conf/config/run_config.conf (+4)

@@ -0,0 +1,4 @@
masters=ark0,ark1
workers=ark2,ark3,ark4
alertServer=ark3
apiServers=ark1

dockerfile/dockerfile-1.1.x/conf/escheduler/conf/dao/data_source.properties (+53)

@@ -0,0 +1,53 @@
# base spring data source configuration
spring.datasource.type=com.alibaba.druid.pool.DruidDataSource
spring.datasource.driver-class-name=com.mysql.jdbc.Driver
spring.datasource.url=jdbc:mysql://127.0.0.1:3306/escheduler?characterEncoding=UTF-8
spring.datasource.username=root
spring.datasource.password=root@123
# connection configuration
spring.datasource.initialSize=5
# min connection number
spring.datasource.minIdle=5
# max connection number
spring.datasource.maxActive=50
# max wait time for get a connection in milliseconds. if configuring maxWait, fair locks are enabled by default and concurrency efficiency decreases.
# If necessary, unfair locks can be used by configuring the useUnfairLock attribute to true.
spring.datasource.maxWait=60000
# milliseconds for check to close free connections
spring.datasource.timeBetweenEvictionRunsMillis=60000
# interval in milliseconds at which the Destroy thread checks connections; the physical connection is closed if the connection idle time is greater than or equal to minEvictableIdleTimeMillis
spring.datasource.timeBetweenConnectErrorMillis=60000
# the longest time a connection remains idle without being evicted, in milliseconds
spring.datasource.minEvictableIdleTimeMillis=300000
# the SQL used to check whether the connection is valid; a query statement is required. If validationQuery is null, testOnBorrow, testOnReturn, and testWhileIdle will not work.
spring.datasource.validationQuery=SELECT 1
# timeout in seconds for the connection validity check
spring.datasource.validationQueryTimeout=3
# when applying for a connection, if it is detected that the connection has been idle longer than timeBetweenEvictionRunsMillis,
# validationQuery is performed to check whether the connection is valid
spring.datasource.testWhileIdle=true
# execute validation to check if the connection is valid when applying for a connection
spring.datasource.testOnBorrow=true
# execute validation to check if the connection is valid when the connection is returned
spring.datasource.testOnReturn=false
#execute validation to check if the connection is valid when the connection is returned
spring.datasource.testOnReturn=false
spring.datasource.defaultAutoCommit=true
spring.datasource.keepAlive=true
# open PSCache, specify count PSCache for every connection
spring.datasource.poolPreparedStatements=true
spring.datasource.maxPoolPreparedStatementPerConnectionSize=20
# data quality analysis is not currently in use. please ignore the following configuration
# task record flag
task.record.flag=false
task.record.datasource.url=jdbc:mysql://192.168.xx.xx:3306/etl?characterEncoding=UTF-8
task.record.datasource.username=xx
task.record.datasource.password=xx

dockerfile/dockerfile-1.1.x/conf/escheduler/conf/env/.escheduler_env.sh (+3)

@@ -0,0 +1,3 @@
export PYTHON_HOME=/usr/bin/python
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
export PATH=$PYTHON_HOME:$JAVA_HOME/bin:$PATH

dockerfile/dockerfile-1.1.x/conf/escheduler/conf/i18n/messages.properties (+229)

@@ -0,0 +1,229 @@
QUERY_SCHEDULE_LIST_NOTES=query schedule list
EXECUTE_PROCESS_TAG=execute process related operation
PROCESS_INSTANCE_EXECUTOR_TAG=process instance executor related operation
RUN_PROCESS_INSTANCE_NOTES=run process instance
START_NODE_LIST=start node list (node name)
TASK_DEPEND_TYPE=task depend type
COMMAND_TYPE=command type
RUN_MODE=run mode
TIMEOUT=timeout
EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=execute action to process instance
EXECUTE_TYPE=execute type
START_CHECK_PROCESS_DEFINITION_NOTES=start check process definition
GET_RECEIVER_CC_NOTES=query receiver cc
DESC=description
GROUP_NAME=group name
GROUP_TYPE=group type
QUERY_ALERT_GROUP_LIST_NOTES=query alert group list
UPDATE_ALERT_GROUP_NOTES=update alert group
DELETE_ALERT_GROUP_BY_ID_NOTES=delete alert group by id
VERIFY_ALERT_GROUP_NAME_NOTES=verify alert group name, check alert group exist or not
GRANT_ALERT_GROUP_NOTES=grant alert group
USER_IDS=user id list
ALERT_GROUP_TAG=alert group related operation
CREATE_ALERT_GROUP_NOTES=create alert group
WORKER_GROUP_TAG=worker group related operation
SAVE_WORKER_GROUP_NOTES=create worker group
WORKER_GROUP_NAME=worker group name
WORKER_IP_LIST=worker ip list, eg. 192.168.1.1,192.168.1.2
QUERY_WORKER_GROUP_PAGING_NOTES=query worker group paging
QUERY_WORKER_GROUP_LIST_NOTES=query worker group list
DELETE_WORKER_GROUP_BY_ID_NOTES=delete worker group by id
DATA_ANALYSIS_TAG=analysis related operation of task state
COUNT_TASK_STATE_NOTES=count task state
COUNT_PROCESS_INSTANCE_NOTES=count process instance state
COUNT_PROCESS_DEFINITION_BY_USER_NOTES=count process definition by user
COUNT_COMMAND_STATE_NOTES=count command state
COUNT_QUEUE_STATE_NOTES=count the running status of the task in the queue
ACCESS_TOKEN_TAG=access token related operation
MONITOR_TAG=monitor related operation
MASTER_LIST_NOTES=master server list
WORKER_LIST_NOTES=worker server list
QUERY_DATABASE_STATE_NOTES=query database state
QUERY_ZOOKEEPER_STATE_NOTES=query zookeeper state
TASK_STATE=task instance state
SOURCE_TABLE=source table
DEST_TABLE=dest table
TASK_DATE=task date
QUERY_HISTORY_TASK_RECORD_LIST_PAGING_NOTES=query history task record list paging
DATA_SOURCE_TAG=data source related operation
CREATE_DATA_SOURCE_NOTES=create data source
DATA_SOURCE_NAME=data source name
DATA_SOURCE_NOTE=data source desc
DB_TYPE=database type
DATA_SOURCE_HOST=data source host
DATA_SOURCE_PORT=data source port
DATABASE_NAME=database name
QUEUE_TAG=queue related operation
QUERY_QUEUE_LIST_NOTES=query queue list
QUERY_QUEUE_LIST_PAGING_NOTES=query queue list paging
CREATE_QUEUE_NOTES=create queue
YARN_QUEUE_NAME=yarn(hadoop) queue name
QUEUE_ID=queue id
TENANT_DESC=tenant desc
QUERY_TENANT_LIST_PAGING_NOTES=query tenant list paging
QUERY_TENANT_LIST_NOTES=query tenant list
UPDATE_TENANT_NOTES=update tenant
DELETE_TENANT_NOTES=delete tenant
RESOURCES_TAG=resource center related operation
CREATE_RESOURCE_NOTES=create resource
RESOURCE_TYPE=resource file type
RESOURCE_NAME=resource name
RESOURCE_DESC=resource file desc
RESOURCE_FILE=resource file
RESOURCE_ID=resource id
QUERY_RESOURCE_LIST_NOTES=query resource list
DELETE_RESOURCE_BY_ID_NOTES=delete resource by id
VIEW_RESOURCE_BY_ID_NOTES=view resource by id
ONLINE_CREATE_RESOURCE_NOTES=online create resource
SUFFIX=resource file suffix
CONTENT=resource file content
UPDATE_RESOURCE_NOTES=edit resource file online
DOWNLOAD_RESOURCE_NOTES=download resource file
CREATE_UDF_FUNCTION_NOTES=create udf function
UDF_TYPE=UDF type
FUNC_NAME=function name
CLASS_NAME=package and class name
ARG_TYPES=arguments
UDF_DESC=udf desc
VIEW_UDF_FUNCTION_NOTES=view udf function
UPDATE_UDF_FUNCTION_NOTES=update udf function
QUERY_UDF_FUNCTION_LIST_PAGING_NOTES=query udf function list paging
VERIFY_UDF_FUNCTION_NAME_NOTES=verify udf function name
DELETE_UDF_FUNCTION_NOTES=delete udf function
AUTHORIZED_FILE_NOTES=authorized file
UNAUTHORIZED_FILE_NOTES=unauthorized file
AUTHORIZED_UDF_FUNC_NOTES=authorized udf func
UNAUTHORIZED_UDF_FUNC_NOTES=unauthorized udf func
VERIFY_QUEUE_NOTES=verify queue
TENANT_TAG=tenant related operation
CREATE_TENANT_NOTES=create tenant
TENANT_CODE=tenant code
TENANT_NAME=tenant name
QUEUE_NAME=queue name
PASSWORD=password
DATA_SOURCE_OTHER=jdbc connection params, format:{"key1":"value1",...}
PROJECT_TAG=project related operation
CREATE_PROJECT_NOTES=create project
PROJECT_DESC=project description
UPDATE_PROJECT_NOTES=update project
PROJECT_ID=project id
QUERY_PROJECT_BY_ID_NOTES=query project info by project id
QUERY_PROJECT_LIST_PAGING_NOTES=query project list paging
DELETE_PROJECT_BY_ID_NOTES=delete project by id
QUERY_UNAUTHORIZED_PROJECT_NOTES=query unauthorized project
QUERY_AUTHORIZED_PROJECT_NOTES=query authorized project
TASK_RECORD_TAG=task record related operation
QUERY_TASK_RECORD_LIST_PAGING_NOTES=query task record list paging
CREATE_TOKEN_NOTES=create token, note: please login first
QUERY_ACCESS_TOKEN_LIST_NOTES=query access token list paging
SCHEDULE=schedule
WARNING_TYPE=warning type(sending strategy)
WARNING_GROUP_ID=warning group id
FAILURE_STRATEGY=failure strategy
RECEIVERS=receivers
RECEIVERS_CC=receivers cc
WORKER_GROUP_ID=worker server group id
PROCESS_INSTANCE_PRIORITY=process instance priority
UPDATE_SCHEDULE_NOTES=update schedule
SCHEDULE_ID=schedule id
ONLINE_SCHEDULE_NOTES=online schedule
OFFLINE_SCHEDULE_NOTES=offline schedule
QUERY_SCHEDULE_NOTES=query schedule
QUERY_SCHEDULE_LIST_PAGING_NOTES=query schedule list paging
LOGIN_TAG=User login related operations
USER_NAME=user name
PROJECT_NAME=project name
CREATE_PROCESS_DEFINITION_NOTES=create process definition
PROCESS_DEFINITION_NAME=process definition name
PROCESS_DEFINITION_JSON=process definition detail info (json format)
PROCESS_DEFINITION_LOCATIONS=process definition node locations info (json format)
PROCESS_INSTANCE_LOCATIONS=process instance node locations info (json format)
PROCESS_DEFINITION_CONNECTS=process definition node connects info (json format)
PROCESS_INSTANCE_CONNECTS=process instance node connects info (json format)
PROCESS_DEFINITION_DESC=process definition desc
PROCESS_DEFINITION_TAG=process definition related operation
SIGNOUT_NOTES=logout
USER_PASSWORD=user password
UPDATE_PROCESS_INSTANCE_NOTES=update process instance
QUERY_PROCESS_INSTANCE_LIST_NOTES=query process instance list
VERIFY_PROCCESS_DEFINITION_NAME_NOTES=verify process definition name
LOGIN_NOTES=user login
UPDATE_PROCCESS_DEFINITION_NOTES=update process definition
PROCESS_DEFINITION_ID=process definition id
RELEASE_PROCCESS_DEFINITION_NOTES=release process definition
QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=query process definition by id
QUERY_PROCCESS_DEFINITION_LIST_NOTES=query process definition list
QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=query process definition list paging
PAGE_NO=page no
PROCESS_INSTANCE_ID=process instance id
PROCESS_INSTANCE_JSON=process instance info(json format)
SCHEDULE_TIME=schedule time
SYNC_DEFINE=update the information of the process instance to the process definition
RECOVERY_PROCESS_INSTANCE_FLAG=whether to recover the process instance
SEARCH_VAL=search val
USER_ID=user id
PAGE_SIZE=page size
LIMIT=limit
VIEW_TREE_NOTES=view tree
GET_NODE_LIST_BY_DEFINITION_ID_NOTES=get task node list by process definition id
PROCESS_DEFINITION_ID_LIST=process definition id list
QUERY_PROCESS_INSTANCE_BY_ID_NOTES=query process instance by process instance id
DELETE_PROCESS_INSTANCE_BY_ID_NOTES=delete process instance by process instance id
TASK_ID=task instance id
SKIP_LINE_NUM=skip line num
QUERY_TASK_INSTANCE_LOG_NOTES=query task instance log
DOWNLOAD_TASK_INSTANCE_LOG_NOTES=download task instance log
USERS_TAG=users related operation
SCHEDULER_TAG=scheduler related operation
CREATE_SCHEDULE_NOTES=create schedule
CREATE_USER_NOTES=create user
TENANT_ID=tenant id
QUEUE=queue
EMAIL=email
PHONE=phone
QUERY_USER_LIST_NOTES=query user list
UPDATE_USER_NOTES=update user
DELETE_USER_BY_ID_NOTES=delete user by id
GRANT_PROJECT_NOTES=grant project
PROJECT_IDS=project ids(string format, multiple projects separated by ",")
GRANT_RESOURCE_NOTES=grant resource file
RESOURCE_IDS=resource ids(string format, multiple resources separated by ",")
GET_USER_INFO_NOTES=get user info
LIST_USER_NOTES=list user
VERIFY_USER_NAME_NOTES=verify user name
UNAUTHORIZED_USER_NOTES=cancel authorization
ALERT_GROUP_ID=alert group id
AUTHORIZED_USER_NOTES=authorized user
GRANT_UDF_FUNC_NOTES=grant udf function
UDF_IDS=udf ids(string format, multiple udf functions separated by ",")
GRANT_DATASOURCE_NOTES=grant datasource
DATASOURCE_IDS=datasource ids(string format, multiple datasources separated by ",")
QUERY_SUBPROCESS_INSTANCE_BY_TASK_ID_NOTES=query subprocess instance by task instance id
QUERY_PARENT_PROCESS_INSTANCE_BY_SUB_PROCESS_INSTANCE_ID_NOTES=query parent process instance info by sub process instance id
QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES=query process instance global variables and local variables
VIEW_GANTT_NOTES=view gantt
SUB_PROCESS_INSTANCE_ID=sub process instance id
TASK_NAME=task instance name
TASK_INSTANCE_TAG=task instance related operation
LOGGER_TAG=log related operation
PROCESS_INSTANCE_TAG=process instance related operation
EXECUTION_STATUS=running status for workflow and task nodes
HOST=ip address of running task
START_DATE=start date
END_DATE=end date
QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES=query task list by process instance id
UPDATE_DATA_SOURCE_NOTES=update data source
DATA_SOURCE_ID=data source id
QUERY_DATA_SOURCE_NOTES=query data source by id
QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES=query data source list by database type
QUERY_DATA_SOURCE_LIST_PAGING_NOTES=query data source list paging
CONNECT_DATA_SOURCE_NOTES=connect data source
CONNECT_DATA_SOURCE_TEST_NOTES=connect data source test
DELETE_DATA_SOURCE_NOTES=delete data source
VERIFY_DATA_SOURCE_NOTES=verify data source
UNAUTHORIZED_DATA_SOURCE_NOTES=unauthorized data source
AUTHORIZED_DATA_SOURCE_NOTES=authorized data source
DELETE_SCHEDULER_BY_ID_NOTES=delete scheduler by id

229
dockerfile/dockerfile-1.1.x/conf/escheduler/conf/i18n/messages_en_US.properties

@ -0,0 +1,229 @@
QUERY_SCHEDULE_LIST_NOTES=query schedule list
EXECUTE_PROCESS_TAG=execute process related operation
PROCESS_INSTANCE_EXECUTOR_TAG=process instance executor related operation
RUN_PROCESS_INSTANCE_NOTES=run process instance
START_NODE_LIST=start node list(node name)
TASK_DEPEND_TYPE=task depend type
COMMAND_TYPE=command type
RUN_MODE=run mode
TIMEOUT=timeout
EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=execute action to process instance
EXECUTE_TYPE=execute type
START_CHECK_PROCESS_DEFINITION_NOTES=start check process definition
GET_RECEIVER_CC_NOTES=query receiver cc
DESC=description
GROUP_NAME=group name
GROUP_TYPE=group type
QUERY_ALERT_GROUP_LIST_NOTES=query alert group list
UPDATE_ALERT_GROUP_NOTES=update alert group
DELETE_ALERT_GROUP_BY_ID_NOTES=delete alert group by id
VERIFY_ALERT_GROUP_NAME_NOTES=verify alert group name, check whether the alert group exists
GRANT_ALERT_GROUP_NOTES=grant alert group
USER_IDS=user id list
ALERT_GROUP_TAG=alert group related operation
CREATE_ALERT_GROUP_NOTES=create alert group
WORKER_GROUP_TAG=worker group related operation
SAVE_WORKER_GROUP_NOTES=create worker group
WORKER_GROUP_NAME=worker group name
WORKER_IP_LIST=worker ip list, eg. 192.168.1.1,192.168.1.2
QUERY_WORKER_GROUP_PAGING_NOTES=query worker group paging
QUERY_WORKER_GROUP_LIST_NOTES=query worker group list
DELETE_WORKER_GROUP_BY_ID_NOTES=delete worker group by id
DATA_ANALYSIS_TAG=analysis related operation of task state
COUNT_TASK_STATE_NOTES=count task state
COUNT_PROCESS_INSTANCE_NOTES=count process instance state
COUNT_PROCESS_DEFINITION_BY_USER_NOTES=count process definition by user
COUNT_COMMAND_STATE_NOTES=count command state
COUNT_QUEUE_STATE_NOTES=count the running status of the task in the queue
ACCESS_TOKEN_TAG=access token related operation
MONITOR_TAG=monitor related operation
MASTER_LIST_NOTES=master server list
WORKER_LIST_NOTES=worker server list
QUERY_DATABASE_STATE_NOTES=query database state
QUERY_ZOOKEEPER_STATE_NOTES=query zookeeper state
TASK_STATE=task instance state
SOURCE_TABLE=source table
DEST_TABLE=dest table
TASK_DATE=task date
QUERY_HISTORY_TASK_RECORD_LIST_PAGING_NOTES=query history task record list paging
DATA_SOURCE_TAG=data source related operation
CREATE_DATA_SOURCE_NOTES=create data source
DATA_SOURCE_NAME=data source name
DATA_SOURCE_NOTE=data source desc
DB_TYPE=database type
DATA_SOURCE_HOST=data source host
DATA_SOURCE_PORT=data source port
DATABASE_NAME=database name
QUEUE_TAG=queue related operation
QUERY_QUEUE_LIST_NOTES=query queue list
QUERY_QUEUE_LIST_PAGING_NOTES=query queue list paging
CREATE_QUEUE_NOTES=create queue
YARN_QUEUE_NAME=yarn(hadoop) queue name
QUEUE_ID=queue id
TENANT_DESC=tenant desc
QUERY_TENANT_LIST_PAGING_NOTES=query tenant list paging
QUERY_TENANT_LIST_NOTES=query tenant list
UPDATE_TENANT_NOTES=update tenant
DELETE_TENANT_NOTES=delete tenant
RESOURCES_TAG=resource center related operation
CREATE_RESOURCE_NOTES=create resource
RESOURCE_TYPE=resource file type
RESOURCE_NAME=resource name
RESOURCE_DESC=resource file desc
RESOURCE_FILE=resource file
RESOURCE_ID=resource id
QUERY_RESOURCE_LIST_NOTES=query resource list
DELETE_RESOURCE_BY_ID_NOTES=delete resource by id
VIEW_RESOURCE_BY_ID_NOTES=view resource by id
ONLINE_CREATE_RESOURCE_NOTES=online create resource
SUFFIX=resource file suffix
CONTENT=resource file content
UPDATE_RESOURCE_NOTES=edit resource file online
DOWNLOAD_RESOURCE_NOTES=download resource file
CREATE_UDF_FUNCTION_NOTES=create udf function
UDF_TYPE=UDF type
FUNC_NAME=function name
CLASS_NAME=package and class name
ARG_TYPES=arguments
UDF_DESC=udf desc
VIEW_UDF_FUNCTION_NOTES=view udf function
UPDATE_UDF_FUNCTION_NOTES=update udf function
QUERY_UDF_FUNCTION_LIST_PAGING_NOTES=query udf function list paging
VERIFY_UDF_FUNCTION_NAME_NOTES=verify udf function name
DELETE_UDF_FUNCTION_NOTES=delete udf function
AUTHORIZED_FILE_NOTES=authorized file
UNAUTHORIZED_FILE_NOTES=unauthorized file
AUTHORIZED_UDF_FUNC_NOTES=authorized udf func
UNAUTHORIZED_UDF_FUNC_NOTES=unauthorized udf func
VERIFY_QUEUE_NOTES=verify queue
TENANT_TAG=tenant related operation
CREATE_TENANT_NOTES=create tenant
TENANT_CODE=tenant code
TENANT_NAME=tenant name
QUEUE_NAME=queue name
PASSWORD=password
DATA_SOURCE_OTHER=jdbc connection params, format:{"key1":"value1",...}
PROJECT_TAG=project related operation
CREATE_PROJECT_NOTES=create project
PROJECT_DESC=project description
UPDATE_PROJECT_NOTES=update project
PROJECT_ID=project id
QUERY_PROJECT_BY_ID_NOTES=query project info by project id
QUERY_PROJECT_LIST_PAGING_NOTES=query project list paging
DELETE_PROJECT_BY_ID_NOTES=delete project by id
QUERY_UNAUTHORIZED_PROJECT_NOTES=query unauthorized project
QUERY_AUTHORIZED_PROJECT_NOTES=query authorized project
TASK_RECORD_TAG=task record related operation
QUERY_TASK_RECORD_LIST_PAGING_NOTES=query task record list paging
CREATE_TOKEN_NOTES=create token, note: please login first
QUERY_ACCESS_TOKEN_LIST_NOTES=query access token list paging
SCHEDULE=schedule
WARNING_TYPE=warning type(sending strategy)
WARNING_GROUP_ID=warning group id
FAILURE_STRATEGY=failure strategy
RECEIVERS=receivers
RECEIVERS_CC=receivers cc
WORKER_GROUP_ID=worker server group id
PROCESS_INSTANCE_PRIORITY=process instance priority
UPDATE_SCHEDULE_NOTES=update schedule
SCHEDULE_ID=schedule id
ONLINE_SCHEDULE_NOTES=online schedule
OFFLINE_SCHEDULE_NOTES=offline schedule
QUERY_SCHEDULE_NOTES=query schedule
QUERY_SCHEDULE_LIST_PAGING_NOTES=query schedule list paging
LOGIN_TAG=User login related operations
USER_NAME=user name
PROJECT_NAME=project name
CREATE_PROCESS_DEFINITION_NOTES=create process definition
PROCESS_DEFINITION_NAME=process definition name
PROCESS_DEFINITION_JSON=process definition detail info (json format)
PROCESS_DEFINITION_LOCATIONS=process definition node locations info (json format)
PROCESS_INSTANCE_LOCATIONS=process instance node locations info (json format)
PROCESS_DEFINITION_CONNECTS=process definition node connects info (json format)
PROCESS_INSTANCE_CONNECTS=process instance node connects info (json format)
PROCESS_DEFINITION_DESC=process definition desc
PROCESS_DEFINITION_TAG=process definition related operation
SIGNOUT_NOTES=logout
USER_PASSWORD=user password
UPDATE_PROCESS_INSTANCE_NOTES=update process instance
QUERY_PROCESS_INSTANCE_LIST_NOTES=query process instance list
VERIFY_PROCCESS_DEFINITION_NAME_NOTES=verify process definition name
LOGIN_NOTES=user login
UPDATE_PROCCESS_DEFINITION_NOTES=update process definition
PROCESS_DEFINITION_ID=process definition id
RELEASE_PROCCESS_DEFINITION_NOTES=release process definition
QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=query process definition by id
QUERY_PROCCESS_DEFINITION_LIST_NOTES=query process definition list
QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=query process definition list paging
PAGE_NO=page no
PROCESS_INSTANCE_ID=process instance id
PROCESS_INSTANCE_JSON=process instance info(json format)
SCHEDULE_TIME=schedule time
SYNC_DEFINE=update the information of the process instance to the process definition
RECOVERY_PROCESS_INSTANCE_FLAG=whether to recover the process instance
SEARCH_VAL=search val
USER_ID=user id
PAGE_SIZE=page size
LIMIT=limit
VIEW_TREE_NOTES=view tree
GET_NODE_LIST_BY_DEFINITION_ID_NOTES=get task node list by process definition id
PROCESS_DEFINITION_ID_LIST=process definition id list
QUERY_PROCESS_INSTANCE_BY_ID_NOTES=query process instance by process instance id
DELETE_PROCESS_INSTANCE_BY_ID_NOTES=delete process instance by process instance id
TASK_ID=task instance id
SKIP_LINE_NUM=skip line num
QUERY_TASK_INSTANCE_LOG_NOTES=query task instance log
DOWNLOAD_TASK_INSTANCE_LOG_NOTES=download task instance log
USERS_TAG=users related operation
SCHEDULER_TAG=scheduler related operation
CREATE_SCHEDULE_NOTES=create schedule
CREATE_USER_NOTES=create user
TENANT_ID=tenant id
QUEUE=queue
EMAIL=email
PHONE=phone
QUERY_USER_LIST_NOTES=query user list
UPDATE_USER_NOTES=update user
DELETE_USER_BY_ID_NOTES=delete user by id
GRANT_PROJECT_NOTES=grant project
PROJECT_IDS=project ids(string format, multiple projects separated by ",")
GRANT_RESOURCE_NOTES=grant resource file
RESOURCE_IDS=resource ids(string format, multiple resources separated by ",")
GET_USER_INFO_NOTES=get user info
LIST_USER_NOTES=list user
VERIFY_USER_NAME_NOTES=verify user name
UNAUTHORIZED_USER_NOTES=cancel authorization
ALERT_GROUP_ID=alert group id
AUTHORIZED_USER_NOTES=authorized user
GRANT_UDF_FUNC_NOTES=grant udf function
UDF_IDS=udf ids(string format, multiple udf functions separated by ",")
GRANT_DATASOURCE_NOTES=grant datasource
DATASOURCE_IDS=datasource ids(string format, multiple datasources separated by ",")
QUERY_SUBPROCESS_INSTANCE_BY_TASK_ID_NOTES=query subprocess instance by task instance id
QUERY_PARENT_PROCESS_INSTANCE_BY_SUB_PROCESS_INSTANCE_ID_NOTES=query parent process instance info by sub process instance id
QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES=query process instance global variables and local variables
VIEW_GANTT_NOTES=view gantt
SUB_PROCESS_INSTANCE_ID=sub process instance id
TASK_NAME=task instance name
TASK_INSTANCE_TAG=task instance related operation
LOGGER_TAG=log related operation
PROCESS_INSTANCE_TAG=process instance related operation
EXECUTION_STATUS=running status for workflow and task nodes
HOST=ip address of running task
START_DATE=start date
END_DATE=end date
QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES=query task list by process instance id
UPDATE_DATA_SOURCE_NOTES=update data source
DATA_SOURCE_ID=data source id
QUERY_DATA_SOURCE_NOTES=query data source by id
QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES=query data source list by database type
QUERY_DATA_SOURCE_LIST_PAGING_NOTES=query data source list paging
CONNECT_DATA_SOURCE_NOTES=connect data source
CONNECT_DATA_SOURCE_TEST_NOTES=connect data source test
DELETE_DATA_SOURCE_NOTES=delete data source
VERIFY_DATA_SOURCE_NOTES=verify data source
UNAUTHORIZED_DATA_SOURCE_NOTES=unauthorized data source
AUTHORIZED_DATA_SOURCE_NOTES=authorized data source
DELETE_SCHEDULER_BY_ID_NOTES=delete scheduler by id

227
dockerfile/dockerfile-1.1.x/conf/escheduler/conf/i18n/messages_zh_CN.properties

@ -0,0 +1,227 @@
QUERY_SCHEDULE_LIST_NOTES=查询定时列表
PROCESS_INSTANCE_EXECUTOR_TAG=流程实例执行相关操作
RUN_PROCESS_INSTANCE_NOTES=运行流程实例
START_NODE_LIST=开始节点列表(节点name)
TASK_DEPEND_TYPE=任务依赖类型
COMMAND_TYPE=指令类型
RUN_MODE=运行模式
TIMEOUT=超时时间
EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=执行流程实例的各种操作(暂停、停止、重跑、恢复等)
EXECUTE_TYPE=执行类型
START_CHECK_PROCESS_DEFINITION_NOTES=检查流程定义
DESC=备注(描述)
GROUP_NAME=组名称
GROUP_TYPE=组类型
QUERY_ALERT_GROUP_LIST_NOTES=告警组列表
UPDATE_ALERT_GROUP_NOTES=编辑(更新)告警组
DELETE_ALERT_GROUP_BY_ID_NOTES=删除告警组通过ID
VERIFY_ALERT_GROUP_NAME_NOTES=检查告警组是否存在
GRANT_ALERT_GROUP_NOTES=授权告警组
USER_IDS=用户ID列表
ALERT_GROUP_TAG=告警组相关操作
WORKER_GROUP_TAG=Worker分组管理
SAVE_WORKER_GROUP_NOTES=创建Worker分组
WORKER_GROUP_NAME=Worker分组名称
WORKER_IP_LIST=Worker ip列表,注意:多个IP地址以逗号分割
QUERY_WORKER_GROUP_PAGING_NOTES=分页查询Worker分组
QUERY_WORKER_GROUP_LIST_NOTES=查询worker group分组
DELETE_WORKER_GROUP_BY_ID_NOTES=删除worker group通过ID
DATA_ANALYSIS_TAG=任务状态分析相关操作
COUNT_TASK_STATE_NOTES=任务状态统计
COUNT_PROCESS_INSTANCE_NOTES=统计流程实例状态
COUNT_PROCESS_DEFINITION_BY_USER_NOTES=统计用户创建的流程定义
COUNT_COMMAND_STATE_NOTES=统计命令状态
COUNT_QUEUE_STATE_NOTES=统计队列里任务状态
ACCESS_TOKEN_TAG=access token相关操作,需要先登录
MONITOR_TAG=监控相关操作
MASTER_LIST_NOTES=master服务列表
WORKER_LIST_NOTES=worker服务列表
QUERY_DATABASE_STATE_NOTES=查询数据库状态
QUERY_ZOOKEEPER_STATE_NOTES=查询Zookeeper状态
TASK_STATE=任务实例状态
SOURCE_TABLE=源表
DEST_TABLE=目标表
TASK_DATE=任务时间
QUERY_HISTORY_TASK_RECORD_LIST_PAGING_NOTES=分页查询历史任务记录列表
DATA_SOURCE_TAG=数据源相关操作
CREATE_DATA_SOURCE_NOTES=创建数据源
DATA_SOURCE_NAME=数据源名称
DATA_SOURCE_NOTE=数据源描述
DB_TYPE=数据源类型
DATA_SOURCE_HOST=IP主机名
DATA_SOURCE_PORT=数据源端口
DATABASE_NAME=数据库名
QUEUE_TAG=队列相关操作
QUERY_QUEUE_LIST_NOTES=查询队列列表
QUERY_QUEUE_LIST_PAGING_NOTES=分页查询队列列表
CREATE_QUEUE_NOTES=创建队列
YARN_QUEUE_NAME=hadoop yarn队列名
QUEUE_ID=队列ID
TENANT_DESC=租户描述
QUERY_TENANT_LIST_PAGING_NOTES=分页查询租户列表
QUERY_TENANT_LIST_NOTES=查询租户列表
UPDATE_TENANT_NOTES=更新租户
DELETE_TENANT_NOTES=删除租户
RESOURCES_TAG=资源中心相关操作
CREATE_RESOURCE_NOTES=创建资源
RESOURCE_TYPE=资源文件类型
RESOURCE_NAME=资源文件名称
RESOURCE_DESC=资源文件描述
RESOURCE_FILE=资源文件
RESOURCE_ID=资源ID
QUERY_RESOURCE_LIST_NOTES=查询资源列表
DELETE_RESOURCE_BY_ID_NOTES=删除资源通过ID
VIEW_RESOURCE_BY_ID_NOTES=浏览资源通过ID
ONLINE_CREATE_RESOURCE_NOTES=在线创建资源
SUFFIX=资源文件后缀
CONTENT=资源文件内容
UPDATE_RESOURCE_NOTES=在线更新资源文件
DOWNLOAD_RESOURCE_NOTES=下载资源文件
CREATE_UDF_FUNCTION_NOTES=创建UDF函数
UDF_TYPE=UDF类型
FUNC_NAME=函数名称
CLASS_NAME=包名类名
ARG_TYPES=参数
UDF_DESC=udf描述,使用说明
VIEW_UDF_FUNCTION_NOTES=查看udf函数
UPDATE_UDF_FUNCTION_NOTES=更新udf函数
QUERY_UDF_FUNCTION_LIST_PAGING_NOTES=分页查询udf函数列表
VERIFY_UDF_FUNCTION_NAME_NOTES=验证udf函数名
DELETE_UDF_FUNCTION_NOTES=删除UDF函数
AUTHORIZED_FILE_NOTES=授权文件
UNAUTHORIZED_FILE_NOTES=取消授权文件
AUTHORIZED_UDF_FUNC_NOTES=授权udf函数
UNAUTHORIZED_UDF_FUNC_NOTES=取消udf函数授权
VERIFY_QUEUE_NOTES=验证队列
TENANT_TAG=租户相关操作
CREATE_TENANT_NOTES=创建租户
TENANT_CODE=租户编码
TENANT_NAME=租户名称
QUEUE_NAME=队列名
PASSWORD=密码
DATA_SOURCE_OTHER=jdbc连接参数,格式为:{"key1":"value1",...}
PROJECT_TAG=项目相关操作
CREATE_PROJECT_NOTES=创建项目
PROJECT_DESC=项目描述
UPDATE_PROJECT_NOTES=更新项目
PROJECT_ID=项目ID
QUERY_PROJECT_BY_ID_NOTES=通过项目ID查询项目信息
QUERY_PROJECT_LIST_PAGING_NOTES=分页查询项目列表
DELETE_PROJECT_BY_ID_NOTES=删除项目通过ID
QUERY_UNAUTHORIZED_PROJECT_NOTES=查询未授权的项目
QUERY_AUTHORIZED_PROJECT_NOTES=查询授权项目
TASK_RECORD_TAG=任务记录相关操作
QUERY_TASK_RECORD_LIST_PAGING_NOTES=分页查询任务记录列表
CREATE_TOKEN_NOTES=创建token,注意需要先登录
QUERY_ACCESS_TOKEN_LIST_NOTES=分页查询access token列表
SCHEDULE=定时
WARNING_TYPE=发送策略
WARNING_GROUP_ID=发送组ID
FAILURE_STRATEGY=失败策略
RECEIVERS=收件人
RECEIVERS_CC=收件人(抄送)
WORKER_GROUP_ID=Worker Server分组ID
PROCESS_INSTANCE_PRIORITY=流程实例优先级
UPDATE_SCHEDULE_NOTES=更新定时
SCHEDULE_ID=定时ID
ONLINE_SCHEDULE_NOTES=定时上线
OFFLINE_SCHEDULE_NOTES=定时下线
QUERY_SCHEDULE_NOTES=查询定时
QUERY_SCHEDULE_LIST_PAGING_NOTES=分页查询定时
LOGIN_TAG=用户登录相关操作
USER_NAME=用户名
PROJECT_NAME=项目名称
CREATE_PROCESS_DEFINITION_NOTES=创建流程定义
PROCESS_DEFINITION_NAME=流程定义名称
PROCESS_DEFINITION_JSON=流程定义详细信息(json格式)
PROCESS_DEFINITION_LOCATIONS=流程定义节点坐标位置信息(json格式)
PROCESS_INSTANCE_LOCATIONS=流程实例节点坐标位置信息(json格式)
PROCESS_DEFINITION_CONNECTS=流程定义节点图标连接信息(json格式)
PROCESS_INSTANCE_CONNECTS=流程实例节点图标连接信息(json格式)
PROCESS_DEFINITION_DESC=流程定义描述信息
PROCESS_DEFINITION_TAG=流程定义相关操作
SIGNOUT_NOTES=退出登录
USER_PASSWORD=用户密码
UPDATE_PROCESS_INSTANCE_NOTES=更新流程实例
QUERY_PROCESS_INSTANCE_LIST_NOTES=查询流程实例列表
VERIFY_PROCCESS_DEFINITION_NAME_NOTES=验证流程定义名字
LOGIN_NOTES=用户登录
UPDATE_PROCCESS_DEFINITION_NOTES=更新流程定义
PROCESS_DEFINITION_ID=流程定义ID
RELEASE_PROCCESS_DEFINITION_NOTES=发布流程定义
QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=查询流程定义通过流程定义ID
QUERY_PROCCESS_DEFINITION_LIST_NOTES=查询流程定义列表
QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=分页查询流程定义列表
PAGE_NO=页码号
PROCESS_INSTANCE_ID=流程实例ID
PROCESS_INSTANCE_JSON=流程实例信息(json格式)
SCHEDULE_TIME=定时时间
SYNC_DEFINE=更新流程实例的信息是否同步到流程定义
RECOVERY_PROCESS_INSTANCE_FLAG=是否恢复流程实例
SEARCH_VAL=搜索值
USER_ID=用户ID
PAGE_SIZE=页大小
LIMIT=显示多少条
VIEW_TREE_NOTES=树状图
GET_NODE_LIST_BY_DEFINITION_ID_NOTES=获得任务节点列表通过流程定义ID
PROCESS_DEFINITION_ID_LIST=流程定义id列表
QUERY_PROCESS_INSTANCE_BY_ID_NOTES=查询流程实例通过流程实例ID
DELETE_PROCESS_INSTANCE_BY_ID_NOTES=删除流程实例通过流程实例ID
TASK_ID=任务实例ID
SKIP_LINE_NUM=忽略行数
QUERY_TASK_INSTANCE_LOG_NOTES=查询任务实例日志
DOWNLOAD_TASK_INSTANCE_LOG_NOTES=下载任务实例日志
USERS_TAG=用户相关操作
SCHEDULER_TAG=定时相关操作
CREATE_SCHEDULE_NOTES=创建定时
CREATE_USER_NOTES=创建用户
TENANT_ID=租户ID
QUEUE=使用的队列
EMAIL=邮箱
PHONE=手机号
QUERY_USER_LIST_NOTES=查询用户列表
UPDATE_USER_NOTES=更新用户
DELETE_USER_BY_ID_NOTES=删除用户通过ID
GRANT_PROJECT_NOTES=授权项目
PROJECT_IDS=项目IDS(字符串格式,多个项目以","分割)
GRANT_RESOURCE_NOTES=授权资源文件
RESOURCE_IDS=资源ID列表(字符串格式,多个资源ID以","分割)
GET_USER_INFO_NOTES=获取用户信息
LIST_USER_NOTES=用户列表
VERIFY_USER_NAME_NOTES=验证用户名
UNAUTHORIZED_USER_NOTES=取消授权
ALERT_GROUP_ID=报警组ID
AUTHORIZED_USER_NOTES=授权用户
GRANT_UDF_FUNC_NOTES=授权udf函数
UDF_IDS=udf函数id列表(字符串格式,多个udf函数ID以","分割)
GRANT_DATASOURCE_NOTES=授权数据源
DATASOURCE_IDS=数据源ID列表(字符串格式,多个数据源ID以","分割)
QUERY_SUBPROCESS_INSTANCE_BY_TASK_ID_NOTES=查询子流程实例通过任务实例ID
QUERY_PARENT_PROCESS_INSTANCE_BY_SUB_PROCESS_INSTANCE_ID_NOTES=查询父流程实例信息通过子流程实例ID
QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES=查询流程实例全局变量和局部变量
VIEW_GANTT_NOTES=浏览Gantt图
SUB_PROCESS_INSTANCE_ID=子流程实例ID
TASK_NAME=任务实例名
TASK_INSTANCE_TAG=任务实例相关操作
LOGGER_TAG=日志相关操作
PROCESS_INSTANCE_TAG=流程实例相关操作
EXECUTION_STATUS=工作流和任务节点的运行状态
HOST=运行任务的主机IP地址
START_DATE=开始时间
END_DATE=结束时间
QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES=通过流程实例ID查询任务列表
UPDATE_DATA_SOURCE_NOTES=更新数据源
DATA_SOURCE_ID=数据源ID
QUERY_DATA_SOURCE_NOTES=查询数据源通过ID
QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES=查询数据源列表通过数据源类型
QUERY_DATA_SOURCE_LIST_PAGING_NOTES=分页查询数据源列表
CONNECT_DATA_SOURCE_NOTES=连接数据源
CONNECT_DATA_SOURCE_TEST_NOTES=连接数据源测试
DELETE_DATA_SOURCE_NOTES=删除数据源
VERIFY_DATA_SOURCE_NOTES=验证数据源
UNAUTHORIZED_DATA_SOURCE_NOTES=未授权的数据源
AUTHORIZED_DATA_SOURCE_NOTES=授权的数据源
DELETE_SCHEDULER_BY_ID_NOTES=根据定时id删除定时数据
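The three locale files are meant to stay key-for-key in sync, and the zh_CN file is already two keys short (227 vs 229). A quick hedged way to spot drift, run from the conf/i18n directory:

    # compare the sorted key sets of the English and Chinese bundles
    diff <(cut -d= -f1 messages_en_US.properties | sort) \
         <(cut -d= -f1 messages_zh_CN.properties | sort)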

1
dockerfile/dockerfile-1.1.x/conf/escheduler/conf/mail_templates/alert_mail_template.ftl

@ -0,0 +1 @@
<!DOCTYPE HTML PUBLIC '-//W3C//DTD HTML 4.01 Transitional//EN' 'http://www.w3.org/TR/html4/loose.dtd'><html><head><title> easyscheduler</title><meta name='Keywords' content=''><meta name='Description' content=''><style type="text/css">table { margin-top:0px; padding-top:0px; border:1px solid; font-size: 14px; color: #333333; border-width: 1px; border-color: #666666; border-collapse: collapse; } table th { border-width: 1px; padding: 8px; border-style: solid; border-color: #666666; background-color: #dedede; } table td { border-width: 1px; padding: 8px; border-style: solid; border-color: #666666; background-color: #ffffff; }</style></head><body style="margin:0;padding:0"><table border="1px" cellpadding="5px" cellspacing="-10px"><thead><#if title??> ${title}</#if></thead><#if content??> ${content}</#if></table></body></html>

21
dockerfile/dockerfile-1.1.x/conf/escheduler/conf/master.properties

@ -0,0 +1,21 @@
# master execute thread num
master.exec.threads=100
# master execute task number in parallel
master.exec.task.number=20
# master heartbeat interval
master.heartbeat.interval=10
# master commit task retry times
master.task.commit.retryTimes=5
# master commit task interval
master.task.commit.interval=100
# the master can take on new work only when the system load average is below this value. default: the number of cpu cores * 2
master.max.cpuload.avg=10
# the master can take on new work only when available memory exceeds this reserved amount. default: physical memory * 1/10, unit: GB
master.reserved.memory=1
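These two thresholds gate whether the master picks up work. A hedged sketch of the values they are compared against (field positions assume a procps-ng free and GNU awk/cut):

    # 1-minute load average, compared against master.max.cpuload.avg
    uptime | awk -F'load average: ' '{print $2}' | cut -d, -f1
    # available memory in GB, compared against master.reserved.memory
    free -g | awk '/^Mem:/ {print $7}'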

34
dockerfile/dockerfile-1.1.x/conf/escheduler/conf/master_logback.xml

@ -0,0 +1,34 @@
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
<property name="log.base" value="logs" />
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="MASTERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.base}/escheduler-master.log</file>
<filter class="cn.escheduler.server.master.log.MasterLogFilter">
<level>INFO</level>
</filter>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/escheduler-master.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>168</maxHistory>
<maxFileSize>200MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="MASTERLOGFILE"/>
</root>
</configuration>

39
dockerfile/dockerfile-1.1.x/conf/escheduler/conf/quartz.properties

@ -0,0 +1,39 @@
#============================================================================
# Configure Main Scheduler Properties
#============================================================================
org.quartz.scheduler.instanceName = EasyScheduler
org.quartz.scheduler.instanceId = AUTO
org.quartz.scheduler.makeSchedulerThreadDaemon = true
org.quartz.jobStore.useProperties = false
#============================================================================
# Configure ThreadPool
#============================================================================
org.quartz.threadPool.class = org.quartz.simpl.SimpleThreadPool
org.quartz.threadPool.makeThreadsDaemons = true
org.quartz.threadPool.threadCount = 25
org.quartz.threadPool.threadPriority = 5
#============================================================================
# Configure JobStore
#============================================================================
org.quartz.jobStore.class = org.quartz.impl.jdbcjobstore.JobStoreTX
org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.StdJDBCDelegate
org.quartz.jobStore.tablePrefix = QRTZ_
org.quartz.jobStore.isClustered = true
org.quartz.jobStore.misfireThreshold = 60000
org.quartz.jobStore.clusterCheckinInterval = 5000
org.quartz.jobStore.dataSource = myDs
#============================================================================
# Configure Datasources
#============================================================================
org.quartz.dataSource.myDs.driver = com.mysql.jdbc.Driver
org.quartz.dataSource.myDs.URL = jdbc:mysql://127.0.0.1:3306/escheduler?characterEncoding=utf8
org.quartz.dataSource.myDs.user = root
org.quartz.dataSource.myDs.password = root@123
org.quartz.dataSource.myDs.maxConnections = 10
org.quartz.dataSource.myDs.validationQuery = select 1
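The clustered JobStoreTX expects the QRTZ_-prefixed tables to already exist in the escheduler database. A quick check using the credentials from this file:

    # list the Quartz tables the job store will use
    mysql --user=root --password=root@123 \
          --execute="SHOW TABLES LIKE 'QRTZ_%'" escheduler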

15
dockerfile/dockerfile-1.1.x/conf/escheduler/conf/worker.properties

@ -0,0 +1,15 @@
# worker execute thread num
worker.exec.threads=100
# worker heartbeat interval
worker.heartbeat.interval=10
# number of tasks the worker fetches at a time
worker.fetch.task.num=3
# the worker can take on new work only when the system load average is below this value. default: the number of cpu cores * 2
#worker.max.cpuload.avg=10
# the worker can take on new work only when available memory exceeds this reserved amount. default: physical memory * 1/6, unit: GB
worker.reserved.memory=1

53
dockerfile/dockerfile-1.1.x/conf/escheduler/conf/worker_logback.xml

@ -0,0 +1,53 @@
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds">
<property name="log.base" value="logs"/>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="TASKLOGFILE" class="cn.escheduler.server.worker.log.TaskLogAppender">
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<filter class="cn.escheduler.server.worker.log.TaskLogFilter"></filter>
<file>${log.base}/{processDefinitionId}/{processInstanceId}/{taskInstanceId}.log</file>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
<append>true</append>
</appender>
<appender name="WORKERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.base}/escheduler-worker.log</file>
<filter class="cn.escheduler.server.worker.log.WorkerLogFilter">
<level>INFO</level>
</filter>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/escheduler-worker.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>168</maxHistory>
<maxFileSize>200MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="TASKLOGFILE"/>
<appender-ref ref="WORKERLOGFILE"/>
</root>
</configuration>
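The TaskLogAppender resolves the {processDefinitionId}/{processInstanceId}/{taskInstanceId} placeholders per task, so with hypothetical IDs 5, 12, and 34 a task's log would land at:

    ls logs/5/12/34.log   # one file per task instance under log.base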

25
dockerfile/dockerfile-1.1.x/conf/escheduler/conf/zookeeper.properties

@ -0,0 +1,25 @@
#zookeeper cluster
zookeeper.quorum=127.0.0.1:2181
#escheduler root directory
zookeeper.escheduler.root=/escheduler
#zookeeper server directory
zookeeper.escheduler.dead.servers=/escheduler/dead-servers
zookeeper.escheduler.masters=/escheduler/masters
zookeeper.escheduler.workers=/escheduler/workers
#zookeeper lock directory
zookeeper.escheduler.lock.masters=/escheduler/lock/masters
zookeeper.escheduler.lock.workers=/escheduler/lock/workers
#escheduler failover directory
zookeeper.escheduler.lock.failover.masters=/escheduler/lock/failover/masters
zookeeper.escheduler.lock.failover.workers=/escheduler/lock/failover/workers
zookeeper.escheduler.lock.failover.startup.masters=/escheduler/lock/failover/startup-masters
#zookeeper session/connection timeout and retry settings
zookeeper.session.timeout=300
zookeeper.connection.timeout=300
zookeeper.retry.sleep=1000
zookeeper.retry.maxtime=5
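A hedged way to eyeball these znodes once the services have registered, using the ZooKeeper CLI bundled at /opt/zookeeper per startup.sh below:

    # list the registered masters/workers under the configured root
    /opt/zookeeper/bin/zkCli.sh -server 127.0.0.1:2181 ls /escheduler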

263
dockerfile/dockerfile-1.1.x/conf/maven/settings.xml

@ -0,0 +1,263 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<!--
| This is the configuration file for Maven. It can be specified at two levels:
|
| 1. User Level. This settings.xml file provides configuration for a single user,
| and is normally provided in ${user.home}/.m2/settings.xml.
|
| NOTE: This location can be overridden with the CLI option:
|
| -s /path/to/user/settings.xml
|
| 2. Global Level. This settings.xml file provides configuration for all Maven
| users on a machine (assuming they're all using the same Maven
| installation). It's normally provided in
| ${maven.home}/conf/settings.xml.
|
| NOTE: This location can be overridden with the CLI option:
|
| -gs /path/to/global/settings.xml
|
| The sections in this sample file are intended to give you a running start at
| getting the most out of your Maven installation. Where appropriate, the default
| values (values used when the setting is not specified) are provided.
|
|-->
<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">
<!-- localRepository
| The path to the local repository maven will use to store artifacts.
|
| Default: ${user.home}/.m2/repository
<localRepository>/path/to/local/repo</localRepository>
-->
<!-- interactiveMode
| This will determine whether maven prompts you when it needs input. If set to false,
| maven will use a sensible default value, perhaps based on some other setting, for
| the parameter in question.
|
| Default: true
<interactiveMode>true</interactiveMode>
-->
<!-- offline
| Determines whether maven should attempt to connect to the network when executing a build.
| This will have an effect on artifact downloads, artifact deployment, and others.
|
| Default: false
<offline>false</offline>
-->
<!-- pluginGroups
| This is a list of additional group identifiers that will be searched when resolving plugins by their prefix, i.e.
| when invoking a command line like "mvn prefix:goal". Maven will automatically add the group identifiers
| "org.apache.maven.plugins" and "org.codehaus.mojo" if these are not already contained in the list.
|-->
<pluginGroups>
<!-- pluginGroup
| Specifies a further group identifier to use for plugin lookup.
<pluginGroup>com.your.plugins</pluginGroup>
-->
</pluginGroups>
<!-- proxies
| This is a list of proxies which can be used on this machine to connect to the network.
| Unless otherwise specified (by system property or command-line switch), the first proxy
| specification in this list marked as active will be used.
|-->
<proxies>
<!-- proxy
| Specification for one proxy, to be used in connecting to the network.
|
<proxy>
<id>optional</id>
<active>true</active>
<protocol>http</protocol>
<username>proxyuser</username>
<password>proxypass</password>
<host>proxy.host.net</host>
<port>80</port>
<nonProxyHosts>local.net|some.host.com</nonProxyHosts>
</proxy>
-->
</proxies>
<!-- servers
| This is a list of authentication profiles, keyed by the server-id used within the system.
| Authentication profiles can be used whenever maven must make a connection to a remote server.
|-->
<servers>
<!-- server
| Specifies the authentication information to use when connecting to a particular server, identified by
| a unique name within the system (referred to by the 'id' attribute below).
|
| NOTE: You should either specify username/password OR privateKey/passphrase, since these pairings are
| used together.
|
<server>
<id>deploymentRepo</id>
<username>repouser</username>
<password>repopwd</password>
</server>
-->
<!-- Another sample, using keys to authenticate.
<server>
<id>siteServer</id>
<privateKey>/path/to/private/key</privateKey>
<passphrase>optional; leave empty if not used.</passphrase>
</server>
-->
</servers>
<!-- mirrors
| This is a list of mirrors to be used in downloading artifacts from remote repositories.
|
| It works like this: a POM may declare a repository to use in resolving certain artifacts.
| However, this repository may have problems with heavy traffic at times, so people have mirrored
| it to several places.
|
| That repository definition will have a unique id, so we can create a mirror reference for that
| repository, to be used as an alternate download site. The mirror site will be the preferred
| server for that repository.
|-->
<mirrors>
<!-- mirror
| Specifies a repository mirror site to use instead of a given repository. The repository that
| this mirror serves has an ID that matches the mirrorOf element of this mirror. IDs are used
| for inheritance and direct lookup purposes, and must be unique across the set of mirrors.
|
<mirror>
<id>mirrorId</id>
<mirrorOf>repositoryId</mirrorOf>
<name>Human Readable Name for this Mirror.</name>
<url>http://my.repository.com/repo/path</url>
</mirror>
-->
<mirror>
<id>nexus-aliyun</id>
<mirrorOf>central</mirrorOf>
<name>Nexus aliyun</name>
<url>http://maven.aliyun.com/nexus/content/groups/public</url>
</mirror>
</mirrors>
<!-- profiles
| This is a list of profiles which can be activated in a variety of ways, and which can modify
| the build process. Profiles provided in the settings.xml are intended to provide local machine-
| specific paths and repository locations which allow the build to work in the local environment.
|
| For example, if you have an integration testing plugin - like cactus - that needs to know where
| your Tomcat instance is installed, you can provide a variable here such that the variable is
| dereferenced during the build process to configure the cactus plugin.
|
| As noted above, profiles can be activated in a variety of ways. One way - the activeProfiles
| section of this document (settings.xml) - will be discussed later. Another way essentially
| relies on the detection of a system property, either matching a particular value for the property,
| or merely testing its existence. Profiles can also be activated by JDK version prefix, where a
| value of '1.4' might activate a profile when the build is executed on a JDK version of '1.4.2_07'.
| Finally, the list of active profiles can be specified directly from the command line.
|
| NOTE: For profiles defined in the settings.xml, you are restricted to specifying only artifact
| repositories, plugin repositories, and free-form properties to be used as configuration
| variables for plugins in the POM.
|
|-->
<profiles>
<!-- profile
| Specifies a set of introductions to the build process, to be activated using one or more of the
| mechanisms described above. For inheritance purposes, and to activate profiles via <activatedProfiles/>
| or the command line, profiles have to have an ID that is unique.
|
| An encouraged best practice for profile identification is to use a consistent naming convention
| for profiles, such as 'env-dev', 'env-test', 'env-production', 'user-jdcasey', 'user-brett', etc.
| This will make it more intuitive to understand what the set of introduced profiles is attempting
| to accomplish, particularly when you only have a list of profile id's for debug.
|
| This profile example uses the JDK version to trigger activation, and provides a JDK-specific repo.
<profile>
<id>jdk-1.4</id>
<activation>
<jdk>1.4</jdk>
</activation>
<repositories>
<repository>
<id>jdk14</id>
<name>Repository for JDK 1.4 builds</name>
<url>http://www.myhost.com/maven/jdk14</url>
<layout>default</layout>
<snapshotPolicy>always</snapshotPolicy>
</repository>
</repositories>
</profile>
-->
<!--
| Here is another profile, activated by the system property 'target-env' with a value of 'dev',
| which provides a specific path to the Tomcat instance. To use this, your plugin configuration
| might hypothetically look like:
|
| ...
| <plugin>
| <groupId>org.myco.myplugins</groupId>
| <artifactId>myplugin</artifactId>
|
| <configuration>
| <tomcatLocation>${tomcatPath}</tomcatLocation>
| </configuration>
| </plugin>
| ...
|
| NOTE: If you just wanted to inject this configuration whenever someone set 'target-env' to
| anything, you could just leave off the <value/> inside the activation-property.
|
<profile>
<id>env-dev</id>
<activation>
<property>
<name>target-env</name>
<value>dev</value>
</property>
</activation>
<properties>
<tomcatPath>/path/to/tomcat/instance</tomcatPath>
</properties>
</profile>
-->
</profiles>
<!-- activeProfiles
| List of profiles that are active for all builds.
|
<activeProfiles>
<activeProfile>alwaysActiveProfile</activeProfile>
<activeProfile>anotherAlwaysActiveProfile</activeProfile>
</activeProfiles>
-->
</settings>
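The only substantive change over the stock Maven template is the nexus-aliyun mirror for central. The Dockerfile presumably hands this file to Maven during the source build; invoked by hand (goals and flags are an assumption) that would look like:

    # build with the aliyun mirror instead of Maven central
    mvn -B -s dockerfile/dockerfile-1.1.x/conf/maven/settings.xml clean package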

31
dockerfile/dockerfile-1.1.x/conf/nginx/default.conf

@ -0,0 +1,31 @@
server {
listen 8888;
server_name localhost;
#charset koi8-r;
#access_log /var/log/nginx/host.access.log main;
location / {
root /opt/easyscheduler_source/escheduler-ui/dist;
index index.html index.htm;
}
location /escheduler {
proxy_pass http://127.0.0.1:12345;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header x_real_ip $remote_addr;
proxy_set_header remote_addr $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_http_version 1.1;
proxy_connect_timeout 300s;
proxy_read_timeout 300s;
proxy_send_timeout 300s;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
#error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}
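A hedged smoke test of both locations once nginx is up: / should serve the built UI and /escheduler should proxy through to the api-server on 12345:

    nginx -t                                    # validate this config first
    curl -I http://127.0.0.1:8888/              # static UI from escheduler-ui/dist
    curl -I http://127.0.0.1:8888/escheduler/   # proxied to the api-server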

28
dockerfile/dockerfile-1.1.x/conf/zookeeper/zoo.cfg

@ -0,0 +1,28 @@
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# an example.
dataDir=/tmp/zookeeper
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
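A classic liveness probe against the clientPort above; this assumes the four-letter-word commands are enabled, as they are by default on older ZooKeeper releases:

    echo ruok | nc 127.0.0.1 2181   # a healthy server answers imok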

8
dockerfile/dockerfile-1.1.x/hooks/build

@ -0,0 +1,8 @@
#!/bin/bash
echo "------ escheduler start - build -------"
printenv
docker build --build-arg version="$version" --build-arg tar_version="$tar_version" -t "$DOCKER_REPO:$version" .
echo "------ escheduler end - build -------"

8
dockerfile/dockerfile-1.1.x/hooks/push

@ -0,0 +1,8 @@
#!/bin/bash
echo "------ push start -------"
printenv
docker push "$DOCKER_REPO:$version"
echo "------ push end -------"

80
dockerfile/dockerfile-1.1.x/startup.sh

@ -0,0 +1,80 @@
#! /bin/bash
set -e
if [ `netstat -anop|grep mysql|wc -l` -gt 0 ];then
echo "MySQL is Running."
else
MYSQL_ROOT_PWD="root@123"
ESZ_DB="escheduler"
echo "启动mysql服务"
chown -R mysql:mysql /var/lib/mysql /var/run/mysqld
find /var/lib/mysql -type f -exec touch {} \; && service mysql restart $ sleep 10
if [ ! -f /nohup.out ];then
echo "设置mysql密码"
mysql --user=root --password=root -e "UPDATE mysql.user set authentication_string=password('$MYSQL_ROOT_PWD') where user='root'; FLUSH PRIVILEGES;"
echo "设置mysql权限"
mysql --user=root --password=$MYSQL_ROOT_PWD -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY '$MYSQL_ROOT_PWD' WITH GRANT OPTION; FLUSH PRIVILEGES;"
echo "创建escheduler数据库"
mysql --user=root --password=$MYSQL_ROOT_PWD -e "CREATE DATABASE IF NOT EXISTS \`$ESZ_DB\` CHARACTER SET utf8 COLLATE utf8_general_ci; FLUSH PRIVILEGES;"
echo "导入mysql数据"
nohup /opt/escheduler/script/create_escheduler.sh &
sleep 90
fi
if [ `mysql --user=root --password=$MYSQL_ROOT_PWD -s -r -e "SELECT count(TABLE_NAME) FROM information_schema.TABLES WHERE TABLE_SCHEMA='escheduler';" | grep -v count` -eq 38 ];then
echo "\`$ESZ_DB\` 表个数正确"
else
echo "\`$ESZ_DB\` 表个数不正确"
mysql --user=root --password=$MYSQL_ROOT_PWD -e "DROP DATABASE \`$ESZ_DB\`;"
echo "创建escheduler数据库"
mysql --user=root --password=$MYSQL_ROOT_PWD -e "CREATE DATABASE IF NOT EXISTS \`$ESZ_DB\` CHARACTER SET utf8 COLLATE utf8_general_ci; FLUSH PRIVILEGES;"
echo "导入mysql数据"
nohup /opt/escheduler/script/create_escheduler.sh &
fi
fi
/opt/zookeeper/bin/zkServer.sh restart
sleep 10
echo "启动api-server"
/opt/escheduler/bin/escheduler-daemon.sh stop api-server
/opt/escheduler/bin/escheduler-daemon.sh start api-server
echo "启动master-server"
/opt/escheduler/bin/escheduler-daemon.sh stop master-server
python /opt/escheduler/script/del_zk_node.py 127.0.0.1 /escheduler/masters
/opt/escheduler/bin/escheduler-daemon.sh start master-server
echo "启动worker-server"
/opt/escheduler/bin/escheduler-daemon.sh stop worker-server
python /opt/escheduler/script/del_zk_node.py 127.0.0.1 /escheduler/workers
/opt/escheduler/bin/escheduler-daemon.sh start worker-server
echo "启动logger-server"
/opt/escheduler/bin/escheduler-daemon.sh stop logger-server
/opt/escheduler/bin/escheduler-daemon.sh start logger-server
echo "启动alert-server"
/opt/escheduler/bin/escheduler-daemon.sh stop alert-server
/opt/escheduler/bin/escheduler-daemon.sh start alert-server
echo "启动nginx"
/etc/init.d/nginx stop
nginx &
# keep the container in the foreground; note: the exec "$@" below is unreachable
while true
do
sleep 101
done
exec "$@"