
Merge remote-tracking branch 'remotes/upstream/dev' into dev

pull/2/head
lgcareer 5 years ago
parent
commit
d96418b655
64 changed files (the number of changed lines is shown before each path):
  1. 0  dockerfile/Dockerfile
  2. 0  dockerfile/conf/escheduler/conf/alert.properties
  3. 0  dockerfile/conf/escheduler/conf/alert_logback.xml
  4. 0  dockerfile/conf/escheduler/conf/apiserver_logback.xml
  5. 0  dockerfile/conf/escheduler/conf/application.properties
  6. 0  dockerfile/conf/escheduler/conf/application_master.properties
  7. 0  dockerfile/conf/escheduler/conf/common/common.properties
  8. 0  dockerfile/conf/escheduler/conf/common/hadoop/hadoop.properties
  9. 0  dockerfile/conf/escheduler/conf/config/install_config.conf
  10. 0  dockerfile/conf/escheduler/conf/config/run_config.conf
  11. 0  dockerfile/conf/escheduler/conf/dao/data_source.properties
  12. 0  dockerfile/conf/escheduler/conf/env/.escheduler_env.sh
  13. 0  dockerfile/conf/escheduler/conf/i18n/messages.properties
  14. 0  dockerfile/conf/escheduler/conf/i18n/messages_en_US.properties
  15. 0  dockerfile/conf/escheduler/conf/i18n/messages_zh_CN.properties
  16. 0  dockerfile/conf/escheduler/conf/mail_templates/alert_mail_template.ftl
  17. 0  dockerfile/conf/escheduler/conf/master.properties
  18. 0  dockerfile/conf/escheduler/conf/master_logback.xml
  19. 0  dockerfile/conf/escheduler/conf/quartz.properties
  20. 0  dockerfile/conf/escheduler/conf/worker.properties
  21. 0  dockerfile/conf/escheduler/conf/worker_logback.xml
  22. 0  dockerfile/conf/escheduler/conf/zookeeper.properties
  23. 0  dockerfile/conf/maven/settings.xml
  24. 0  dockerfile/conf/nginx/default.conf
  25. 0  dockerfile/conf/zookeeper/zoo.cfg
  26. 145  dockerfile/dockerfile-1.0.x/Dockerfile
  27. 21  dockerfile/dockerfile-1.0.x/conf/escheduler/conf/alert.properties
  28. 16  dockerfile/dockerfile-1.0.x/conf/escheduler/conf/application.properties
  29. 27  dockerfile/dockerfile-1.0.x/conf/escheduler/conf/common/common.properties
  30. 8  dockerfile/dockerfile-1.0.x/conf/escheduler/conf/common/hadoop/hadoop.properties
  31. 4  dockerfile/dockerfile-1.0.x/conf/escheduler/conf/env/.escheduler_env.sh
  32. 39  dockerfile/dockerfile-1.0.x/conf/escheduler/conf/quartz.properties
  33. 25  dockerfile/dockerfile-1.0.x/conf/escheduler/conf/zookeeper.properties
  34. 8  dockerfile/dockerfile-1.0.x/hooks/build
  35. 79  dockerfile/dockerfile-1.0.x/startup.sh
  36. 31  dockerfile/dockerfile-1.1.x/conf/escheduler/conf/alert_logback.xml
  37. 42  dockerfile/dockerfile-1.1.x/conf/escheduler/conf/apiserver_logback.xml
  38. 1  dockerfile/dockerfile-1.1.x/conf/escheduler/conf/application_master.properties
  39. 3  dockerfile/dockerfile-1.1.x/conf/escheduler/conf/config/install_config.conf
  40. 4  dockerfile/dockerfile-1.1.x/conf/escheduler/conf/config/run_config.conf
  41. 53  dockerfile/dockerfile-1.1.x/conf/escheduler/conf/dao/data_source.properties
  42. 1  dockerfile/dockerfile-1.1.x/conf/escheduler/conf/mail_templates/alert_mail_template.ftl
  43. 21  dockerfile/dockerfile-1.1.x/conf/escheduler/conf/master.properties
  44. 34  dockerfile/dockerfile-1.1.x/conf/escheduler/conf/master_logback.xml
  45. 15  dockerfile/dockerfile-1.1.x/conf/escheduler/conf/worker.properties
  46. 53  dockerfile/dockerfile-1.1.x/conf/escheduler/conf/worker_logback.xml
  47. 263  dockerfile/dockerfile-1.1.x/conf/maven/settings.xml
  48. 31  dockerfile/dockerfile-1.1.x/conf/nginx/default.conf
  49. 28  dockerfile/dockerfile-1.1.x/conf/zookeeper/zoo.cfg
  50. 8  dockerfile/dockerfile-1.1.x/hooks/push
  51. 0  dockerfile/hooks/build
  52. 0  dockerfile/hooks/push
  53. 5  dockerfile/startup.sh
  54. 84  docs/en_US/EasyScheduler Proposal.md
  55. 29  escheduler-api/src/main/java/cn/escheduler/api/service/SessionService.java
  56. 23  escheduler-dao/src/main/java/cn/escheduler/dao/mapper/SessionMapper.java
  57. 2  escheduler-dao/src/main/java/cn/escheduler/dao/mapper/TenantMapper.java
  58. 9  escheduler-dao/src/main/java/cn/escheduler/dao/mapper/TenantMapperProvider.java
  59. 16  escheduler-dao/src/main/java/cn/escheduler/dao/model/Tenant.java
  60. 72  escheduler-server/src/main/java/cn/escheduler/server/rpc/LoggerServer.java
  61. 1  escheduler-server/src/main/java/cn/escheduler/server/utils/ParamUtils.java
  62. 21  escheduler-server/src/main/java/cn/escheduler/server/worker/WorkerServer.java
  63. 72  escheduler-server/src/main/java/cn/escheduler/server/worker/runner/FetchTaskThread.java
  64. 2  escheduler-server/src/main/java/cn/escheduler/server/worker/runner/TaskScheduleThread.java

0
dockerfile/dockerfile-1.1.x/Dockerfile → dockerfile/Dockerfile

0
dockerfile/dockerfile-1.1.x/conf/escheduler/conf/alert.properties → dockerfile/conf/escheduler/conf/alert.properties

0
dockerfile/dockerfile-1.0.x/conf/escheduler/conf/alert_logback.xml → dockerfile/conf/escheduler/conf/alert_logback.xml

0
dockerfile/dockerfile-1.0.x/conf/escheduler/conf/apiserver_logback.xml → dockerfile/conf/escheduler/conf/apiserver_logback.xml

0
dockerfile/dockerfile-1.1.x/conf/escheduler/conf/application.properties → dockerfile/conf/escheduler/conf/application.properties

0
dockerfile/dockerfile-1.0.x/conf/escheduler/conf/application_master.properties → dockerfile/conf/escheduler/conf/application_master.properties

0
dockerfile/dockerfile-1.1.x/conf/escheduler/conf/common/common.properties → dockerfile/conf/escheduler/conf/common/common.properties

0
dockerfile/dockerfile-1.1.x/conf/escheduler/conf/common/hadoop/hadoop.properties → dockerfile/conf/escheduler/conf/common/hadoop/hadoop.properties

0
dockerfile/dockerfile-1.0.x/conf/escheduler/conf/config/install_config.conf → dockerfile/conf/escheduler/conf/config/install_config.conf

0
dockerfile/dockerfile-1.0.x/conf/escheduler/conf/config/run_config.conf → dockerfile/conf/escheduler/conf/config/run_config.conf

0
dockerfile/dockerfile-1.0.x/conf/escheduler/conf/dao/data_source.properties → dockerfile/conf/escheduler/conf/dao/data_source.properties

0
dockerfile/dockerfile-1.1.x/conf/escheduler/conf/env/.escheduler_env.sh → dockerfile/conf/escheduler/conf/env/.escheduler_env.sh

0
dockerfile/dockerfile-1.1.x/conf/escheduler/conf/i18n/messages.properties → dockerfile/conf/escheduler/conf/i18n/messages.properties

0
dockerfile/dockerfile-1.1.x/conf/escheduler/conf/i18n/messages_en_US.properties → dockerfile/conf/escheduler/conf/i18n/messages_en_US.properties

0
dockerfile/dockerfile-1.1.x/conf/escheduler/conf/i18n/messages_zh_CN.properties → dockerfile/conf/escheduler/conf/i18n/messages_zh_CN.properties

0
dockerfile/dockerfile-1.0.x/conf/escheduler/conf/mail_templates/alert_mail_template.ftl → dockerfile/conf/escheduler/conf/mail_templates/alert_mail_template.ftl

0
dockerfile/dockerfile-1.0.x/conf/escheduler/conf/master.properties → dockerfile/conf/escheduler/conf/master.properties

0
dockerfile/dockerfile-1.0.x/conf/escheduler/conf/master_logback.xml → dockerfile/conf/escheduler/conf/master_logback.xml

0
dockerfile/dockerfile-1.1.x/conf/escheduler/conf/quartz.properties → dockerfile/conf/escheduler/conf/quartz.properties

0
dockerfile/dockerfile-1.0.x/conf/escheduler/conf/worker.properties → dockerfile/conf/escheduler/conf/worker.properties

0
dockerfile/dockerfile-1.0.x/conf/escheduler/conf/worker_logback.xml → dockerfile/conf/escheduler/conf/worker_logback.xml

0
dockerfile/dockerfile-1.1.x/conf/escheduler/conf/zookeeper.properties → dockerfile/conf/escheduler/conf/zookeeper.properties

0
dockerfile/dockerfile-1.0.x/conf/maven/settings.xml → dockerfile/conf/maven/settings.xml

0
dockerfile/dockerfile-1.0.x/conf/nginx/default.conf → dockerfile/conf/nginx/default.conf

0
dockerfile/dockerfile-1.0.x/conf/zookeeper/zoo.cfg → dockerfile/conf/zookeeper/zoo.cfg

145
dockerfile/dockerfile-1.0.x/Dockerfile

@ -1,145 +0,0 @@
FROM ubuntu:18.04
MAINTAINER journey "825193156@qq.com"
ENV LANG=C.UTF-8
ARG version
#1. Install JDK
RUN apt-get update \
&& apt-get -y install openjdk-8-jdk \
&& rm -rf /var/lib/apt/lists/*
ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64
ENV PATH $JAVA_HOME/bin:$PATH
# Install wget
RUN apt-get update && \
apt-get -y install wget
#2. Install ZooKeeper
#RUN cd /opt && \
# wget https://archive.apache.org/dist/zookeeper/zookeeper-3.4.6/zookeeper-3.4.6.tar.gz && \
# tar -zxvf zookeeper-3.4.6.tar.gz && \
# mv zookeeper-3.4.6 zookeeper && \
# rm -rf ./zookeeper-*tar.gz && \
# mkdir -p /tmp/zookeeper && \
# rm -rf /opt/zookeeper/conf/zoo_sample.cfg
RUN cd /opt && \
wget https://www-us.apache.org/dist/zookeeper/zookeeper-3.4.14/zookeeper-3.4.14.tar.gz && \
tar -zxvf zookeeper-3.4.14.tar.gz && \
mv zookeeper-3.4.14 zookeeper && \
rm -rf ./zookeeper-*tar.gz && \
mkdir -p /tmp/zookeeper && \
rm -rf /opt/zookeeper/conf/zoo_sample.cfg
ADD ./conf/zookeeper/zoo.cfg /opt/zookeeper/conf
ENV ZK_HOME=/opt/zookeeper
ENV PATH $PATH:$ZK_HOME/bin
#3. Install Maven
RUN cd /opt && \
wget http://apache-mirror.rbc.ru/pub/apache/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz && \
tar -zxvf apache-maven-3.3.9-bin.tar.gz && \
mv apache-maven-3.3.9 maven && \
rm -rf ./apache-maven-*tar.gz && \
rm -rf /opt/maven/conf/settings.xml
ADD ./conf/maven/settings.xml /opt/maven/conf
ENV MAVEN_HOME=/opt/maven
ENV PATH $PATH:$MAVEN_HOME/bin
#4. Install Node.js
RUN cd /opt && \
wget https://nodejs.org/download/release/v8.9.4/node-v8.9.4-linux-x64.tar.gz && \
tar -zxvf node-v8.9.4-linux-x64.tar.gz && \
mv node-v8.9.4-linux-x64 node && \
rm -rf ./node-v8.9.4-*tar.gz
ENV NODE_HOME=/opt/node
ENV PATH $PATH:$NODE_HOME/bin
#5. Download escheduler
RUN cd /opt && \
wget https://github.com/analysys/EasyScheduler/archive/${version}.tar.gz && \
tar -zxvf ${version}.tar.gz && \
mv EasyScheduler-${version} easyscheduler_source && \
rm -rf ./${version}.tar.gz
#6. Build the backend
RUN cd /opt/easyscheduler_source && \
mvn -U clean package assembly:assembly -Dmaven.test.skip=true
#7. Build the frontend
RUN chmod -R 777 /opt/easyscheduler_source/escheduler-ui && \
cd /opt/easyscheduler_source/escheduler-ui && \
rm -rf /opt/easyscheduler_source/escheduler-ui/node_modules && \
npm install node-sass --unsafe-perm && \
npm install && \
npm run build
#8. Install MySQL
RUN echo "deb http://cn.archive.ubuntu.com/ubuntu/ xenial main restricted universe multiverse" >> /etc/apt/sources.list
RUN echo "mysql-server mysql-server/root_password password root" | debconf-set-selections
RUN echo "mysql-server mysql-server/root_password_again password root" | debconf-set-selections
RUN apt-get update && \
apt-get -y install mysql-server-5.7 && \
mkdir -p /var/lib/mysql && \
mkdir -p /var/run/mysqld && \
mkdir -p /var/log/mysql && \
chown -R mysql:mysql /var/lib/mysql && \
chown -R mysql:mysql /var/run/mysqld && \
chown -R mysql:mysql /var/log/mysql
# UTF-8 and bind-address
RUN sed -i -e "$ a [client]\n\n[mysql]\n\n[mysqld]" /etc/mysql/my.cnf && \
sed -i -e "s/\(\[client\]\)/\1\ndefault-character-set = utf8/g" /etc/mysql/my.cnf && \
sed -i -e "s/\(\[mysql\]\)/\1\ndefault-character-set = utf8/g" /etc/mysql/my.cnf && \
sed -i -e "s/\(\[mysqld\]\)/\1\ninit_connect='SET NAMES utf8'\ncharacter-set-server = utf8\ncollation-server=utf8_general_ci\nbind-address = 0.0.0.0/g" /etc/mysql/my.cnf
#9. Install nginx
RUN apt-get update && \
apt-get install -y nginx && \
rm -rf /var/lib/apt/lists/* && \
echo "\ndaemon off;" >> /etc/nginx/nginx.conf && \
chown -R www-data:www-data /var/lib/nginx
#10. Modify escheduler configuration files
# Backend configuration
RUN mkdir -p /opt/escheduler && \
tar -zxvf /opt/easyscheduler_source/target/escheduler-${version}.tar.gz -C /opt/escheduler && \
rm -rf /opt/escheduler/conf
ADD ./conf/escheduler/conf /opt/escheduler/conf
# Frontend nginx configuration
ADD ./conf/nginx/default.conf /etc/nginx/conf.d
#11. Expose ports
EXPOSE 2181 2888 3888 3306 80 12345 8888
#12. Install sudo, python, vim, ping and ssh
RUN apt-get update && \
apt-get -y install sudo && \
apt-get -y install python && \
apt-get -y install vim && \
apt-get -y install iputils-ping && \
apt-get -y install net-tools && \
apt-get -y install openssh-server && \
apt-get -y install python-pip && \
pip install kazoo
COPY ./startup.sh /root/startup.sh
#13. Adjust permissions and set up symlinks
RUN chmod +x /root/startup.sh && \
chmod +x /opt/escheduler/script/create_escheduler.sh && \
chmod +x /opt/zookeeper/bin/zkServer.sh && \
chmod +x /opt/escheduler/bin/escheduler-daemon.sh && \
rm -rf /bin/sh && \
ln -s /bin/bash /bin/sh && \
mkdir -p /tmp/xls
ENTRYPOINT ["/root/startup.sh"]
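For reference, a minimal usage sketch for this (now removed) 1.0.x image, based on the build argument and the EXPOSE list above; the version value and image tag are illustrative assumptions, not values taken from this commit:

# build from the directory containing this Dockerfile, passing the EasyScheduler release tag as "version" (example value assumed)
docker build --build-arg version=1.0.4 -t easyscheduler:1.0.4 dockerfile/dockerfile-1.0.x
# run it, publishing the web UI (8888), api-server (12345), MySQL (3306) and ZooKeeper (2181) ports exposed above
docker run -d --name easyscheduler -p 8888:8888 -p 12345:12345 -p 3306:3306 -p 2181:2181 easyscheduler:1.0.4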

21
dockerfile/dockerfile-1.0.x/conf/escheduler/conf/alert.properties

@ -1,21 +0,0 @@
#alert type is EMAIL/SMS
alert.type=EMAIL
# mail server configuration
mail.protocol=SMTP
mail.server.host=smtp.office365.com
mail.server.port=587
mail.sender=qiaozhanwei@outlook.com
mail.passwd=eschedulerBJEG
# TLS
mail.smtp.starttls.enable=true
# SSL
mail.smtp.ssl.enable=false
#xls file path, needs to be created if it does not exist
xls.file.path=/tmp/xls
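The SMTP settings above can be sanity-checked from inside the container with a plain STARTTLS probe; this is only a connectivity test under the assumption that openssl is available, and it does not validate the mail credentials:

# expect the server banner and a TLS handshake if mail.server.host/mail.server.port are reachable
openssl s_client -starttls smtp -connect smtp.office365.com:587 </dev/null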

16
dockerfile/dockerfile-1.0.x/conf/escheduler/conf/application.properties

@ -1,16 +0,0 @@
# server port
server.port=12345
# session config
server.session.timeout=7200
server.context-path=/escheduler/
# file size limit for upload
spring.http.multipart.max-file-size=1024MB
spring.http.multipart.max-request-size=1024MB
#post content
server.max-http-post-size=5000000

27
dockerfile/dockerfile-1.0.x/conf/escheduler/conf/common/common.properties

@ -1,27 +0,0 @@
#task queue implementation, default "zookeeper"
escheduler.queue.impl=zookeeper
# user data directory path, self configuration, please make sure the directory exists and have read write permissions
data.basedir.path=/tmp/escheduler
# directory path for user data download. self configuration, please make sure the directory exists and have read write permissions
data.download.basedir.path=/tmp/escheduler/download
# process execute directory. self configuration, please make sure the directory exists and have read write permissions
process.exec.basepath=/tmp/escheduler/exec
# base data directory. resource files will be stored on this hadoop hdfs path. self configuration, please make sure the directory exists on hdfs and has read/write permissions. "/escheduler" is recommended
data.store2hdfs.basepath=/escheduler
# whether hdfs starts
hdfs.startup.state=false
# system env path. self configuration, please make sure the directory and file exists and have read write execute permissions
escheduler.env.path=/opt/escheduler/conf/env/.escheduler_env.sh
#resource.view.suffixs
resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml
# is development state? default "false"
development.state=false
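The comments above require the local directories to exist with read/write permissions; a small pre-flight sketch using the paths from this file (the chmod mode is an assumption, adjust to your own policy):

# create the directories referenced by data.basedir.path, data.download.basedir.path and process.exec.basepath
mkdir -p /tmp/escheduler /tmp/escheduler/download /tmp/escheduler/exec
chmod -R 755 /tmp/escheduler
# the env file referenced by escheduler.env.path must exist and be readable
test -r /opt/escheduler/conf/env/.escheduler_env.sh && echo "escheduler env file found"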

8
dockerfile/dockerfile-1.0.x/conf/escheduler/conf/common/hadoop/hadoop.properties

@ -1,8 +0,0 @@
# ha or single namenode. if the namenode is HA, core-site.xml and hdfs-site.xml need to be copied to the conf directory
fs.defaultFS=hdfs://mycluster:8020
# resourcemanager HA: this needs the ips of the resourcemanagers; leave this empty if there is a single resourcemanager
yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx
# If it is a single resourcemanager, you only need to configure one host name. If it is resourcemanager HA, the default configuration is fine
yarn.application.status.address=http://ark1:8088/ws/v1/cluster/apps/%s

4
dockerfile/dockerfile-1.0.x/conf/escheduler/conf/env/.escheduler_env.sh

@ -1,4 +0,0 @@
export PYTHON_HOME=/usr/bin/python
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
export PATH=$PYTHON_HOME:$JAVA_HOME/bin:$PATH

39
dockerfile/dockerfile-1.0.x/conf/escheduler/conf/quartz.properties

@ -1,39 +0,0 @@
#============================================================================
# Configure Main Scheduler Properties
#============================================================================
org.quartz.scheduler.instanceName = EasyScheduler
org.quartz.scheduler.instanceId = AUTO
org.quartz.scheduler.makeSchedulerThreadDaemon = true
org.quartz.jobStore.useProperties = false
#============================================================================
# Configure ThreadPool
#============================================================================
org.quartz.threadPool.class = org.quartz.simpl.SimpleThreadPool
org.quartz.threadPool.makeThreadsDaemons = true
org.quartz.threadPool.threadCount = 25
org.quartz.threadPool.threadPriority = 5
#============================================================================
# Configure JobStore
#============================================================================
org.quartz.jobStore.class = org.quartz.impl.jdbcjobstore.JobStoreTX
org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.StdJDBCDelegate
org.quartz.jobStore.tablePrefix = QRTZ_
org.quartz.jobStore.isClustered = true
org.quartz.jobStore.misfireThreshold = 60000
org.quartz.jobStore.clusterCheckinInterval = 5000
org.quartz.jobStore.dataSource = myDs
#============================================================================
# Configure Datasources
#============================================================================
org.quartz.dataSource.myDs.driver = com.mysql.jdbc.Driver
org.quartz.dataSource.myDs.URL = jdbc:mysql://127.0.0.1:3306/escheduler?characterEncoding=utf8
org.quartz.dataSource.myDs.user = root
org.quartz.dataSource.myDs.password = root@123
org.quartz.dataSource.myDs.maxConnections = 10
org.quartz.dataSource.myDs.validationQuery = select 1
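Because the JobStore above is JDBC-backed and clustered, the QRTZ_-prefixed tables must already exist in the escheduler database. A quick check using the credentials from this file, assuming the mysql client is on the PATH:

# list the Quartz tables created with the org.quartz.jobStore.tablePrefix prefix
mysql --user=root --password=root@123 escheduler -e "SHOW TABLES LIKE 'QRTZ%';"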

25
dockerfile/dockerfile-1.0.x/conf/escheduler/conf/zookeeper.properties

@ -1,25 +0,0 @@
#zookeeper cluster
zookeeper.quorum=127.0.0.1:2181
#escheduler root directory
zookeeper.escheduler.root=/escheduler
#zookeeper server directory
zookeeper.escheduler.dead.servers=/escheduler/dead-servers
zookeeper.escheduler.masters=/escheduler/masters
zookeeper.escheduler.workers=/escheduler/workers
#zookeeper lock directory
zookeeper.escheduler.lock.masters=/escheduler/lock/masters
zookeeper.escheduler.lock.workers=/escheduler/lock/workers
#escheduler failover directory
zookeeper.escheduler.lock.failover.masters=/escheduler/lock/failover/masters
zookeeper.escheduler.lock.failover.workers=/escheduler/lock/failover/workers
zookeeper.escheduler.lock.failover.startup.masters=/escheduler/lock/failover/startup-masters
#zookeeper session timeout and retry configuration
zookeeper.session.timeout=300
zookeeper.connection.timeout=300
zookeeper.retry.sleep=1000
zookeeper.retry.maxtime=5
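The znodes configured above can be inspected with the ZooKeeper CLI shipped in the image; a minimal sketch assuming the quorum address from this file:

# list registered masters and workers under the escheduler root znode
/opt/zookeeper/bin/zkCli.sh -server 127.0.0.1:2181 ls /escheduler
/opt/zookeeper/bin/zkCli.sh -server 127.0.0.1:2181 ls /escheduler/masters
/opt/zookeeper/bin/zkCli.sh -server 127.0.0.1:2181 ls /escheduler/workers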

8
dockerfile/dockerfile-1.0.x/hooks/build

@ -1,8 +0,0 @@
#!/bin/bash
echo "------ escheduler start - build -------"
printenv
docker build --build-arg version=$version -t $DOCKER_REPO:$version .
echo "------ escheduler end - build -------"

79
dockerfile/dockerfile-1.0.x/startup.sh

@ -1,79 +0,0 @@
#! /bin/bash
set -e
if [ `netstat -anop|grep mysql|wc -l` -gt 0 ];then
echo "MySQL is Running."
else
MYSQL_ROOT_PWD="root@123"
ESZ_DB="escheduler"
echo "启动mysql服务"
chown -R mysql:mysql /var/lib/mysql /var/run/mysqld
find /var/lib/mysql -type f -exec touch {} \; && service mysql restart && sleep 10
if [ ! -f /nohup.out ];then
echo "设置mysql密码"
mysql --user=root --password=root -e "UPDATE mysql.user set authentication_string=password('$MYSQL_ROOT_PWD') where user='root'; FLUSH PRIVILEGES;"
echo "设置mysql权限"
mysql --user=root --password=$MYSQL_ROOT_PWD -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY '$MYSQL_ROOT_PWD' WITH GRANT OPTION; FLUSH PRIVILEGES;"
echo "创建escheduler数据库"
mysql --user=root --password=$MYSQL_ROOT_PWD -e "CREATE DATABASE IF NOT EXISTS \`$ESZ_DB\` CHARACTER SET utf8 COLLATE utf8_general_ci; FLUSH PRIVILEGES;"
echo "导入mysql数据"
nohup /opt/escheduler/script/create_escheduler.sh &
fi
if [ `mysql --user=root --password=$MYSQL_ROOT_PWD -s -r -e "SELECT count(TABLE_NAME) FROM information_schema.TABLES WHERE TABLE_SCHEMA='escheduler';" | grep -v count` -eq 38 ];then
echo "\`$ESZ_DB\` 表个数正确"
else
echo "\`$ESZ_DB\` 表个数不正确"
mysql --user=root --password=$MYSQL_ROOT_PWD -e "DROP DATABASE \`$ESZ_DB\`;"
echo "创建escheduler数据库"
mysql --user=root --password=$MYSQL_ROOT_PWD -e "CREATE DATABASE IF NOT EXISTS \`$ESZ_DB\` CHARACTER SET utf8 COLLATE utf8_general_ci; FLUSH PRIVILEGES;"
echo "导入mysql数据"
nohup /opt/escheduler/script/create_escheduler.sh &
fi
fi
/opt/zookeeper/bin/zkServer.sh restart
sleep 10
echo "启动api-server"
/opt/escheduler/bin/escheduler-daemon.sh stop api-server
/opt/escheduler/bin/escheduler-daemon.sh start api-server
echo "启动master-server"
/opt/escheduler/bin/escheduler-daemon.sh stop master-server
python /opt/escheduler/script/del_zk_node.py 127.0.0.1 /escheduler/masters
/opt/escheduler/bin/escheduler-daemon.sh start master-server
echo "启动worker-server"
/opt/escheduler/bin/escheduler-daemon.sh stop worker-server
python /opt/escheduler/script/del_zk_node.py 127.0.0.1 /escheduler/workers
/opt/escheduler/bin/escheduler-daemon.sh start worker-server
echo "启动logger-server"
/opt/escheduler/bin/escheduler-daemon.sh stop logger-server
/opt/escheduler/bin/escheduler-daemon.sh start logger-server
echo "启动alert-server"
/opt/escheduler/bin/escheduler-daemon.sh stop alert-server
/opt/escheduler/bin/escheduler-daemon.sh start alert-server
echo "启动nginx"
/etc/init.d/nginx stop
nginx &
while true
do
sleep 101
done
exec "$@"

31
dockerfile/dockerfile-1.1.x/conf/escheduler/conf/alert_logback.xml

@ -1,31 +0,0 @@
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
<property name="log.base" value="logs" />
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="ALERTLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.base}/escheduler-alert.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/escheduler-alert.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>20</maxHistory>
<maxFileSize>64MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="ALERTLOGFILE"/>
</root>
</configuration>

42
dockerfile/dockerfile-1.1.x/conf/escheduler/conf/apiserver_logback.xml

@ -1,42 +0,0 @@
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds">
<logger name="org.apache.zookeeper" level="WARN"/>
<logger name="org.apache.hbase" level="WARN"/>
<logger name="org.apache.hadoop" level="WARN"/>
<property name="log.base" value="logs" />
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="APISERVERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- Log level filter -->
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<file>${log.base}/escheduler-api-server.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/escheduler-api-server.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>168</maxHistory>
<maxFileSize>64MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="APISERVERLOGFILE" />
</root>
</configuration>

1
dockerfile/dockerfile-1.1.x/conf/escheduler/conf/application_master.properties

@ -1 +0,0 @@
logging.config=classpath:master_logback.xml

3
dockerfile/dockerfile-1.1.x/conf/escheduler/conf/config/install_config.conf

@ -1,3 +0,0 @@
installPath=/data1_1T/escheduler
deployUser=escheduler
ips=ark0,ark1,ark2,ark3,ark4

4
dockerfile/dockerfile-1.1.x/conf/escheduler/conf/config/run_config.conf

@ -1,4 +0,0 @@
masters=ark0,ark1
workers=ark2,ark3,ark4
alertServer=ark3
apiServers=ark1

53
dockerfile/dockerfile-1.1.x/conf/escheduler/conf/dao/data_source.properties

@ -1,53 +0,0 @@
# base spring data source configuration
spring.datasource.type=com.alibaba.druid.pool.DruidDataSource
spring.datasource.driver-class-name=com.mysql.jdbc.Driver
spring.datasource.url=jdbc:mysql://127.0.0.1:3306/escheduler?characterEncoding=UTF-8
spring.datasource.username=root
spring.datasource.password=root@123
# connection configuration
spring.datasource.initialSize=5
# min connection number
spring.datasource.minIdle=5
# max connection number
spring.datasource.maxActive=50
# max wait time for get a connection in milliseconds. if configuring maxWait, fair locks are enabled by default and concurrency efficiency decreases.
# If necessary, unfair locks can be used by configuring the useUnfairLock attribute to true.
spring.datasource.maxWait=60000
# milliseconds for check to close free connections
spring.datasource.timeBetweenEvictionRunsMillis=60000
# the Destroy thread detects the connection interval and closes the physical connection in milliseconds if the connection idle time is greater than or equal to minEvictableIdleTimeMillis.
spring.datasource.timeBetweenConnectErrorMillis=60000
# the longest time a connection remains idle without being evicted, in milliseconds
spring.datasource.minEvictableIdleTimeMillis=300000
#the SQL used to check whether the connection is valid requires a query statement. If validation Query is null, testOnBorrow, testOnReturn, and testWhileIdle will not work.
spring.datasource.validationQuery=SELECT 1
#check whether the connection is valid for timeout, in seconds
spring.datasource.validationQueryTimeout=3
# when applying for a connection, if it is detected that the connection is idle longer than time Between Eviction Runs Millis,
# validation Query is performed to check whether the connection is valid
spring.datasource.testWhileIdle=true
#execute validation to check if the connection is valid when applying for a connection
spring.datasource.testOnBorrow=true
#execute validation to check if the connection is valid when the connection is returned
spring.datasource.testOnReturn=false
spring.datasource.defaultAutoCommit=true
spring.datasource.keepAlive=true
# open PSCache, specify count PSCache for every connection
spring.datasource.poolPreparedStatements=true
spring.datasource.maxPoolPreparedStatementPerConnectionSize=20
# data quality analysis is not currently in use. please ignore the following configuration
# task record flag
task.record.flag=false
task.record.datasource.url=jdbc:mysql://192.168.xx.xx:3306/etl?characterEncoding=UTF-8
task.record.datasource.username=xx
task.record.datasource.password=xx

1
dockerfile/dockerfile-1.1.x/conf/escheduler/conf/mail_templates/alert_mail_template.ftl

@ -1 +0,0 @@
<!DOCTYPE HTML PUBLIC '-//W3C//DTD HTML 4.01 Transitional//EN' 'http://www.w3.org/TR/html4/loose.dtd'><html><head><title> easyscheduler</title><meta name='Keywords' content=''><meta name='Description' content=''><style type="text/css">table { margin-top:0px; padding-top:0px; border:1px solid; font-size: 14px; color: #333333; border-width: 1px; border-color: #666666; border-collapse: collapse; } table th { border-width: 1px; padding: 8px; border-style: solid; border-color: #666666; background-color: #dedede; } table td { border-width: 1px; padding: 8px; border-style: solid; border-color: #666666; background-color: #ffffff; }</style></head><body style="margin:0;padding:0"><table border="1px" cellpadding="5px" cellspacing="-10px"><thead><#if title??> ${title}</#if></thead><#if content??> ${content}</#if></table></body></html>

21
dockerfile/dockerfile-1.1.x/conf/escheduler/conf/master.properties

@ -1,21 +0,0 @@
# master execute thread num
master.exec.threads=100
# master execute task number in parallel
master.exec.task.number=20
# master heartbeat interval
master.heartbeat.interval=10
# master commit task retry times
master.task.commit.retryTimes=5
# master commit task interval
master.task.commit.interval=100
# the master server can only work when the cpu avg load is less than this value. default value : the number of cpu cores * 2
master.max.cpuload.avg=10
# the master server can only work when the available memory is larger than this reserved memory. default value : physical memory * 1/10, unit is G.
master.reserved.memory=1
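The two thresholds above gate whether the master picks up work; a hedged manual check of the current host values (the free output columns can differ between procps versions, so treat this as a sketch):

# 1-minute load average, to compare against master.max.cpuload.avg
uptime | awk -F'load average: ' '{print $2}' | cut -d, -f1
# last memory column (available on recent procps) in GB, to compare against master.reserved.memory
free -g | awk '/^Mem:/ {print $NF}'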

34
dockerfile/dockerfile-1.1.x/conf/escheduler/conf/master_logback.xml

@ -1,34 +0,0 @@
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
<property name="log.base" value="logs" />
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="MASTERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.base}/escheduler-master.log</file>
<filter class="cn.escheduler.server.master.log.MasterLogFilter">
<level>INFO</level>
</filter>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/escheduler-master.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>168</maxHistory>
<maxFileSize>200MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="MASTERLOGFILE"/>
</root>
</configuration>

15
dockerfile/dockerfile-1.1.x/conf/escheduler/conf/worker.properties

@ -1,15 +0,0 @@
# worker execute thread num
worker.exec.threads=100
# worker heartbeat interval
worker.heartbeat.interval=10
# the number of tasks to fetch and submit at a time
worker.fetch.task.num = 3
# the worker server can only work when the cpu avg load is less than this value. default value : the number of cpu cores * 2
#worker.max.cpuload.avg=10
# the worker server can only work when the available memory is larger than this reserved memory. default value : physical memory * 1/6, unit is G.
worker.reserved.memory=1

53
dockerfile/dockerfile-1.1.x/conf/escheduler/conf/worker_logback.xml

@ -1,53 +0,0 @@
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds">
<property name="log.base" value="logs"/>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="TASKLOGFILE" class="cn.escheduler.server.worker.log.TaskLogAppender">
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<filter class="cn.escheduler.server.worker.log.TaskLogFilter"></filter>
<file>${log.base}/{processDefinitionId}/{processInstanceId}/{taskInstanceId}.log</file>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
<append>true</append>
</appender>
<appender name="WORKERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.base}/escheduler-worker.log</file>
<filter class="cn.escheduler.server.worker.log.WorkerLogFilter">
<level>INFO</level>
</filter>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/escheduler-worker.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>168</maxHistory>
<maxFileSize>200MB</maxFileSize>
</rollingPolicy>
     
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
  
</appender>
<root level="INFO">
<appender-ref ref="TASKLOGFILE"/>
<appender-ref ref="WORKERLOGFILE"/>
</root>
</configuration>

263
dockerfile/dockerfile-1.1.x/conf/maven/settings.xml

@ -1,263 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<!--
| This is the configuration file for Maven. It can be specified at two levels:
|
| 1. User Level. This settings.xml file provides configuration for a single user,
| and is normally provided in ${user.home}/.m2/settings.xml.
|
| NOTE: This location can be overridden with the CLI option:
|
| -s /path/to/user/settings.xml
|
| 2. Global Level. This settings.xml file provides configuration for all Maven
| users on a machine (assuming they're all using the same Maven
| installation). It's normally provided in
| ${maven.home}/conf/settings.xml.
|
| NOTE: This location can be overridden with the CLI option:
|
| -gs /path/to/global/settings.xml
|
| The sections in this sample file are intended to give you a running start at
| getting the most out of your Maven installation. Where appropriate, the default
| values (values used when the setting is not specified) are provided.
|
|-->
<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">
<!-- localRepository
| The path to the local repository maven will use to store artifacts.
|
| Default: ${user.home}/.m2/repository
<localRepository>/path/to/local/repo</localRepository>
-->
<!-- interactiveMode
| This will determine whether maven prompts you when it needs input. If set to false,
| maven will use a sensible default value, perhaps based on some other setting, for
| the parameter in question.
|
| Default: true
<interactiveMode>true</interactiveMode>
-->
<!-- offline
| Determines whether maven should attempt to connect to the network when executing a build.
| This will have an effect on artifact downloads, artifact deployment, and others.
|
| Default: false
<offline>false</offline>
-->
<!-- pluginGroups
| This is a list of additional group identifiers that will be searched when resolving plugins by their prefix, i.e.
| when invoking a command line like "mvn prefix:goal". Maven will automatically add the group identifiers
| "org.apache.maven.plugins" and "org.codehaus.mojo" if these are not already contained in the list.
|-->
<pluginGroups>
<!-- pluginGroup
| Specifies a further group identifier to use for plugin lookup.
<pluginGroup>com.your.plugins</pluginGroup>
-->
</pluginGroups>
<!-- proxies
| This is a list of proxies which can be used on this machine to connect to the network.
| Unless otherwise specified (by system property or command-line switch), the first proxy
| specification in this list marked as active will be used.
|-->
<proxies>
<!-- proxy
| Specification for one proxy, to be used in connecting to the network.
|
<proxy>
<id>optional</id>
<active>true</active>
<protocol>http</protocol>
<username>proxyuser</username>
<password>proxypass</password>
<host>proxy.host.net</host>
<port>80</port>
<nonProxyHosts>local.net|some.host.com</nonProxyHosts>
</proxy>
-->
</proxies>
<!-- servers
| This is a list of authentication profiles, keyed by the server-id used within the system.
| Authentication profiles can be used whenever maven must make a connection to a remote server.
|-->
<servers>
<!-- server
| Specifies the authentication information to use when connecting to a particular server, identified by
| a unique name within the system (referred to by the 'id' attribute below).
|
| NOTE: You should either specify username/password OR privateKey/passphrase, since these pairings are
| used together.
|
<server>
<id>deploymentRepo</id>
<username>repouser</username>
<password>repopwd</password>
</server>
-->
<!-- Another sample, using keys to authenticate.
<server>
<id>siteServer</id>
<privateKey>/path/to/private/key</privateKey>
<passphrase>optional; leave empty if not used.</passphrase>
</server>
-->
</servers>
<!-- mirrors
| This is a list of mirrors to be used in downloading artifacts from remote repositories.
|
| It works like this: a POM may declare a repository to use in resolving certain artifacts.
| However, this repository may have problems with heavy traffic at times, so people have mirrored
| it to several places.
|
| That repository definition will have a unique id, so we can create a mirror reference for that
| repository, to be used as an alternate download site. The mirror site will be the preferred
| server for that repository.
|-->
<mirrors>
<!-- mirror
| Specifies a repository mirror site to use instead of a given repository. The repository that
| this mirror serves has an ID that matches the mirrorOf element of this mirror. IDs are used
| for inheritance and direct lookup purposes, and must be unique across the set of mirrors.
|
<mirror>
<id>mirrorId</id>
<mirrorOf>repositoryId</mirrorOf>
<name>Human Readable Name for this Mirror.</name>
<url>http://my.repository.com/repo/path</url>
</mirror>
-->
<mirror>
<id>nexus-aliyun</id>
<mirrorOf>central</mirrorOf>
<name>Nexus aliyun</name>
<url>http://maven.aliyun.com/nexus/content/groups/public</url>
</mirror>
</mirrors>
<!-- profiles
| This is a list of profiles which can be activated in a variety of ways, and which can modify
| the build process. Profiles provided in the settings.xml are intended to provide local machine-
| specific paths and repository locations which allow the build to work in the local environment.
|
| For example, if you have an integration testing plugin - like cactus - that needs to know where
| your Tomcat instance is installed, you can provide a variable here such that the variable is
| dereferenced during the build process to configure the cactus plugin.
|
| As noted above, profiles can be activated in a variety of ways. One way - the activeProfiles
| section of this document (settings.xml) - will be discussed later. Another way essentially
| relies on the detection of a system property, either matching a particular value for the property,
| or merely testing its existence. Profiles can also be activated by JDK version prefix, where a
| value of '1.4' might activate a profile when the build is executed on a JDK version of '1.4.2_07'.
| Finally, the list of active profiles can be specified directly from the command line.
|
| NOTE: For profiles defined in the settings.xml, you are restricted to specifying only artifact
| repositories, plugin repositories, and free-form properties to be used as configuration
| variables for plugins in the POM.
|
|-->
<profiles>
<!-- profile
| Specifies a set of introductions to the build process, to be activated using one or more of the
| mechanisms described above. For inheritance purposes, and to activate profiles via <activatedProfiles/>
| or the command line, profiles have to have an ID that is unique.
|
| An encouraged best practice for profile identification is to use a consistent naming convention
| for profiles, such as 'env-dev', 'env-test', 'env-production', 'user-jdcasey', 'user-brett', etc.
| This will make it more intuitive to understand what the set of introduced profiles is attempting
| to accomplish, particularly when you only have a list of profile id's for debug.
|
| This profile example uses the JDK version to trigger activation, and provides a JDK-specific repo.
<profile>
<id>jdk-1.4</id>
<activation>
<jdk>1.4</jdk>
</activation>
<repositories>
<repository>
<id>jdk14</id>
<name>Repository for JDK 1.4 builds</name>
<url>http://www.myhost.com/maven/jdk14</url>
<layout>default</layout>
<snapshotPolicy>always</snapshotPolicy>
</repository>
</repositories>
</profile>
-->
<!--
| Here is another profile, activated by the system property 'target-env' with a value of 'dev',
| which provides a specific path to the Tomcat instance. To use this, your plugin configuration
| might hypothetically look like:
|
| ...
| <plugin>
| <groupId>org.myco.myplugins</groupId>
| <artifactId>myplugin</artifactId>
|
| <configuration>
| <tomcatLocation>${tomcatPath}</tomcatLocation>
| </configuration>
| </plugin>
| ...
|
| NOTE: If you just wanted to inject this configuration whenever someone set 'target-env' to
| anything, you could just leave off the <value/> inside the activation-property.
|
<profile>
<id>env-dev</id>
<activation>
<property>
<name>target-env</name>
<value>dev</value>
</property>
</activation>
<properties>
<tomcatPath>/path/to/tomcat/instance</tomcatPath>
</properties>
</profile>
-->
</profiles>
<!-- activeProfiles
| List of profiles that are active for all builds.
|
<activeProfiles>
<activeProfile>alwaysActiveProfile</activeProfile>
<activeProfile>anotherAlwaysActiveProfile</activeProfile>
</activeProfiles>
-->
</settings>
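This settings.xml is the one the image's Maven build resolves against (the Dockerfile adds it to /opt/maven/conf); the backend build from the Dockerfile can be reproduced by pointing mvn at it explicitly:

# use the aliyun central mirror defined above for the backend build
mvn -s /opt/maven/conf/settings.xml -U clean package assembly:assembly -Dmaven.test.skip=true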

31
dockerfile/dockerfile-1.1.x/conf/nginx/default.conf

@ -1,31 +0,0 @@
server {
listen 8888;
server_name localhost;
#charset koi8-r;
#access_log /var/log/nginx/host.access.log main;
location / {
root /opt/easyscheduler_source/escheduler-ui/dist;
index index.html;
}
location /escheduler {
proxy_pass http://127.0.0.1:12345;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header x_real_ip $remote_addr;
proxy_set_header remote_addr $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_http_version 1.1;
proxy_connect_timeout 300s;
proxy_read_timeout 300s;
proxy_send_timeout 300s;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
#error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}
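A quick way to exercise this configuration once nginx and the api-server are running; the two paths follow the location blocks above (static UI on 8888, /escheduler proxied to 127.0.0.1:12345):

# static UI served from the dist directory
curl -I http://localhost:8888/
# API traffic forwarded to the api-server
curl -I http://localhost:8888/escheduler/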

28
dockerfile/dockerfile-1.1.x/conf/zookeeper/zoo.cfg

@ -1,28 +0,0 @@
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/tmp/zookeeper
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
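With this zoo.cfg in place, the standalone ZooKeeper can be health-checked with the script already used elsewhere in the image; the ruok probe is optional and assumes nc is available:

/opt/zookeeper/bin/zkServer.sh status
# optional liveness probe against clientPort; a healthy server answers "imok"
echo ruok | nc 127.0.0.1 2181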

8
dockerfile/dockerfile-1.1.x/hooks/push

@ -1,8 +0,0 @@
#!/bin/bash
echo "------ push start -------"
printenv
docker push $DOCKER_REPO:$version
echo "------ push end -------"

0
dockerfile/dockerfile-1.1.x/hooks/build → dockerfile/hooks/build

0
dockerfile/dockerfile-1.0.x/hooks/push → dockerfile/hooks/push

5
dockerfile/dockerfile-1.1.x/startup.sh → dockerfile/startup.sh

@ -30,13 +30,14 @@ else
echo "创建escheduler数据库"
mysql --user=root --password=$MYSQL_ROOT_PWD -e "CREATE DATABASE IF NOT EXISTS \`$ESZ_DB\` CHARACTER SET utf8 COLLATE utf8_general_ci; FLUSH PRIVILEGES;"
echo "导入mysql数据"
nohup /opt/escheduler/script/create_escheduler.sh &
nohup /opt/escheduler/script/create_escheduler.sh &
sleep 90
fi
fi
/opt/zookeeper/bin/zkServer.sh restart
sleep 10
sleep 90
echo "启动api-server"
/opt/escheduler/bin/escheduler-daemon.sh stop api-server

84
docs/en_US/EasyScheduler Proposal.md

@ -2,28 +2,20 @@
## Abstract
EasyScheduler is a distributed visual workflow scheduling system, which focuses on solving the problem of "complex task dependencies" in data processing. just like its name, we dedicated to making the scheduling system `out of the box` .
EasyScheduler is a distributed ETL scheduling engine with a powerful DAG visualization interface. EasyScheduler focuses on solving the problem of 'complex task dependencies & triggers' in data processing. Just like its name, we are dedicated to making the scheduling system `out of the box`.
## Proposal
EasyScheduler provides many easy-to-use features to simplify the use of data processing workflow,We propose the clear concept of "instance of process" and "instance of task" to make it more convenient to get the running state of workflow every time. its main objectives are as follows:
- Associate the tasks according to the dependencies of the tasks in a DAG graph, which can visualize the running state of task in real time.
- Support for many task types: Shell, MR, Spark, SQL (mysql, postgresql, hive, sparksql), Python, Sub_Process, Procedure, etc.
- Support process scheduling, dependency scheduling, manual scheduling, manual pause/stop/recovery, support failed retry/alarm, recovery from specified nodes, kill task, etc.
- Support process priority, task priority and task failover and task timeout alarm/failure
- Support process global parameters and node custom parameter settings
- Support online upload/download of resource files, management, etc. Support online file creation and editing
- Support task log online viewing and scrolling, online download log, etc.
- Implement cluster HA, decentralize Master cluster and Worker cluster through Zookeeper
- Support online viewing of `Master/Worker` cpu load, memory
- Support process running history tree/gantt chart display, support task status statistics, process status statistics
- Support backfilling data
- Support multi-tenant
- Easy to maintain
for now, EasyScheduler has a fairly huge community in China.
It is also widely adopted by many [companies and organizations](https://github.com/analysys/EasyScheduler/issues/57) as an ETL scheduling tool.
EasyScheduler provides many easy-to-use features to accelerate engineering efficiency on data ETL workflow jobs. We propose a new concept of 'instance of process' and 'instance of task' to let developers tune their jobs based on the running state of the workflow instead of changing the task's template. Its main objectives are as follows:
- Define the complex tasks' dependencies & triggers in a DAG graph by dragging and dropping.
- Support cluster HA.
- Support multi-tenancy and parallel or serial data backfilling.
- Support automatic failed-job retry and recovery.
- Support many data task types as well as process priority, task priority and task timeout alarms.
For now, EasyScheduler has a fairly huge community in China.
It is also widely adopted by many [companies and organizations](https://github.com/analysys/EasyScheduler/issues/57) as their ETL scheduling tool.
We believe that bringing EasyScheduler into ASF could advance the development of a much stronger and more diverse open source community.
@ -38,15 +30,15 @@ The codes are already under Apache License Version 2.0.
We want to find a data processing tool with the following features:
- easy to use,It can be assembled into a process with a very simple drag and drop operation. not only for developers,people who can't write code also can use
- solving the problem of "complex task dependencies" , and can monitor the running status
- support multi-tenant
- support many task types: Shell, MR, Spark, SQL (mysql, postgresql, hive, sparksql), Python, Sub_Process, Procedure, etc.
- linear scalability
- Easy to use: developers can build an ETL process with a very simple drag-and-drop operation. It is not only for ETL developers; people who can't write code, such as system administrators, can also use this tool for ETL operations.
- Solving the problem of "complex task dependencies", with the ability to monitor the ETL running status.
- Support multi-tenant.
- Support many task types: Shell, MR, Spark, SQL (mysql, postgresql, hive, sparksql), Python, Sub_Process, Procedure, etc.
- Support HA and linear scalability.
For the above reasons, we realized that no existing product met our exact requirements externally, so we decided to develop it ourselves.EasyScheduler completed the architecture design at the end of 2017. The first internal use version was completed in May 2018. We then iterated several versions and the system gradually stabilized.
For the above reasons, we realized that no existing product met our requirements, so we decided to develop this tool ourselves. We designed EasyScheduler at the end of 2017. The first internal-use version was completed in May 2018. We then iterated through several internal versions and the system gradually stabilized.
EasyScheduler won the `GVP` (Gitee Most Valuable Project) in April 2019
Then we opened the source code of EasyScheduler in March 2019. It soon gained a lot of interest from ETL developers and stars on GitHub. It won the `GVP` (Gitee Most Valuable Project) in April 2019, and one of our key members was invited to give a speech at GAIC Summit 2019 in June 2019.
## Rationale
@ -80,9 +72,7 @@ Thus, it is very unlikely that EasyScheduler becomes orphaned.
### Inexperience with Open Source
The core developers are all active users and followers of open source. They are already committers and contributors to the EasyScheduler Github project. All have been involved with the source code that has been released under an open source license, and several of them also have experience developing code in an open source environment, they are also active in presto, alluxio and other projects.
Therefore, we believe we have enough experience to deal with open source.
EasyScheduler's core developers have been running it as a community-oriented open source project for some time, and several of them already have experience working with open source communities; they are also active in Presto, Alluxio and other projects. At the same time, we will learn from excellent Apache open source projects to gain more open source experience and make up for this shortcoming.
### Homogenous Developers
@ -92,7 +82,7 @@ Considering that fengjr and sefonsoft have shown great interest in EasyScheduler
### Reliance on Salaried Developers
At present, four of the core developers are paid by their employer to contribute to EasyScheduler project.
At present, eight of the core developers are paid by their employer to contribute to the EasyScheduler project.
We have also found some developers and researchers (>8) to contribute to the project, and we will make efforts to increase the diversity of the contributors and actively lobby for domain experts in the workflow space to contribute.
### Relationships with Other Apache Products
@ -106,20 +96,18 @@ However, we prefer that the community provided by the Apache Software Foundation
## Documentation
A complete set of Sharding-Sphere documentations is provided on shardingsphere.io in both English and Simplified Chinese.
A complete set of EasyScheduler documentation is provided on GitHub in both English and Simplified Chinese.
- [English](https://github.com/analysys/easyscheduler_docs/en_US)
- [Chinese](https://github.com/analysys/easyscheduler_docs/zh_CN)
- [English](https://github.com/analysys/easyscheduler_docs)
- [Chinese](https://github.com/analysys/easyscheduler_docs_cn)
## Initial Source
The project consists of two distinct codebases: core and document. The address of two existed git repositories are as follows:
The project consists of three distinct codebases: the core and two documentation repositories (English and Chinese). The addresses of the three existing git repositories are as follows:
- <https://github.com/analysys/easyscheduler>
- <https://github.com/analysys/easyscheduler_docs>
- <https://github.com/analysys/easyscheduler_docs_cn>
## Source and Intellectual Property Submission Plan
@ -129,7 +117,7 @@ As soon as EasyScheduler is approved to join Apache Incubator, Analysys will exe
As all backend code dependencies are managed using Apache Maven, none of the external libraries need to be packaged in a source distribution.
most of dependencies have Apache compatible licenses,and the detail as follows:
Most of the dependencies have Apache-compatible licenses, and the details are as follows:
### Backend Dependency
@ -1389,6 +1377,7 @@ The front-end UI currently relies on many components, which we will list separat
- <https://github.com/analysys/EasyScheduler.git>
- <https://github.com/analysys/easyscheduler_docs.git>
- <https://github.com/analysys/easyscheduler_docs_cn.git>
### Issue Tracking
@ -1406,14 +1395,14 @@ Travis (TODO)
## Initial Committers
- William-GuoWei
- Lidong Dai
- Zhanwei Qiao
- William-GuoWei(guowei20m@outlook.com)
- Lidong Dai(lidong.dai@outlook.com)
- Zhanwei Qiao(qiaozhanwei@outlook.com)
- Liang Bao
- Gang Li
- Zijian Gong
- Zijian Gong(quanquansy@gmail.com)
- Jun Gao
- Baoqi Wu
- Baoqi Wu(wubaoqi@gmail.com)
## Affiliations
@ -1433,17 +1422,14 @@ Travis (TODO)
### Mentors
- Sheng Wu ( Apache Software Foundation Member [wusheng@apache.org](mailto:wusheng@apache.org))
- Sheng Wu ( Apache Incubator PMC, [wusheng@apache.org](mailto:wusheng@apache.org))
- ShaoFeng Shi ( Apache Software Foundation Incubator PMC [wusheng@apache.org](mailto:wusheng@apache.org))
- ShaoFeng Shi ( Apache Kylin committer & PMC, Apache Incubator PMC, [shaofengshi@apache.org](mailto:shaofengshi@apache.org))
- Liang Chen ( Apache Software Foundation Member chenliang613@apache.org](mailto:chenliang613@apache.org))
- Liang Chen ( Apache Software Foundation Member, [chenliang613@apache.org](mailto:chenliang613@apache.org))
### Sponsoring Entity
We are expecting the Apache Incubator could sponsor this project.

29
escheduler-api/src/main/java/cn/escheduler/api/service/SessionService.java

@ -19,6 +19,7 @@ package cn.escheduler.api.service;
import cn.escheduler.api.controller.BaseController;
import cn.escheduler.api.utils.Constants;
import cn.escheduler.common.utils.CollectionUtils;
import cn.escheduler.dao.mapper.SessionMapper;
import cn.escheduler.dao.model.Session;
import cn.escheduler.dao.model.User;
@ -31,6 +32,7 @@ import org.springframework.stereotype.Service;
import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletRequest;
import java.util.Date;
import java.util.List;
import java.util.UUID;
/**
@ -68,7 +70,7 @@ public class SessionService extends BaseService{
String ip = BaseController.getClientIpAddress(request);
logger.info("get session: {}, ip: {}", sessionId, ip);
return sessionMapper.queryByIdAndIp(sessionId);
return sessionMapper.queryBySessionId(sessionId);
}
/**
@ -79,14 +81,26 @@ public class SessionService extends BaseService{
* @return
*/
public String createSession(User user, String ip) {
Session session = null;
// already logged in
Session session = sessionMapper.queryByUserIdAndIp(user.getId());
List<Session> sessionList = sessionMapper.queryByUserId(user.getId());
Date now = new Date();
/**
* if you have logged in and are still valid, return directly
*/
if (session != null) {
if (CollectionUtils.isNotEmpty(sessionList)) {
// if the session list has more than one entry, delete the others and keep one
if (sessionList.size() > 1){
for (int i=1 ; i < sessionList.size();i++){
sessionMapper.deleteById(sessionList.get(i).getId());
}
}
session = sessionList.get(0);
if (now.getTime() - session.getLastLoginTime().getTime() <= Constants.SESSION_TIME_OUT * 1000) {
/**
* updateProcessInstance the latest login time
@ -126,8 +140,11 @@ public class SessionService extends BaseService{
/**
* query session by user id and ip
*/
Session session = sessionMapper.queryByUserIdAndIp(loginUser.getId());
//delete session
sessionMapper.deleteById(session.getId());
List<Session> sessionList = sessionMapper.queryByUserId(loginUser.getId());
for (Session session : sessionList){
//delete session
sessionMapper.deleteById(session.getId());
}
}
}
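The switch from queryByUserIdAndIp to queryByUserId means a user may briefly own several session rows, which createSession now trims back to one. A hedged way to look for such duplicates directly in the database; the session table name t_escheduler_session is an assumption, since it is not shown in this diff:

# count session rows per user; rows with more than one entry are the ones the new code cleans up
mysql --user=root --password=root@123 escheduler -e "SELECT user_id, COUNT(*) AS session_rows FROM t_escheduler_session GROUP BY user_id HAVING COUNT(*) > 1;"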

23
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/SessionMapper.java

@ -22,6 +22,7 @@ import org.apache.ibatis.type.JdbcType;
import java.sql.Timestamp;
import java.util.Date;
import java.util.List;
/**
* session mapper
@ -57,20 +58,6 @@ public interface SessionMapper {
int update(@Param("sessionId") String sessionId, @Param("loginTime") Date loginTime);
/**
* query by session id
* @param sessionId
* @return
*/
@Results(value = {@Result(property = "id", column = "id", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "userId", column = "user_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "ip", column = "ip", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "lastLoginTime", column = "last_login_time", javaType = Timestamp.class, jdbcType = JdbcType.DATE)
})
@SelectProvider(type = SessionMapperProvider.class, method = "queryById")
Session queryById(@Param("sessionId") int sessionId);
/**
* query by session id and ip
*
@@ -83,8 +70,8 @@ public interface SessionMapper {
@Result(property = "ip", column = "ip", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "lastLoginTime", column = "last_login_time", javaType = Timestamp.class, jdbcType = JdbcType.DATE)
})
@SelectProvider(type = SessionMapperProvider.class, method = "queryByIdAndIp")
Session queryByIdAndIp(@Param("sessionId") String sessionId);
@SelectProvider(type = SessionMapperProvider.class, method = "queryBySessionId")
Session queryBySessionId(@Param("sessionId") String sessionId);
/**
@@ -98,7 +85,7 @@ public interface SessionMapper {
@Result(property = "ip", column = "ip", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "lastLoginTime", column = "last_login_time", javaType = Timestamp.class, jdbcType = JdbcType.DATE)
})
@SelectProvider(type = SessionMapperProvider.class, method = "queryByUserIdAndIp")
Session queryByUserIdAndIp(@Param("userId") int userId);
@SelectProvider(type = SessionMapperProvider.class, method = "queryByUserId")
List<Session> queryByUserId(@Param("userId") int userId);
}
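With queryById removed, the mapper is left with queryBySessionId and a list-returning queryByUserId. The matching SessionMapperProvider methods are not part of this diff, so the sketch below is only a guess at their shape, written with the same org.apache.ibatis.jdbc.SQL builder pattern that the TenantMapperProvider hunk further down uses; the class body and table name are assumptions.

```java
import org.apache.ibatis.jdbc.SQL;

import java.util.Map;

// Hypothetical provider backing the renamed @SelectProvider targets above.
// SessionMapperProvider itself is not shown in this diff, so everything here
// is an assumption for illustration only.
public class SessionMapperProviderSketch {

    private static final String TABLE_NAME = "t_escheduler_session"; // assumed table name

    // backs @SelectProvider(method = "queryBySessionId")
    public String queryBySessionId(Map<String, Object> parameter) {
        return new SQL() {{
            SELECT("*");
            FROM(TABLE_NAME);
            WHERE("`id` = #{sessionId}");
        }}.toString();
    }

    // backs @SelectProvider(method = "queryByUserId"); a user may have several
    // rows, which is why the mapper now declares List<Session>
    public String queryByUserId(Map<String, Object> parameter) {
        return new SQL() {{
            SELECT("*");
            FROM(TABLE_NAME);
            WHERE("`user_id` = #{userId}");
        }}.toString();
    }

    public static void main(String[] args) {
        System.out.println(new SessionMapperProviderSketch().queryByUserId(null));
    }
}
```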

2
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/TenantMapper.java

@@ -68,6 +68,8 @@ public interface TenantMapper {
@Result(property = "tenantName", column = "tenant_name", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "desc", column = "desc", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "queueId", column = "queue_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "queueName", column = "queue_name", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "queue", column = "queue", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "createTime", column = "create_time", javaType = Timestamp.class, jdbcType = JdbcType.DATE),
@Result(property = "updateTime", column = "update_time", javaType = Timestamp.class, jdbcType = JdbcType.DATE),
})

9
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/TenantMapperProvider.java

@@ -97,11 +97,10 @@ public class TenantMapperProvider {
public String queryById(Map<String, Object> parameter) {
return new SQL() {
{
SELECT("*");
FROM(TABLE_NAME);
WHERE("`id` = #{tenantId}");
SELECT("t.*,q.queue_name,q.queue");
FROM(TABLE_NAME + " t,t_escheduler_queue q");
WHERE(" t.queue_id = q.id");
WHERE(" t.id = #{tenantId}");
}
}.toString();
}
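For reference, the rewritten queryById above now joins t_escheduler_queue so a single statement also fills the queueName and queue fields added to the Tenant model in the next hunk. A tiny standalone check of the SQL it generates, with the tenant table name assumed:

```java
import org.apache.ibatis.jdbc.SQL;

// Prints the statement produced by the builder in the hunk above.
// "t_escheduler_tenant" is an assumption; the join itself comes from the diff.
public class TenantQueryByIdSqlDemo {
    public static void main(String[] args) {
        String sql = new SQL() {{
            SELECT("t.*,q.queue_name,q.queue");
            FROM("t_escheduler_tenant t,t_escheduler_queue q");
            WHERE(" t.queue_id = q.id");
            WHERE(" t.id = #{tenantId}");
        }}.toString();
        // consecutive WHERE(...) calls are combined with AND by the MyBatis SQL builder
        System.out.println(sql);
    }
}
```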

16
escheduler-dao/src/main/java/cn/escheduler/dao/model/Tenant.java

@@ -53,6 +53,12 @@ public class Tenant {
*/
private String queueName;
/**
* queue
*/
private String queue;
/**
* create time
*/
@@ -126,6 +132,15 @@ public class Tenant {
public void setQueueName(String queueName) {
this.queueName = queueName;
}
public String getQueue() {
return queue;
}
public void setQueue(String queue) {
this.queue = queue;
}
@Override
public String toString() {
return "Tenant{" +
@@ -135,6 +150,7 @@ public class Tenant {
", desc='" + desc + '\'' +
", queueId=" + queueId +
", queueName='" + queueName + '\'' +
", queue='" + queue + '\'' +
", createTime=" + createTime +
", updateTime=" + updateTime +
'}';

72
escheduler-server/src/main/java/cn/escheduler/server/rpc/LoggerServer.java

@@ -92,14 +92,21 @@ public class LoggerServer {
@Override
public void rollViewLog(LogParameter request, StreamObserver<RetStrInfo> responseObserver) {
logger.info("log parameter path : {} ,skipLine : {}, limit : {}",
logger.info("log parameter path : {} ,skip line : {}, limit : {}",
request.getPath(),
request.getSkipLineNum(),
request.getLimit());
List<String> list = readFile(request.getPath(), request.getSkipLineNum(), request.getLimit());
StringBuilder sb = new StringBuilder();
boolean errorLineFlag = false;
for (String line : list){
sb.append(line + "\r\n");
if (line.contains("TaskLogger")){
errorLineFlag = filterLine(request.getPath(),line);
}
if (!errorLineFlag || !line.contains("TaskLogger")){
sb.append(line + "\r\n");
}
}
RetStrInfo retInfoBuild = RetStrInfo.newBuilder().setMsg(sb.toString()).build();
responseObserver.onNext(retInfoBuild);
@@ -123,7 +130,7 @@ public class LoggerServer {
responseObserver.onNext(builder.build());
responseObserver.onCompleted();
}catch (Exception e){
logger.error("get log bytes failed : " + e.getMessage(),e);
logger.error("get log bytes failed",e);
}
}
}
@@ -134,23 +141,35 @@ public class LoggerServer {
* @return
* @throws Exception
*/
private static byte[] getFileBytes(String path)throws IOException{
private static byte[] getFileBytes(String path){
InputStream in = null;
ByteArrayOutputStream bos = null;
try {
in = new FileInputStream(path);
bos = new ByteArrayOutputStream();
byte[] buffer = new byte[4096];
int n = 0;
while ((n = in.read(buffer)) != -1) {
bos.write(buffer, 0, n);
byte[] buf = new byte[1024];
int len = 0;
while ((len = in.read(buf)) != -1) {
bos.write(buf, 0, len);
}
return bos.toByteArray();
}catch (IOException e){
logger.error("getFileBytes error",e);
logger.error("get file bytes error",e);
}finally {
bos.close();
in.close();
if (bos != null){
try {
bos.close();
} catch (IOException e) {
e.printStackTrace();
}
}
if (in != null){
try {
in.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
return null;
}
@@ -166,7 +185,7 @@ public class LoggerServer {
try (Stream<String> stream = Files.lines(Paths.get(path))) {
return stream.skip(skipLine).limit(limit).collect(Collectors.toList());
} catch (IOException e) {
logger.error("read file failed : " + e.getMessage(),e);
logger.error("read file failed",e);
}
return null;
}
@@ -183,12 +202,20 @@ public class LoggerServer {
StringBuilder sb = new StringBuilder();
try {
br = new BufferedReader(new InputStreamReader(new FileInputStream(path)));
boolean errorLineFlag = false;
while ((line = br.readLine()) != null){
sb.append(line + "\r\n");
if (line.contains("TaskLogger")){
errorLineFlag = filterLine(path,line);
}
if (!errorLineFlag || !line.contains("TaskLogger")){
sb.append(line + "\r\n");
}
}
return sb.toString();
}catch (IOException e){
logger.error("read file failed : " + e.getMessage(),e);
logger.error("read file failed",e);
}finally {
try {
if (br != null){
@@ -201,4 +228,21 @@ public class LoggerServer {
return null;
}
/**
*
* @param path
* @param line
* @return
*/
private static boolean filterLine(String path,String line){
String removeSuffix = path.substring(0, path.length() - 4);
String[] strArrs = removeSuffix.split("/");
String taskAppId = String.format("%s_%s_%s",
strArrs[strArrs.length - 3],
strArrs[strArrs.length-2],
strArrs[strArrs.length - 1]);
return !line.contains(taskAppId);
}
}
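The new filterLine helper strips the four-character ".log" suffix, joins the last three path segments into a taskAppId, and returns true when a line does not mention that id; rollViewLog and the whole-file reader only recompute the flag on lines containing "TaskLogger" and skip the ones that belong to a different task. A standalone rerun of that logic, using a made-up path layout (the real directory structure is not spelled out in this diff):

```java
// Demonstrates the taskAppId filter added above. The sample path and log
// lines are invented for the demo; filterLine itself mirrors the diff.
public class TaskLogFilterDemo {

    static boolean filterLine(String path, String line) {
        String removeSuffix = path.substring(0, path.length() - 4);   // drop ".log"
        String[] strArrs = removeSuffix.split("/");
        String taskAppId = String.format("%s_%s_%s",
                strArrs[strArrs.length - 3],
                strArrs[strArrs.length - 2],
                strArrs[strArrs.length - 1]);
        return !line.contains(taskAppId);                             // true = drop this line
    }

    public static void main(String[] args) {
        String path = "/opt/logs/7/21/103.log";                       // hypothetical .../defId/instId/taskId.log
        System.out.println(filterLine(path, "[TaskLogger-7_21_103] task output"));  // false -> kept
        System.out.println(filterLine(path, "[TaskLogger-7_21_999] other task"));   // true  -> dropped
    }
}
```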

1
escheduler-server/src/main/java/cn/escheduler/server/utils/ParamUtils.java

@@ -44,7 +44,6 @@ public class ParamUtils {
CommandType commandType,
Date scheduleTime){
if (globalParams == null
&& globalParams == null
&& localParams == null){
return null;
}

21
escheduler-server/src/main/java/cn/escheduler/server/worker/WorkerServer.java

@@ -119,7 +119,7 @@ public class WorkerServer implements IStoppable {
try {
conf = new PropertiesConfiguration(Constants.WORKER_PROPERTIES_PATH);
}catch (ConfigurationException e){
logger.error("load configuration failed : " + e.getMessage(),e);
logger.error("load configuration failed",e);
System.exit(1);
}
}
@@ -167,13 +167,12 @@ public class WorkerServer implements IStoppable {
Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
@Override
public void run() {
String host = OSUtils.getHost();
// clear worker table register info
serverDao.deleteWorker(host);
logger.info("worker server stopped");
logger.warn("worker server stopped");
// worker server exit alert
if (zkWorkerClient.getActiveMasterNum() <= 1) {
for (int i = 0; i < Constants.ESCHEDULER_WARN_TIMES_FAILOVER;i++) {
alertDao.sendServerStopedAlert(1, host, "Worker-Server");
alertDao.sendServerStopedAlert(1, OSUtils.getHost(), "Worker-Server");
}
}
@@ -231,7 +230,7 @@ public class WorkerServer implements IStoppable {
@Override
public void run() {
// send heartbeat to zk
if (StringUtils.isBlank(zkWorkerClient.getWorkerZNode())){
if (StringUtils.isEmpty(zkWorkerClient.getWorkerZNode())){
logger.error("worker send heartbeat to zk failed");
}
@@ -266,14 +265,8 @@ public class WorkerServer implements IStoppable {
continue;
}else {
int taskInstId=Integer.parseInt(taskInfoArr[1]);
TaskInstance taskInstance = processDao.getTaskInstanceRelationByTaskId(taskInstId);
TaskInstance taskInstance = processDao.findTaskInstanceById(taskInstId);
ProcessInstance instance = processDao.findProcessInstanceDetailById(taskInstance.getProcessInstanceId());
if(instance != null){
taskInstance.setProcessInstance(instance);
}
if(taskInstance.getTaskType().equals(TaskType.DEPENDENT.toString())){
taskInstance.setState(ExecutionStatus.KILL);
processDao.saveTaskInstance(taskInstance);
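The shutdown hook above now logs the stop at warn level and resolves the host inline when raising the stop alert. A compact, self-contained illustration of that deregister-then-alert pattern; the println calls stand in for serverDao, the logger and alertDao, and the host and active-worker count are hard-coded assumptions for the demo.

```java
// Toy version of the worker's shutdown hook: deregister, log, then alert
// if this looks like the last live node. Collaborators are replaced by printlns.
public class WorkerShutdownHookDemo {
    public static void main(String[] args) {
        String host = "worker-1";   // OSUtils.getHost() in the real code
        int activeWorkers = 1;      // stand-in for zkWorkerClient.getActiveMasterNum()

        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            System.out.println("clear register info for " + host);                   // serverDao.deleteWorker(host)
            System.out.println("worker server stopped");                             // logger.warn(...)
            if (activeWorkers <= 1) {
                System.out.println("send Worker-Server stopped alert for " + host);  // alertDao alert loop
            }
        }));

        System.out.println("worker running; the hook fires when the JVM exits");
    }
}
```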

72
escheduler-server/src/main/java/cn/escheduler/server/worker/runner/FetchTaskThread.java

@@ -173,30 +173,30 @@ public class FetchTaskThread implements Runnable{
}
// get task instance id
taskInstId = Integer.parseInt(taskQueueStr.split(Constants.UNDERLINE)[3]);
taskInstId = getTaskInstanceId(taskQueueStr);
// get task instance relation
taskInstance = processDao.getTaskInstanceRelationByTaskId(taskInstId);
Tenant tenant = processDao.getTenantForProcess(taskInstance.getProcessInstance().getTenantId(),
taskInstance.getProcessDefine().getUserId());
if(tenant == null){
logger.error("tenant not exists,process define id : {},process instance id : {},task instance id : {}",
taskInstance.getProcessDefine().getId(),
taskInstance.getProcessInstance().getId(),
taskInstance.getId());
taskQueue.removeNode(Constants.SCHEDULER_TASKS_QUEUE, taskQueueStr);
// verify tenant is null
if (verifyTenantIsNull(taskQueueStr, tenant)) {
continue;
}
// set queue for process instance
taskInstance.getProcessInstance().setQueue(tenant.getQueue());
logger.info("worker fetch taskId : {} from queue ", taskInstId);
// mainly to wait for the master insert task to succeed
waitForMasterEnterQueue();
if (taskInstance == null ) {
logger.error("task instance is null. task id : {} ", taskInstId);
taskQueue.removeNode(Constants.SCHEDULER_TASKS_QUEUE, taskQueueStr);
// verify task instance is null
if (verifyTaskInstanceIsNull(taskQueueStr)) {
continue;
}
@@ -204,16 +204,18 @@ public class FetchTaskThread implements Runnable{
continue;
}
// get local execute path
logger.info("task instance local execute path : {} ", getExecLocalPath());
// local execute path
String execLocalPath = getExecLocalPath();
logger.info("task instance local execute path : {} ", execLocalPath);
// init task
taskInstance.init(OSUtils.getHost(),
new Date(),
getExecLocalPath());
execLocalPath);
// check and create Linux users
FileUtils.createWorkDirAndUserIfAbsent(getExecLocalPath(),
FileUtils.createWorkDirAndUserIfAbsent(execLocalPath,
tenant.getTenantCode(), logger);
logger.info("task : {} ready to submit to task scheduler thread",taskInstId);
@@ -232,6 +234,38 @@ public class FetchTaskThread implements Runnable{
}
}
/**
* verify task instance is null
* @param taskQueueStr
* @return
*/
private boolean verifyTaskInstanceIsNull(String taskQueueStr) {
if (taskInstance == null ) {
logger.error("task instance is null. task id : {} ", taskInstId);
taskQueue.removeNode(Constants.SCHEDULER_TASKS_QUEUE, taskQueueStr);
return true;
}
return false;
}
/**
* verify tenant is null
* @param taskQueueStr
* @param tenant
* @return
*/
private boolean verifyTenantIsNull(String taskQueueStr, Tenant tenant) {
if(tenant == null){
logger.error("tenant not exists,process define id : {},process instance id : {},task instance id : {}",
taskInstance.getProcessDefine().getId(),
taskInstance.getProcessInstance().getId(),
taskInstance.getId());
taskQueue.removeNode(Constants.SCHEDULER_TASKS_QUEUE, taskQueueStr);
return true;
}
return false;
}
/**
* get execute local path
* @return
@@ -274,4 +308,14 @@ public class FetchTaskThread implements Runnable{
retryTimes--;
}
}
/**
* get task instance id
*
* @param taskQueueStr
* @return
*/
private int getTaskInstanceId(String taskQueueStr){
return Integer.parseInt(taskQueueStr.split(Constants.UNDERLINE)[3]);
}
}
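The FetchTaskThread refactor pulls three helpers out of the fetch loop: getTaskInstanceId parses the fourth underscore-separated field of the queue entry, and the two verify* methods turn the null checks into guard clauses that remove the bad entry from the queue and continue. A small standalone check of the parsing; the field layout in the sample entry is hypothetical, since only index 3 is pinned down by the diff.

```java
// Parses the task instance id out of a queue entry the way the new
// getTaskInstanceId(...) does. The sample entry layout is an assumption.
public class TaskQueueEntryDemo {

    private static final String UNDERLINE = "_";   // Constants.UNDERLINE in the real code

    static int getTaskInstanceId(String taskQueueStr) {
        return Integer.parseInt(taskQueueStr.split(UNDERLINE)[3]);
    }

    public static void main(String[] args) {
        String entry = "2_15_7_103_192.168.0.1";        // hypothetical: ..._..._..._taskInstanceId_host
        System.out.println(getTaskInstanceId(entry));   // 103
    }
}
```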

2
escheduler-server/src/main/java/cn/escheduler/server/worker/runner/TaskScheduleThread.java

@@ -133,7 +133,7 @@ public class TaskScheduleThread implements Runnable {
taskInstance.getId(),
CommonUtils.getSystemEnvPath(),
tenant.getTenantCode(),
tenant.getQueueName(),
tenant.getQueue(),
taskInstance.getStartTime(),
getGlobalParamsMap(),
taskInstance.getDependency(),
