diff --git a/dockerfile/dockerfile-1.1.x/Dockerfile b/dockerfile/Dockerfile similarity index 100% rename from dockerfile/dockerfile-1.1.x/Dockerfile rename to dockerfile/Dockerfile diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/alert.properties b/dockerfile/conf/escheduler/conf/alert.properties similarity index 100% rename from dockerfile/dockerfile-1.1.x/conf/escheduler/conf/alert.properties rename to dockerfile/conf/escheduler/conf/alert.properties diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/alert_logback.xml b/dockerfile/conf/escheduler/conf/alert_logback.xml similarity index 100% rename from dockerfile/dockerfile-1.0.x/conf/escheduler/conf/alert_logback.xml rename to dockerfile/conf/escheduler/conf/alert_logback.xml diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/apiserver_logback.xml b/dockerfile/conf/escheduler/conf/apiserver_logback.xml similarity index 100% rename from dockerfile/dockerfile-1.0.x/conf/escheduler/conf/apiserver_logback.xml rename to dockerfile/conf/escheduler/conf/apiserver_logback.xml diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/application.properties b/dockerfile/conf/escheduler/conf/application.properties similarity index 100% rename from dockerfile/dockerfile-1.1.x/conf/escheduler/conf/application.properties rename to dockerfile/conf/escheduler/conf/application.properties diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/application_master.properties b/dockerfile/conf/escheduler/conf/application_master.properties similarity index 100% rename from dockerfile/dockerfile-1.0.x/conf/escheduler/conf/application_master.properties rename to dockerfile/conf/escheduler/conf/application_master.properties diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/common/common.properties b/dockerfile/conf/escheduler/conf/common/common.properties similarity index 100% rename from dockerfile/dockerfile-1.1.x/conf/escheduler/conf/common/common.properties rename to 
dockerfile/conf/escheduler/conf/common/common.properties diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/common/hadoop/hadoop.properties b/dockerfile/conf/escheduler/conf/common/hadoop/hadoop.properties similarity index 100% rename from dockerfile/dockerfile-1.1.x/conf/escheduler/conf/common/hadoop/hadoop.properties rename to dockerfile/conf/escheduler/conf/common/hadoop/hadoop.properties diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/config/install_config.conf b/dockerfile/conf/escheduler/conf/config/install_config.conf similarity index 100% rename from dockerfile/dockerfile-1.0.x/conf/escheduler/conf/config/install_config.conf rename to dockerfile/conf/escheduler/conf/config/install_config.conf diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/config/run_config.conf b/dockerfile/conf/escheduler/conf/config/run_config.conf similarity index 100% rename from dockerfile/dockerfile-1.0.x/conf/escheduler/conf/config/run_config.conf rename to dockerfile/conf/escheduler/conf/config/run_config.conf diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/dao/data_source.properties b/dockerfile/conf/escheduler/conf/dao/data_source.properties similarity index 100% rename from dockerfile/dockerfile-1.0.x/conf/escheduler/conf/dao/data_source.properties rename to dockerfile/conf/escheduler/conf/dao/data_source.properties diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/env/.escheduler_env.sh b/dockerfile/conf/escheduler/conf/env/.escheduler_env.sh similarity index 100% rename from dockerfile/dockerfile-1.1.x/conf/escheduler/conf/env/.escheduler_env.sh rename to dockerfile/conf/escheduler/conf/env/.escheduler_env.sh diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/i18n/messages.properties b/dockerfile/conf/escheduler/conf/i18n/messages.properties similarity index 100% rename from dockerfile/dockerfile-1.1.x/conf/escheduler/conf/i18n/messages.properties rename to 
dockerfile/conf/escheduler/conf/i18n/messages.properties diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/i18n/messages_en_US.properties b/dockerfile/conf/escheduler/conf/i18n/messages_en_US.properties similarity index 100% rename from dockerfile/dockerfile-1.1.x/conf/escheduler/conf/i18n/messages_en_US.properties rename to dockerfile/conf/escheduler/conf/i18n/messages_en_US.properties diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/i18n/messages_zh_CN.properties b/dockerfile/conf/escheduler/conf/i18n/messages_zh_CN.properties similarity index 100% rename from dockerfile/dockerfile-1.1.x/conf/escheduler/conf/i18n/messages_zh_CN.properties rename to dockerfile/conf/escheduler/conf/i18n/messages_zh_CN.properties diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/mail_templates/alert_mail_template.ftl b/dockerfile/conf/escheduler/conf/mail_templates/alert_mail_template.ftl similarity index 100% rename from dockerfile/dockerfile-1.0.x/conf/escheduler/conf/mail_templates/alert_mail_template.ftl rename to dockerfile/conf/escheduler/conf/mail_templates/alert_mail_template.ftl diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/master.properties b/dockerfile/conf/escheduler/conf/master.properties similarity index 100% rename from dockerfile/dockerfile-1.0.x/conf/escheduler/conf/master.properties rename to dockerfile/conf/escheduler/conf/master.properties diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/master_logback.xml b/dockerfile/conf/escheduler/conf/master_logback.xml similarity index 100% rename from dockerfile/dockerfile-1.0.x/conf/escheduler/conf/master_logback.xml rename to dockerfile/conf/escheduler/conf/master_logback.xml diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/quartz.properties b/dockerfile/conf/escheduler/conf/quartz.properties similarity index 100% rename from dockerfile/dockerfile-1.1.x/conf/escheduler/conf/quartz.properties rename to 
dockerfile/conf/escheduler/conf/quartz.properties diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/worker.properties b/dockerfile/conf/escheduler/conf/worker.properties similarity index 100% rename from dockerfile/dockerfile-1.0.x/conf/escheduler/conf/worker.properties rename to dockerfile/conf/escheduler/conf/worker.properties diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/worker_logback.xml b/dockerfile/conf/escheduler/conf/worker_logback.xml similarity index 100% rename from dockerfile/dockerfile-1.0.x/conf/escheduler/conf/worker_logback.xml rename to dockerfile/conf/escheduler/conf/worker_logback.xml diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/zookeeper.properties b/dockerfile/conf/escheduler/conf/zookeeper.properties similarity index 100% rename from dockerfile/dockerfile-1.1.x/conf/escheduler/conf/zookeeper.properties rename to dockerfile/conf/escheduler/conf/zookeeper.properties diff --git a/dockerfile/dockerfile-1.0.x/conf/maven/settings.xml b/dockerfile/conf/maven/settings.xml similarity index 100% rename from dockerfile/dockerfile-1.0.x/conf/maven/settings.xml rename to dockerfile/conf/maven/settings.xml diff --git a/dockerfile/dockerfile-1.0.x/conf/nginx/default.conf b/dockerfile/conf/nginx/default.conf similarity index 100% rename from dockerfile/dockerfile-1.0.x/conf/nginx/default.conf rename to dockerfile/conf/nginx/default.conf diff --git a/dockerfile/dockerfile-1.0.x/conf/zookeeper/zoo.cfg b/dockerfile/conf/zookeeper/zoo.cfg similarity index 100% rename from dockerfile/dockerfile-1.0.x/conf/zookeeper/zoo.cfg rename to dockerfile/conf/zookeeper/zoo.cfg diff --git a/dockerfile/dockerfile-1.0.x/Dockerfile b/dockerfile/dockerfile-1.0.x/Dockerfile deleted file mode 100644 index cebf7d5114..0000000000 --- a/dockerfile/dockerfile-1.0.x/Dockerfile +++ /dev/null @@ -1,145 +0,0 @@ -FROM ubuntu:18.04 - -MAINTAINER journey "825193156@qq.com" - -ENV LANG=C.UTF-8 - -ARG version - -#1,安装jdk - -RUN apt-get update \ - && 
apt-get -y install openjdk-8-jdk \ - && rm -rf /var/lib/apt/lists/* - -ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64 -ENV PATH $JAVA_HOME/bin:$PATH - - -#安装wget -RUN apt-get update && \ - apt-get -y install wget -#2,安装ZK -#RUN cd /opt && \ -# wget https://archive.apache.org/dist/zookeeper/zookeeper-3.4.6/zookeeper-3.4.6.tar.gz && \ -# tar -zxvf zookeeper-3.4.6.tar.gz && \ -# mv zookeeper-3.4.6 zookeeper && \ -# rm -rf ./zookeeper-*tar.gz && \ -# mkdir -p /tmp/zookeeper && \ -# rm -rf /opt/zookeeper/conf/zoo_sample.cfg - -RUN cd /opt && \ - wget https://www-us.apache.org/dist/zookeeper/zookeeper-3.4.14/zookeeper-3.4.14.tar.gz && \ - tar -zxvf zookeeper-3.4.14.tar.gz && \ - mv zookeeper-3.4.14 zookeeper && \ - rm -rf ./zookeeper-*tar.gz && \ - mkdir -p /tmp/zookeeper && \ - rm -rf /opt/zookeeper/conf/zoo_sample.cfg - -ADD ./conf/zookeeper/zoo.cfg /opt/zookeeper/conf -ENV ZK_HOME=/opt/zookeeper -ENV PATH $PATH:$ZK_HOME/bin - -#3,安装maven -RUN cd /opt && \ - wget http://apache-mirror.rbc.ru/pub/apache/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz && \ - tar -zxvf apache-maven-3.3.9-bin.tar.gz && \ - mv apache-maven-3.3.9 maven && \ - rm -rf ./apache-maven-*tar.gz && \ - rm -rf /opt/maven/conf/settings.xml -ADD ./conf/maven/settings.xml /opt/maven/conf -ENV MAVEN_HOME=/opt/maven -ENV PATH $PATH:$MAVEN_HOME/bin - -#4,安装node -RUN cd /opt && \ - wget https://nodejs.org/download/release/v8.9.4/node-v8.9.4-linux-x64.tar.gz && \ - tar -zxvf node-v8.9.4-linux-x64.tar.gz && \ - mv node-v8.9.4-linux-x64 node && \ - rm -rf ./node-v8.9.4-*tar.gz -ENV NODE_HOME=/opt/node -ENV PATH $PATH:$NODE_HOME/bin - -#5,下载escheduler -RUN cd /opt && \ - wget https://github.com/analysys/EasyScheduler/archive/${version}.tar.gz && \ - tar -zxvf ${version}.tar.gz && \ - mv EasyScheduler-${version} easyscheduler_source && \ - rm -rf ./${version}.tar.gz - -#6,后端编译 -RUN cd /opt/easyscheduler_source && \ - mvn -U clean package assembly:assembly -Dmaven.test.skip=true - -#7,前端编译 -RUN chmod 
-R 777 /opt/easyscheduler_source/escheduler-ui && \ - cd /opt/easyscheduler_source/escheduler-ui && \ - rm -rf /opt/easyscheduler_source/escheduler-ui/node_modules && \ - npm install node-sass --unsafe-perm && \ - npm install && \ - npm run build -#8,安装mysql -RUN echo "deb http://cn.archive.ubuntu.com/ubuntu/ xenial main restricted universe multiverse" >> /etc/apt/sources.list - -RUN echo "mysql-server mysql-server/root_password password root" | debconf-set-selections -RUN echo "mysql-server mysql-server/root_password_again password root" | debconf-set-selections - -RUN apt-get update && \ - apt-get -y install mysql-server-5.7 && \ - mkdir -p /var/lib/mysql && \ - mkdir -p /var/run/mysqld && \ - mkdir -p /var/log/mysql && \ - chown -R mysql:mysql /var/lib/mysql && \ - chown -R mysql:mysql /var/run/mysqld && \ - chown -R mysql:mysql /var/log/mysql - - -# UTF-8 and bind-address -RUN sed -i -e "$ a [client]\n\n[mysql]\n\n[mysqld]" /etc/mysql/my.cnf && \ - sed -i -e "s/\(\[client\]\)/\1\ndefault-character-set = utf8/g" /etc/mysql/my.cnf && \ - sed -i -e "s/\(\[mysql\]\)/\1\ndefault-character-set = utf8/g" /etc/mysql/my.cnf && \ - sed -i -e "s/\(\[mysqld\]\)/\1\ninit_connect='SET NAMES utf8'\ncharacter-set-server = utf8\ncollation-server=utf8_general_ci\nbind-address = 0.0.0.0/g" /etc/mysql/my.cnf - - -#9,安装nginx -RUN apt-get update && \ - apt-get install -y nginx && \ - rm -rf /var/lib/apt/lists/* && \ - echo "\ndaemon off;" >> /etc/nginx/nginx.conf && \ - chown -R www-data:www-data /var/lib/nginx - -#10,修改escheduler配置文件 -#后端配置 -RUN mkdir -p /opt/escheduler && \ - tar -zxvf /opt/easyscheduler_source/target/escheduler-${version}.tar.gz -C /opt/escheduler && \ - rm -rf /opt/escheduler/conf -ADD ./conf/escheduler/conf /opt/escheduler/conf -#前端nginx配置 -ADD ./conf/nginx/default.conf /etc/nginx/conf.d - -#11,开放端口 -EXPOSE 2181 2888 3888 3306 80 12345 8888 - -#12,安装sudo,python,vim,ping和ssh -RUN apt-get update && \ - apt-get -y install sudo && \ - apt-get -y install python && \ 
- apt-get -y install vim && \ - apt-get -y install iputils-ping && \ - apt-get -y install net-tools && \ - apt-get -y install openssh-server && \ - apt-get -y install python-pip && \ - pip install kazoo - -COPY ./startup.sh /root/startup.sh -#13,修改权限和设置软连 -RUN chmod +x /root/startup.sh && \ - chmod +x /opt/escheduler/script/create_escheduler.sh && \ - chmod +x /opt/zookeeper/bin/zkServer.sh && \ - chmod +x /opt/escheduler/bin/escheduler-daemon.sh && \ - rm -rf /bin/sh && \ - ln -s /bin/bash /bin/sh && \ - mkdir -p /tmp/xls - - -ENTRYPOINT ["/root/startup.sh"] diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/alert.properties b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/alert.properties deleted file mode 100644 index 0b97c0d397..0000000000 --- a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/alert.properties +++ /dev/null @@ -1,21 +0,0 @@ -#alert type is EMAIL/SMS -alert.type=EMAIL - -# mail server configuration -mail.protocol=SMTP -mail.server.host=smtp.office365.com -mail.server.port=587 -mail.sender=qiaozhanwei@outlook.com -mail.passwd=eschedulerBJEG - -# TLS -mail.smtp.starttls.enable=true -# SSL -mail.smtp.ssl.enable=false - -#xls file path,need create if not exist -xls.file.path=/tmp/xls - - - - diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/application.properties b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/application.properties deleted file mode 100644 index bf003bfdda..0000000000 --- a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/application.properties +++ /dev/null @@ -1,16 +0,0 @@ -# server port -server.port=12345 - -# session config -server.session.timeout=7200 - - -server.context-path=/escheduler/ - -# file size limit for upload -spring.http.multipart.max-file-size=1024MB -spring.http.multipart.max-request-size=1024MB - -#post content -server.max-http-post-size=5000000 - diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/common/common.properties 
b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/common/common.properties deleted file mode 100644 index 698c27d446..0000000000 --- a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/common/common.properties +++ /dev/null @@ -1,27 +0,0 @@ -#task queue implementation, default "zookeeper" -escheduler.queue.impl=zookeeper - -# user data directory path, self configuration, please make sure the directory exists and have read write permissions -data.basedir.path=/tmp/escheduler - -# directory path for user data download. self configuration, please make sure the directory exists and have read write permissions -data.download.basedir.path=/tmp/escheduler/download - -# process execute directory. self configuration, please make sure the directory exists and have read write permissions -process.exec.basepath=/tmp/escheduler/exec - -# data base dir, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions。"/escheduler" is recommended -data.store2hdfs.basepath=/escheduler - -# whether hdfs starts -hdfs.startup.state=false - -# system env path. self configuration, please make sure the directory and file exists and have read write execute permissions -escheduler.env.path=/opt/escheduler/conf/env/.escheduler_env.sh - -#resource.view.suffixs -resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml - -# is development state? 
default "false" -development.state=false - diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/common/hadoop/hadoop.properties b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/common/hadoop/hadoop.properties deleted file mode 100644 index f210ae7533..0000000000 --- a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/common/hadoop/hadoop.properties +++ /dev/null @@ -1,8 +0,0 @@ -# ha or single namenode,If namenode ha needs to copy core-site.xml and hdfs-site.xml to the conf directory -fs.defaultFS=hdfs://mycluster:8020 - -#resourcemanager ha note this need ips , this empty if single -yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx - -# If it is a single resourcemanager, you only need to configure one host name. If it is resourcemanager HA, the default configuration is fine -yarn.application.status.address=http://ark1:8088/ws/v1/cluster/apps/%s \ No newline at end of file diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/env/.escheduler_env.sh b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/env/.escheduler_env.sh deleted file mode 100644 index 5b4c3e1b8d..0000000000 --- a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/env/.escheduler_env.sh +++ /dev/null @@ -1,4 +0,0 @@ -export PYTHON_HOME=/usr/bin/python -export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64 - -export PATH=$PYTHON_HOME:$JAVA_HOME/bin:$PATH \ No newline at end of file diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/quartz.properties b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/quartz.properties deleted file mode 100644 index 3479379418..0000000000 --- a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/quartz.properties +++ /dev/null @@ -1,39 +0,0 @@ -#============================================================================ -# Configure Main Scheduler Properties -#============================================================================ -org.quartz.scheduler.instanceName = EasyScheduler -org.quartz.scheduler.instanceId = AUTO 
-org.quartz.scheduler.makeSchedulerThreadDaemon = true -org.quartz.jobStore.useProperties = false - -#============================================================================ -# Configure ThreadPool -#============================================================================ - -org.quartz.threadPool.class = org.quartz.simpl.SimpleThreadPool -org.quartz.threadPool.makeThreadsDaemons = true -org.quartz.threadPool.threadCount = 25 -org.quartz.threadPool.threadPriority = 5 - -#============================================================================ -# Configure JobStore -#============================================================================ - -org.quartz.jobStore.class = org.quartz.impl.jdbcjobstore.JobStoreTX -org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.StdJDBCDelegate -org.quartz.jobStore.tablePrefix = QRTZ_ -org.quartz.jobStore.isClustered = true -org.quartz.jobStore.misfireThreshold = 60000 -org.quartz.jobStore.clusterCheckinInterval = 5000 -org.quartz.jobStore.dataSource = myDs - -#============================================================================ -# Configure Datasources -#============================================================================ - -org.quartz.dataSource.myDs.driver = com.mysql.jdbc.Driver -org.quartz.dataSource.myDs.URL = jdbc:mysql://127.0.0.1:3306/escheduler?characterEncoding=utf8 -org.quartz.dataSource.myDs.user = root -org.quartz.dataSource.myDs.password = root@123 -org.quartz.dataSource.myDs.maxConnections = 10 -org.quartz.dataSource.myDs.validationQuery = select 1 diff --git a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/zookeeper.properties b/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/zookeeper.properties deleted file mode 100644 index 092688a153..0000000000 --- a/dockerfile/dockerfile-1.0.x/conf/escheduler/conf/zookeeper.properties +++ /dev/null @@ -1,25 +0,0 @@ -#zookeeper cluster -zookeeper.quorum=127.0.0.1:2181 - -#escheduler root directory 
-zookeeper.escheduler.root=/escheduler - -#zookeeper server dirctory -zookeeper.escheduler.dead.servers=/escheduler/dead-servers -zookeeper.escheduler.masters=/escheduler/masters -zookeeper.escheduler.workers=/escheduler/workers - -#zookeeper lock dirctory -zookeeper.escheduler.lock.masters=/escheduler/lock/masters -zookeeper.escheduler.lock.workers=/escheduler/lock/workers - -#escheduler failover directory -zookeeper.escheduler.lock.failover.masters=/escheduler/lock/failover/masters -zookeeper.escheduler.lock.failover.workers=/escheduler/lock/failover/workers -zookeeper.escheduler.lock.failover.startup.masters=/escheduler/lock/failover/startup-masters - -#escheduler failover directory -zookeeper.session.timeout=300 -zookeeper.connection.timeout=300 -zookeeper.retry.sleep=1000 -zookeeper.retry.maxtime=5 diff --git a/dockerfile/dockerfile-1.0.x/hooks/build b/dockerfile/dockerfile-1.0.x/hooks/build deleted file mode 100644 index aebf6e4b23..0000000000 --- a/dockerfile/dockerfile-1.0.x/hooks/build +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -echo "------ escheduler start - build -------" -printenv - -docker build --build-arg version=$version -t $DOCKER_REPO:$version . - -echo "------ escheduler end - build -------" diff --git a/dockerfile/dockerfile-1.0.x/startup.sh b/dockerfile/dockerfile-1.0.x/startup.sh deleted file mode 100644 index 5ed150e1ba..0000000000 --- a/dockerfile/dockerfile-1.0.x/startup.sh +++ /dev/null @@ -1,79 +0,0 @@ -#! /bin/bash - -set -e -if [ `netstat -anop|grep mysql|wc -l` -gt 0 ];then - echo "MySQL is Running." -else - MYSQL_ROOT_PWD="root@123" - ESZ_DB="escheduler" - echo "启动mysql服务" - chown -R mysql:mysql /var/lib/mysql /var/run/mysqld - find /var/lib/mysql -type f -exec touch {} \; && service mysql restart $ sleep 10 - if [ ! 
-f /nohup.out ];then - echo "设置mysql密码" - mysql --user=root --password=root -e "UPDATE mysql.user set authentication_string=password('$MYSQL_ROOT_PWD') where user='root'; FLUSH PRIVILEGES;" - - echo "设置mysql权限" - mysql --user=root --password=$MYSQL_ROOT_PWD -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY '$MYSQL_ROOT_PWD' WITH GRANT OPTION; FLUSH PRIVILEGES;" - echo "创建escheduler数据库" - mysql --user=root --password=$MYSQL_ROOT_PWD -e "CREATE DATABASE IF NOT EXISTS \`$ESZ_DB\` CHARACTER SET utf8 COLLATE utf8_general_ci; FLUSH PRIVILEGES;" - echo "导入mysql数据" - nohup /opt/escheduler/script/create_escheduler.sh & - fi - - if [ `mysql --user=root --password=$MYSQL_ROOT_PWD -s -r -e "SELECT count(TABLE_NAME) FROM information_schema.TABLES WHERE TABLE_SCHEMA='escheduler';" | grep -v count` -eq 38 ];then - echo "\`$ESZ_DB\` 表个数正确" - else - echo "\`$ESZ_DB\` 表个数不正确" - mysql --user=root --password=$MYSQL_ROOT_PWD -e "DROP DATABASE \`$ESZ_DB\`;" - echo "创建escheduler数据库" - mysql --user=root --password=$MYSQL_ROOT_PWD -e "CREATE DATABASE IF NOT EXISTS \`$ESZ_DB\` CHARACTER SET utf8 COLLATE utf8_general_ci; FLUSH PRIVILEGES;" - echo "导入mysql数据" - nohup /opt/escheduler/script/create_escheduler.sh & - fi -fi - -/opt/zookeeper/bin/zkServer.sh restart - -sleep 10 - -echo "启动api-server" -/opt/escheduler/bin/escheduler-daemon.sh stop api-server -/opt/escheduler/bin/escheduler-daemon.sh start api-server - - - -echo "启动master-server" -/opt/escheduler/bin/escheduler-daemon.sh stop master-server -python /opt/escheduler/script/del_zk_node.py 127.0.0.1 /escheduler/masters -/opt/escheduler/bin/escheduler-daemon.sh start master-server - -echo "启动worker-server" -/opt/escheduler/bin/escheduler-daemon.sh stop worker-server -python /opt/escheduler/script/del_zk_node.py 127.0.0.1 /escheduler/workers -/opt/escheduler/bin/escheduler-daemon.sh start worker-server - - -echo "启动logger-server" -/opt/escheduler/bin/escheduler-daemon.sh stop logger-server 
-/opt/escheduler/bin/escheduler-daemon.sh start logger-server - - -echo "启动alert-server" -/opt/escheduler/bin/escheduler-daemon.sh stop alert-server -/opt/escheduler/bin/escheduler-daemon.sh start alert-server - - - - - -echo "启动nginx" -/etc/init.d/nginx stop -nginx & - - -while true -do - sleep 101 -done -exec "$@" diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/alert_logback.xml b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/alert_logback.xml deleted file mode 100644 index c4ca8e9d1f..0000000000 --- a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/alert_logback.xml +++ /dev/null @@ -1,31 +0,0 @@ - - - - - - - [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n - - UTF-8 - - - - - ${log.base}/escheduler-alert.log - - ${log.base}/escheduler-alert.%d{yyyy-MM-dd_HH}.%i.log - 20 - 64MB - - - - [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n - - UTF-8 - - - - - - - \ No newline at end of file diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/apiserver_logback.xml b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/apiserver_logback.xml deleted file mode 100644 index 43e6af951a..0000000000 --- a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/apiserver_logback.xml +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - - - - - - - [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n - - UTF-8 - - - - - - - INFO - - ${log.base}/escheduler-api-server.log - - ${log.base}/escheduler-api-server.%d{yyyy-MM-dd_HH}.%i.log - 168 - 64MB - - - - - [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n - - UTF-8 - - - - - - - - \ No newline at end of file diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/application_master.properties b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/application_master.properties deleted file mode 100644 index cc4774ae94..0000000000 --- a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/application_master.properties +++ /dev/null @@ -1 +0,0 @@ 
-logging.config=classpath:master_logback.xml diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/config/install_config.conf b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/config/install_config.conf deleted file mode 100644 index 43b955d4f1..0000000000 --- a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/config/install_config.conf +++ /dev/null @@ -1,3 +0,0 @@ -installPath=/data1_1T/escheduler -deployUser=escheduler -ips=ark0,ark1,ark2,ark3,ark4 diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/config/run_config.conf b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/config/run_config.conf deleted file mode 100644 index f4cfd832c4..0000000000 --- a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/config/run_config.conf +++ /dev/null @@ -1,4 +0,0 @@ -masters=ark0,ark1 -workers=ark2,ark3,ark4 -alertServer=ark3 -apiServers=ark1 \ No newline at end of file diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/dao/data_source.properties b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/dao/data_source.properties deleted file mode 100644 index 0dce2943e4..0000000000 --- a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/dao/data_source.properties +++ /dev/null @@ -1,53 +0,0 @@ -# base spring data source configuration -spring.datasource.type=com.alibaba.druid.pool.DruidDataSource -spring.datasource.driver-class-name=com.mysql.jdbc.Driver -spring.datasource.url=jdbc:mysql://127.0.0.1:3306/escheduler?characterEncoding=UTF-8 -spring.datasource.username=root -spring.datasource.password=root@123 - -# connection configuration -spring.datasource.initialSize=5 -# min connection number -spring.datasource.minIdle=5 -# max connection number -spring.datasource.maxActive=50 - -# max wait time for get a connection in milliseconds. if configuring maxWait, fair locks are enabled by default and concurrency efficiency decreases. -# If necessary, unfair locks can be used by configuring the useUnfairLock attribute to true. 
-spring.datasource.maxWait=60000 - -# milliseconds for check to close free connections -spring.datasource.timeBetweenEvictionRunsMillis=60000 - -# the Destroy thread detects the connection interval and closes the physical connection in milliseconds if the connection idle time is greater than or equal to minEvictableIdleTimeMillis. -spring.datasource.timeBetweenConnectErrorMillis=60000 - -# the longest time a connection remains idle without being evicted, in milliseconds -spring.datasource.minEvictableIdleTimeMillis=300000 - -#the SQL used to check whether the connection is valid requires a query statement. If validation Query is null, testOnBorrow, testOnReturn, and testWhileIdle will not work. -spring.datasource.validationQuery=SELECT 1 -#check whether the connection is valid for timeout, in seconds -spring.datasource.validationQueryTimeout=3 - -# when applying for a connection, if it is detected that the connection is idle longer than time Between Eviction Runs Millis, -# validation Query is performed to check whether the connection is valid -spring.datasource.testWhileIdle=true - -#execute validation to check if the connection is valid when applying for a connection -spring.datasource.testOnBorrow=true -#execute validation to check if the connection is valid when the connection is returned -spring.datasource.testOnReturn=false -spring.datasource.defaultAutoCommit=true -spring.datasource.keepAlive=true - -# open PSCache, specify count PSCache for every connection -spring.datasource.poolPreparedStatements=true -spring.datasource.maxPoolPreparedStatementPerConnectionSize=20 - -# data quality analysis is not currently in use. 
please ignore the following configuration -# task record flag -task.record.flag=false -task.record.datasource.url=jdbc:mysql://192.168.xx.xx:3306/etl?characterEncoding=UTF-8 -task.record.datasource.username=xx -task.record.datasource.password=xx diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/mail_templates/alert_mail_template.ftl b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/mail_templates/alert_mail_template.ftl deleted file mode 100644 index 0ff763fa28..0000000000 --- a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/mail_templates/alert_mail_template.ftl +++ /dev/null @@ -1 +0,0 @@ - easyscheduler<#if title??> ${title}<#if content??> ${content}
\ No newline at end of file diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/master.properties b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/master.properties deleted file mode 100644 index 9080defc7b..0000000000 --- a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/master.properties +++ /dev/null @@ -1,21 +0,0 @@ -# master execute thread num -master.exec.threads=100 - -# master execute task number in parallel -master.exec.task.number=20 - -# master heartbeat interval -master.heartbeat.interval=10 - -# master commit task retry times -master.task.commit.retryTimes=5 - -# master commit task interval -master.task.commit.interval=100 - - -# only less than cpu avg load, master server can work. default value : the number of cpu cores * 2 -master.max.cpuload.avg=10 - -# only larger than reserved memory, master server can work. default value : physical memory * 1/10, unit is G. -master.reserved.memory=1 diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/master_logback.xml b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/master_logback.xml deleted file mode 100644 index d93878218e..0000000000 --- a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/master_logback.xml +++ /dev/null @@ -1,34 +0,0 @@ - - - - - - - [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n - - UTF-8 - - - - - ${log.base}/escheduler-master.log - - INFO - - - ${log.base}/escheduler-master.%d{yyyy-MM-dd_HH}.%i.log - 168 - 200MB - - - - [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n - - UTF-8 - - - - - - - \ No newline at end of file diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/worker.properties b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/worker.properties deleted file mode 100644 index e58bd86dcf..0000000000 --- a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/worker.properties +++ /dev/null @@ -1,15 +0,0 @@ -# worker execute thread num -worker.exec.threads=100 - -# worker heartbeat interval 
-worker.heartbeat.interval=10 - -# submit the number of tasks at a time -worker.fetch.task.num = 3 - - -# only less than cpu avg load, worker server can work. default value : the number of cpu cores * 2 -#worker.max.cpuload.avg=10 - -# only larger than reserved memory, worker server can work. default value : physical memory * 1/6, unit is G. -worker.reserved.memory=1 \ No newline at end of file diff --git a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/worker_logback.xml b/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/worker_logback.xml deleted file mode 100644 index 32914ec84f..0000000000 --- a/dockerfile/dockerfile-1.1.x/conf/escheduler/conf/worker_logback.xml +++ /dev/null @@ -1,53 +0,0 @@ - - - - - - - [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n - - UTF-8 - - - - - INFO - - - ${log.base}/{processDefinitionId}/{processInstanceId}/{taskInstanceId}.log - - - [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n - - UTF-8 - - true - - - - ${log.base}/escheduler-worker.log - - INFO - - - - ${log.base}/escheduler-worker.%d{yyyy-MM-dd_HH}.%i.log - 168 - 200MB - -       - - - [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n - - UTF-8 - -    - - - - - - - - \ No newline at end of file diff --git a/dockerfile/dockerfile-1.1.x/conf/maven/settings.xml b/dockerfile/dockerfile-1.1.x/conf/maven/settings.xml deleted file mode 100644 index 6bdea4a1bf..0000000000 --- a/dockerfile/dockerfile-1.1.x/conf/maven/settings.xml +++ /dev/null @@ -1,263 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - nexus-aliyun - central - Nexus aliyun - http://maven.aliyun.com/nexus/content/groups/public - - - - - - - - - - - - diff --git a/dockerfile/dockerfile-1.1.x/conf/nginx/default.conf b/dockerfile/dockerfile-1.1.x/conf/nginx/default.conf deleted file mode 100644 index 2d43c32b63..0000000000 --- a/dockerfile/dockerfile-1.1.x/conf/nginx/default.conf +++ /dev/null @@ -1,31 +0,0 @@ -server { - listen 8888; 
- server_name localhost; - #charset koi8-r; - #access_log /var/log/nginx/host.access.log main; - location / { - root /opt/easyscheduler_source/escheduler-ui/dist; - index index.html index.html; - } - location /escheduler { - proxy_pass http://127.0.0.1:12345; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header x_real_ipP $remote_addr; - proxy_set_header remote_addr $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_http_version 1.1; - proxy_connect_timeout 300s; - proxy_read_timeout 300s; - proxy_send_timeout 300s; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "upgrade"; - } - #error_page 404 /404.html; - # redirect server error pages to the static page /50x.html - # - error_page 500 502 503 504 /50x.html; - location = /50x.html { - root /usr/share/nginx/html; - } -} diff --git a/dockerfile/dockerfile-1.1.x/conf/zookeeper/zoo.cfg b/dockerfile/dockerfile-1.1.x/conf/zookeeper/zoo.cfg deleted file mode 100644 index a5a2c0bbe3..0000000000 --- a/dockerfile/dockerfile-1.1.x/conf/zookeeper/zoo.cfg +++ /dev/null @@ -1,28 +0,0 @@ -# The number of milliseconds of each tick -tickTime=2000 -# The number of ticks that the initial -# synchronization phase can take -initLimit=10 -# The number of ticks that can pass between -# sending a request and getting an acknowledgement -syncLimit=5 -# the directory where the snapshot is stored. -# do not use /tmp for storage, /tmp here is just -# example sakes. -dataDir=/tmp/zookeeper -# the port at which the clients will connect -clientPort=2181 -# the maximum number of client connections. -# increase this if you need to handle more clients -#maxClientCnxns=60 -# -# Be sure to read the maintenance section of the -# administrator guide before turning on autopurge. 
-# -# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance -# -# The number of snapshots to retain in dataDir -#autopurge.snapRetainCount=3 -# Purge task interval in hours -# Set to "0" to disable auto purge feature -#autopurge.purgeInterval=1 diff --git a/dockerfile/dockerfile-1.1.x/hooks/push b/dockerfile/dockerfile-1.1.x/hooks/push deleted file mode 100644 index 7b98da1a8d..0000000000 --- a/dockerfile/dockerfile-1.1.x/hooks/push +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -echo "------ push start -------" -printenv - -docker push $DOCKER_REPO:$version - -echo "------ push end -------" diff --git a/dockerfile/dockerfile-1.1.x/hooks/build b/dockerfile/hooks/build similarity index 100% rename from dockerfile/dockerfile-1.1.x/hooks/build rename to dockerfile/hooks/build diff --git a/dockerfile/dockerfile-1.0.x/hooks/push b/dockerfile/hooks/push similarity index 100% rename from dockerfile/dockerfile-1.0.x/hooks/push rename to dockerfile/hooks/push diff --git a/dockerfile/dockerfile-1.1.x/startup.sh b/dockerfile/startup.sh similarity index 98% rename from dockerfile/dockerfile-1.1.x/startup.sh rename to dockerfile/startup.sh index 2d40fb5071..8aed67f394 100644 --- a/dockerfile/dockerfile-1.1.x/startup.sh +++ b/dockerfile/startup.sh @@ -30,13 +30,14 @@ else echo "创建escheduler数据库" mysql --user=root --password=$MYSQL_ROOT_PWD -e "CREATE DATABASE IF NOT EXISTS \`$ESZ_DB\` CHARACTER SET utf8 COLLATE utf8_general_ci; FLUSH PRIVILEGES;" echo "导入mysql数据" - nohup /opt/escheduler/script/create_escheduler.sh & + nohup /opt/escheduler/script/create_escheduler.sh & + sleep 90 fi fi /opt/zookeeper/bin/zkServer.sh restart -sleep 10 +sleep 90 echo "启动api-server" /opt/escheduler/bin/escheduler-daemon.sh stop api-server diff --git a/docs/en_US/EasyScheduler Proposal.md b/docs/en_US/EasyScheduler Proposal.md index 17f1109c9a..40da17f0db 100644 --- a/docs/en_US/EasyScheduler Proposal.md +++ b/docs/en_US/EasyScheduler Proposal.md @@ -2,28 +2,20 @@ ## Abstract 
-EasyScheduler is a distributed visual workflow scheduling system, which focuses on solving the problem of "complex task dependencies" in data processing. just like its name, we dedicated to making the scheduling system `out of the box` . +EasyScheduler is a distributed ETL scheduling engine with powerful DAG visualization interface. EasyScheduler focuses on solving the problem of 'complex task dependencies & triggers ' in data processing. Just like its name, we dedicated to making the scheduling system `out of the box`. ## Proposal -EasyScheduler provides many easy-to-use features to simplify the use of data processing workflow,We propose the clear concept of "instance of process" and "instance of task" to make it more convenient to get the running state of workflow every time. its main objectives are as follows: - -- Associate the tasks according to the dependencies of the tasks in a DAG graph, which can visualize the running state of task in real time. -- Support for many task types: Shell, MR, Spark, SQL (mysql, postgresql, hive, sparksql), Python, Sub_Process, Procedure, etc. -- Support process scheduling, dependency scheduling, manual scheduling, manual pause/stop/recovery, support failed retry/alarm, recovery from specified nodes, kill task, etc. -- Support process priority, task priority and task failover and task timeout alarm/failure -- Support process global parameters and node custom parameter settings -- Support online upload/download of resource files, management, etc. Support online file creation and editing -- Support task log online viewing and scrolling, online download log, etc. 
-- Implement cluster HA, decentralize Master cluster and Worker cluster through Zookeeper -- Support online viewing of `Master/Worker` cpu load, memory -- Support process running history tree/gantt chart display, support task status statistics, process status statistics -- Support backfilling data -- Support multi-tenant -- Easy to maintain - -for now, EasyScheduler has a fairly huge community in China. -It is also widely adopted by many [companies and organizations](https://github.com/analysys/EasyScheduler/issues/57) as an ETL scheduling tool. +EasyScheduler provides many easy-to-use features to accelerate the engineer efficiency on data ETL workflow job. We propose a new concept of 'instance of process' and 'instance of task' to let developers tune their jobs on the running state of workflow instead of changing the task's template. Its main objectives are as follows: + +- Define the complex tasks' dependencies & triggers in a DAG graph by dragging and dropping. +- Support cluster HA. +- Support multi-tenant and parallel or serial backfilling data. +- Support automatic failure job retry and recovery. +- Support many data task types and process priority, task priority and relative task timeout alarm. + +For now, EasyScheduler has a fairly huge community in China. +It is also widely adopted by many [companies and organizations](https://github.com/analysys/EasyScheduler/issues/57) as its ETL scheduling tool. We believe that bringing EasyScheduler into ASF could advance development of a much more stronger and more diverse open source community. @@ -38,15 +30,15 @@ The codes are already under Apache License Version 2.0. We want to find a data processing tool with the following features: -- easy to use,It can be assembled into a process with a very simple drag and drop operation. 
not only for developers,people who can't write code also can use -- solving the problem of "complex task dependencies" , and can monitor the running status -- support multi-tenant -- support many task types: Shell, MR, Spark, SQL (mysql, postgresql, hive, sparksql), Python, Sub_Process, Procedure, etc. -- linear scalability +- Easy to use,developers can build an ETL process with a very simple drag and drop operation. not only for ETL developers,people who can't write code also can use this tool for ETL operation such as system administrator. +- Solving the problem of "complex task dependencies" , and it can monitor the ETL running status. +- Support multi-tenant. +- Support many task types: Shell, MR, Spark, SQL (mysql, postgresql, hive, sparksql), Python, Sub_Process, Procedure, etc. +- Support HA and linear scalability. -For the above reasons, we realized that no existing product met our exact requirements externally, so we decided to develop it ourselves.EasyScheduler completed the architecture design at the end of 2017. The first internal use version was completed in May 2018. We then iterated several versions and the system gradually stabilized. +For the above reasons, we realized that no existing product met our requirements, so we decided to develop this tool ourselves. We designed EasyScheduler at the end of 2017. The first internal use version was completed in May 2018. We then iterated several internal versions and the system gradually became stabilized. -EasyScheduler won the `GVP` (Gitee Most Valuable Project) in April 2019 +Then we opened the source code of EasyScheduler in March 2019. It soon gained lots of ETL developers interest and stars on github. Then it won the `GVP` (Gitee Most Valuable Project) in April 2019 and our key member was invited to GAIC Summit 2019 for speech on June 2019. ## Rationale @@ -80,9 +72,7 @@ Thus, it is very unlikely that EasyScheduler becomes orphaned. 
### Inexperience with Open Source -The core developers are all active users and followers of open source. They are already committers and contributors to the EasyScheduler Github project. All have been involved with the source code that has been released under an open source license, and several of them also have experience developing code in an open source environment, they are also active in presto, alluxio and other projects. - -Therefore, we believe we have enough experience to deal with open source. +EasyScheduler's core developers have been running it as a community-oriented open source project for some time, several of them already have experience working with open source communities, they are also active in presto, alluxio and other projects.At the same time, we will learn more open source experience from the excellent apache open source project to make up for this shortcoming. ### Homogenous Developers @@ -92,7 +82,7 @@ Considering that fengjr and sefonsoft have shown great interest in EasyScheduler ### Reliance on Salaried Developers -At present, four of the core developers are paid by their employer to contribute to EasyScheduler project. +At present, eight of the core developers are paid by their employer to contribute to EasyScheduler project. we also find some developers and researchers (>8) to contribute to the project, and we will make efforts to increase the diversity of the contributors and actively lobby for Domain experts in the workflow space to contribute. ### Relationships with Other Apache Products @@ -106,20 +96,18 @@ However, we prefer that the community provided by the Apache Software Foundation ## Documentation -A complete set of Sharding-Sphere documentations is provided on shardingsphere.io in both English and Simplified Chinese. +A complete set of EasyScheduler documentations is provided on github in both English and Simplified Chinese. 
-- [English](https://github.com/analysys/easyscheduler_docs/en_US) -- [Chinese](https://github.com/analysys/easyscheduler_docs/zh_CN) +- [English](https://github.com/analysys/easyscheduler_docs) +- [Chinese](https://github.com/analysys/easyscheduler_docs_cn) ## Initial Source -The project consists of two distinct codebases: core and document. The address of two existed git repositories are as follows: +The project consists of three distinct codebases: core and document. The address of two existed git repositories are as follows: - - - - - +- ## Source and Intellectual Property Submission Plan @@ -129,7 +117,7 @@ As soon as EasyScheduler is approved to join Apache Incubator, Analysys will exe As all backend code dependencies are managed using Apache Maven, none of the external libraries need to be packaged in a source distribution. -most of dependencies have Apache compatible licenses,and the detail as follows: +Most of dependencies have Apache compatible licenses,and the detail as follows: ### Backend Dependency @@ -1389,6 +1377,7 @@ The front-end UI currently relies on many components, which we will list separat - - +- ### Issue Tracking @@ -1406,14 +1395,14 @@ Travis (TODO) ## Initial Committers -- William-GuoWei -- Lidong Dai -- Zhanwei Qiao +- William-GuoWei(guowei20m@outlook.com) +- Lidong Dai(lidong.dai@outlook.com) +- Zhanwei Qiao(qiaozhanwei@outlook.com) - Liang Bao - Gang Li -- Zijian Gong +- Zijian Gong(quanquansy@gmail.com) - Jun Gao -- Baoqi Wu +- Baoqi Wu(wubaoqi@gmail.com) ## Affiliations @@ -1433,17 +1422,14 @@ Travis (TODO) ### Mentors -- Sheng Wu ( Apache Software Foundation Member [wusheng@apache.org](mailto:wusheng@apache.org)) +- Sheng Wu ( Apache Incubator PMC, [wusheng@apache.org](mailto:wusheng@apache.org)) -- ShaoFeng Shi ( Apache Software Foundation Incubator PMC [wusheng@apache.org](mailto:wusheng@apache.org)) +- ShaoFeng Shi ( Apache Kylin committer & PMC, Apache Incubator PMC, [shaofengshi@apache.org](mailto:wusheng@apache.org)) -- Liang 
Chen ( Apache Software Foundation Member chenliang613@apache.org](mailto:chenliang613@apache.org)) +- Liang Chen ( Apache Software Foundation Member, chenliang613@apache.org](mailto:chenliang613@apache.org)) ### Sponsoring Entity We are expecting the Apache Incubator could sponsor this project. - - - diff --git a/escheduler-api/src/main/java/cn/escheduler/api/service/SessionService.java b/escheduler-api/src/main/java/cn/escheduler/api/service/SessionService.java index e57535cf7a..af523823a4 100644 --- a/escheduler-api/src/main/java/cn/escheduler/api/service/SessionService.java +++ b/escheduler-api/src/main/java/cn/escheduler/api/service/SessionService.java @@ -19,6 +19,7 @@ package cn.escheduler.api.service; import cn.escheduler.api.controller.BaseController; import cn.escheduler.api.utils.Constants; +import cn.escheduler.common.utils.CollectionUtils; import cn.escheduler.dao.mapper.SessionMapper; import cn.escheduler.dao.model.Session; import cn.escheduler.dao.model.User; @@ -31,6 +32,7 @@ import org.springframework.stereotype.Service; import javax.servlet.http.Cookie; import javax.servlet.http.HttpServletRequest; import java.util.Date; +import java.util.List; import java.util.UUID; /** @@ -68,7 +70,7 @@ public class SessionService extends BaseService{ String ip = BaseController.getClientIpAddress(request); logger.info("get session: {}, ip: {}", sessionId, ip); - return sessionMapper.queryByIdAndIp(sessionId); + return sessionMapper.queryBySessionId(sessionId); } /** @@ -79,14 +81,26 @@ public class SessionService extends BaseService{ * @return */ public String createSession(User user, String ip) { + Session session = null; + // logined - Session session = sessionMapper.queryByUserIdAndIp(user.getId()); + List sessionList = sessionMapper.queryByUserId(user.getId()); + + + Date now = new Date(); /** * if you have logged in and are still valid, return directly */ - if (session != null) { + if (CollectionUtils.isNotEmpty(sessionList)) { + // is session list greater 1 
, delete other ,get one + if (sessionList.size() > 1){ + for (int i=1 ; i < sessionList.size();i++){ + sessionMapper.deleteById(sessionList.get(i).getId()); + } + } + session = sessionList.get(0); if (now.getTime() - session.getLastLoginTime().getTime() <= Constants.SESSION_TIME_OUT * 1000) { /** * updateProcessInstance the latest login time @@ -126,8 +140,11 @@ public class SessionService extends BaseService{ /** * query session by user id and ip */ - Session session = sessionMapper.queryByUserIdAndIp(loginUser.getId()); - //delete session - sessionMapper.deleteById(session.getId()); + List sessionList = sessionMapper.queryByUserId(loginUser.getId()); + + for (Session session : sessionList){ + //delete session + sessionMapper.deleteById(session.getId()); + } } } diff --git a/escheduler-dao/src/main/java/cn/escheduler/dao/mapper/SessionMapper.java b/escheduler-dao/src/main/java/cn/escheduler/dao/mapper/SessionMapper.java index 748ee474f7..1137817c47 100644 --- a/escheduler-dao/src/main/java/cn/escheduler/dao/mapper/SessionMapper.java +++ b/escheduler-dao/src/main/java/cn/escheduler/dao/mapper/SessionMapper.java @@ -22,6 +22,7 @@ import org.apache.ibatis.type.JdbcType; import java.sql.Timestamp; import java.util.Date; +import java.util.List; /** * session mapper @@ -57,20 +58,6 @@ public interface SessionMapper { int update(@Param("sessionId") String sessionId, @Param("loginTime") Date loginTime); - /** - * query by session id - * @param sessionId - * @return - */ - @Results(value = {@Result(property = "id", column = "id", javaType = String.class, jdbcType = JdbcType.VARCHAR), - @Result(property = "userId", column = "user_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER), - @Result(property = "ip", column = "ip", javaType = String.class, jdbcType = JdbcType.VARCHAR), - @Result(property = "lastLoginTime", column = "last_login_time", javaType = Timestamp.class, jdbcType = JdbcType.DATE) - }) - @SelectProvider(type = SessionMapperProvider.class, method = 
"queryById") - Session queryById(@Param("sessionId") int sessionId); - - /** * query by session id and ip * @@ -83,8 +70,8 @@ public interface SessionMapper { @Result(property = "ip", column = "ip", javaType = String.class, jdbcType = JdbcType.VARCHAR), @Result(property = "lastLoginTime", column = "last_login_time", javaType = Timestamp.class, jdbcType = JdbcType.DATE) }) - @SelectProvider(type = SessionMapperProvider.class, method = "queryByIdAndIp") - Session queryByIdAndIp(@Param("sessionId") String sessionId); + @SelectProvider(type = SessionMapperProvider.class, method = "queryBySessionId") + Session queryBySessionId(@Param("sessionId") String sessionId); /** @@ -98,7 +85,7 @@ public interface SessionMapper { @Result(property = "ip", column = "ip", javaType = String.class, jdbcType = JdbcType.VARCHAR), @Result(property = "lastLoginTime", column = "last_login_time", javaType = Timestamp.class, jdbcType = JdbcType.DATE) }) - @SelectProvider(type = SessionMapperProvider.class, method = "queryByUserIdAndIp") - Session queryByUserIdAndIp(@Param("userId") int userId); + @SelectProvider(type = SessionMapperProvider.class, method = "queryByUserId") + List queryByUserId(@Param("userId") int userId); } diff --git a/escheduler-dao/src/main/java/cn/escheduler/dao/mapper/TenantMapper.java b/escheduler-dao/src/main/java/cn/escheduler/dao/mapper/TenantMapper.java index c451f6ce20..a459e9aaae 100644 --- a/escheduler-dao/src/main/java/cn/escheduler/dao/mapper/TenantMapper.java +++ b/escheduler-dao/src/main/java/cn/escheduler/dao/mapper/TenantMapper.java @@ -68,6 +68,8 @@ public interface TenantMapper { @Result(property = "tenantName", column = "tenant_name", javaType = String.class, jdbcType = JdbcType.VARCHAR), @Result(property = "desc", column = "desc", javaType = String.class, jdbcType = JdbcType.VARCHAR), @Result(property = "queueId", column = "queue_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER), + @Result(property = "queueName", column = "queue_name", 
javaType = String.class, jdbcType = JdbcType.VARCHAR), + @Result(property = "queue", column = "queue", javaType = String.class, jdbcType = JdbcType.VARCHAR), @Result(property = "createTime", column = "create_time", javaType = Timestamp.class, jdbcType = JdbcType.DATE), @Result(property = "updateTime", column = "update_time", javaType = Timestamp.class, jdbcType = JdbcType.DATE), }) diff --git a/escheduler-dao/src/main/java/cn/escheduler/dao/mapper/TenantMapperProvider.java b/escheduler-dao/src/main/java/cn/escheduler/dao/mapper/TenantMapperProvider.java index f8151781d4..ba86c76f3a 100644 --- a/escheduler-dao/src/main/java/cn/escheduler/dao/mapper/TenantMapperProvider.java +++ b/escheduler-dao/src/main/java/cn/escheduler/dao/mapper/TenantMapperProvider.java @@ -97,11 +97,10 @@ public class TenantMapperProvider { public String queryById(Map parameter) { return new SQL() { { - SELECT("*"); - - FROM(TABLE_NAME); - - WHERE("`id` = #{tenantId}"); + SELECT("t.*,q.queue_name,q.queue"); + FROM(TABLE_NAME + " t,t_escheduler_queue q"); + WHERE(" t.queue_id = q.id"); + WHERE(" t.id = #{tenantId}"); } }.toString(); } diff --git a/escheduler-dao/src/main/java/cn/escheduler/dao/model/Tenant.java b/escheduler-dao/src/main/java/cn/escheduler/dao/model/Tenant.java index a8cc94e1ab..b3249dc362 100644 --- a/escheduler-dao/src/main/java/cn/escheduler/dao/model/Tenant.java +++ b/escheduler-dao/src/main/java/cn/escheduler/dao/model/Tenant.java @@ -53,6 +53,12 @@ public class Tenant { */ private String queueName; + + /** + * queue + */ + private String queue; + /** * create time */ @@ -126,6 +132,15 @@ public class Tenant { public void setQueueName(String queueName) { this.queueName = queueName; } + + public String getQueue() { + return queue; + } + + public void setQueue(String queue) { + this.queue = queue; + } + @Override public String toString() { return "Tenant{" + @@ -135,6 +150,7 @@ public class Tenant { ", desc='" + desc + '\'' + ", queueId=" + queueId + ", queueName='" + 
queueName + '\'' + + ", queue='" + queue + '\'' + ", createTime=" + createTime + ", updateTime=" + updateTime + '}'; diff --git a/escheduler-server/src/main/java/cn/escheduler/server/rpc/LoggerServer.java b/escheduler-server/src/main/java/cn/escheduler/server/rpc/LoggerServer.java index ab9e79524b..705596c334 100644 --- a/escheduler-server/src/main/java/cn/escheduler/server/rpc/LoggerServer.java +++ b/escheduler-server/src/main/java/cn/escheduler/server/rpc/LoggerServer.java @@ -92,14 +92,21 @@ public class LoggerServer { @Override public void rollViewLog(LogParameter request, StreamObserver responseObserver) { - logger.info("log parameter path : {} ,skipLine : {}, limit : {}", + logger.info("log parameter path : {} ,skip line : {}, limit : {}", request.getPath(), request.getSkipLineNum(), request.getLimit()); List list = readFile(request.getPath(), request.getSkipLineNum(), request.getLimit()); StringBuilder sb = new StringBuilder(); + boolean errorLineFlag = false; for (String line : list){ - sb.append(line + "\r\n"); + if (line.contains("TaskLogger")){ + errorLineFlag = filterLine(request.getPath(),line); + } + + if (!errorLineFlag || !line.contains("TaskLogger")){ + sb.append(line + "\r\n"); + } } RetStrInfo retInfoBuild = RetStrInfo.newBuilder().setMsg(sb.toString()).build(); responseObserver.onNext(retInfoBuild); @@ -123,7 +130,7 @@ public class LoggerServer { responseObserver.onNext(builder.build()); responseObserver.onCompleted(); }catch (Exception e){ - logger.error("get log bytes failed : " + e.getMessage(),e); + logger.error("get log bytes failed",e); } } } @@ -134,23 +141,35 @@ public class LoggerServer { * @return * @throws Exception */ - private static byte[] getFileBytes(String path)throws IOException{ + private static byte[] getFileBytes(String path){ InputStream in = null; ByteArrayOutputStream bos = null; try { in = new FileInputStream(path); bos = new ByteArrayOutputStream(); - byte[] buffer = new byte[4096]; - int n = 0; - while ((n = 
in.read(buffer)) != -1) { - bos.write(buffer, 0, n); + byte[] buf = new byte[1024]; + int len = 0; + while ((len = in.read(buf)) != -1) { + bos.write(buf, 0, len); } return bos.toByteArray(); }catch (IOException e){ - logger.error("getFileBytes error",e); + logger.error("get file bytes error",e); }finally { - bos.close(); - in.close(); + if (bos != null){ + try { + bos.close(); + } catch (IOException e) { + e.printStackTrace(); + } + } + if (in != null){ + try { + in.close(); + } catch (IOException e) { + e.printStackTrace(); + } + } } return null; } @@ -166,7 +185,7 @@ public class LoggerServer { try (Stream stream = Files.lines(Paths.get(path))) { return stream.skip(skipLine).limit(limit).collect(Collectors.toList()); } catch (IOException e) { - logger.error("read file failed : " + e.getMessage(),e); + logger.error("read file failed",e); } return null; } @@ -183,12 +202,20 @@ public class LoggerServer { StringBuilder sb = new StringBuilder(); try { br = new BufferedReader(new InputStreamReader(new FileInputStream(path))); + boolean errorLineFlag = false; while ((line = br.readLine()) != null){ - sb.append(line + "\r\n"); + if (line.contains("TaskLogger")){ + errorLineFlag = filterLine(path,line); + } + + if (!errorLineFlag || !line.contains("TaskLogger")){ + sb.append(line + "\r\n"); + } } + return sb.toString(); }catch (IOException e){ - logger.error("read file failed : " + e.getMessage(),e); + logger.error("read file failed",e); }finally { try { if (br != null){ @@ -201,4 +228,21 @@ public class LoggerServer { return null; } + + /** + * + * @param path + * @param line + * @return + */ + private static boolean filterLine(String path,String line){ + String removeSuffix = path.substring(0, path.length() - 4); + String[] strArrs = removeSuffix.split("/"); + String taskAppId = String.format("%s_%s_%s", + strArrs[strArrs.length - 3], + strArrs[strArrs.length-2], + strArrs[strArrs.length - 1]); + return !line.contains(taskAppId); + } + } \ No newline at end of file 
diff --git a/escheduler-server/src/main/java/cn/escheduler/server/utils/ParamUtils.java b/escheduler-server/src/main/java/cn/escheduler/server/utils/ParamUtils.java index e3041ffd9d..b16e9f3092 100644 --- a/escheduler-server/src/main/java/cn/escheduler/server/utils/ParamUtils.java +++ b/escheduler-server/src/main/java/cn/escheduler/server/utils/ParamUtils.java @@ -44,7 +44,6 @@ public class ParamUtils { CommandType commandType, Date scheduleTime){ if (globalParams == null - && globalParams == null && localParams == null){ return null; } diff --git a/escheduler-server/src/main/java/cn/escheduler/server/worker/WorkerServer.java b/escheduler-server/src/main/java/cn/escheduler/server/worker/WorkerServer.java index 0c6b5adb54..fec8c0f9d5 100644 --- a/escheduler-server/src/main/java/cn/escheduler/server/worker/WorkerServer.java +++ b/escheduler-server/src/main/java/cn/escheduler/server/worker/WorkerServer.java @@ -119,7 +119,7 @@ public class WorkerServer implements IStoppable { try { conf = new PropertiesConfiguration(Constants.WORKER_PROPERTIES_PATH); }catch (ConfigurationException e){ - logger.error("load configuration failed : " + e.getMessage(),e); + logger.error("load configuration failed",e); System.exit(1); } } @@ -167,13 +167,12 @@ public class WorkerServer implements IStoppable { Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() { @Override public void run() { - String host = OSUtils.getHost(); - // clear worker table register info - serverDao.deleteWorker(host); - logger.info("worker server stopped"); + + logger.warn("worker server stopped"); + // worker server exit alert if (zkWorkerClient.getActiveMasterNum() <= 1) { for (int i = 0; i < Constants.ESCHEDULER_WARN_TIMES_FAILOVER;i++) { - alertDao.sendServerStopedAlert(1, host, "Worker-Server"); + alertDao.sendServerStopedAlert(1, OSUtils.getHost(), "Worker-Server"); } } @@ -231,7 +230,7 @@ public class WorkerServer implements IStoppable { @Override public void run() { // send heartbeat to zk - if 
(StringUtils.isBlank(zkWorkerClient.getWorkerZNode())){ + if (StringUtils.isEmpty(zkWorkerClient.getWorkerZNode())){ logger.error("worker send heartbeat to zk failed"); } @@ -266,14 +265,8 @@ public class WorkerServer implements IStoppable { continue; }else { int taskInstId=Integer.parseInt(taskInfoArr[1]); + TaskInstance taskInstance = processDao.getTaskInstanceRelationByTaskId(taskInstId); - TaskInstance taskInstance = processDao.findTaskInstanceById(taskInstId); - - ProcessInstance instance = processDao.findProcessInstanceDetailById(taskInstance.getProcessInstanceId()); - - if(instance != null){ - taskInstance.setProcessInstance(instance); - } if(taskInstance.getTaskType().equals(TaskType.DEPENDENT.toString())){ taskInstance.setState(ExecutionStatus.KILL); processDao.saveTaskInstance(taskInstance); diff --git a/escheduler-server/src/main/java/cn/escheduler/server/worker/runner/FetchTaskThread.java b/escheduler-server/src/main/java/cn/escheduler/server/worker/runner/FetchTaskThread.java index 2208cf5204..31c4def1da 100644 --- a/escheduler-server/src/main/java/cn/escheduler/server/worker/runner/FetchTaskThread.java +++ b/escheduler-server/src/main/java/cn/escheduler/server/worker/runner/FetchTaskThread.java @@ -173,30 +173,30 @@ public class FetchTaskThread implements Runnable{ } // get task instance id - taskInstId = Integer.parseInt(taskQueueStr.split(Constants.UNDERLINE)[3]); + + taskInstId = getTaskInstanceId(taskQueueStr); // get task instance relation taskInstance = processDao.getTaskInstanceRelationByTaskId(taskInstId); Tenant tenant = processDao.getTenantForProcess(taskInstance.getProcessInstance().getTenantId(), taskInstance.getProcessDefine().getUserId()); - if(tenant == null){ - logger.error("tenant not exists,process define id : {},process instance id : {},task instance id : {}", - taskInstance.getProcessDefine().getId(), - taskInstance.getProcessInstance().getId(), - taskInstance.getId()); - taskQueue.removeNode(Constants.SCHEDULER_TASKS_QUEUE, 
taskQueueStr); + + // verify tenant is null + if (verifyTenantIsNull(taskQueueStr, tenant)) { continue; } + // set queue for process instance + taskInstance.getProcessInstance().setQueue(tenant.getQueue()); + logger.info("worker fetch taskId : {} from queue ", taskInstId); // mainly to wait for the master insert task to succeed waitForMasterEnterQueue(); - if (taskInstance == null ) { - logger.error("task instance is null. task id : {} ", taskInstId); - taskQueue.removeNode(Constants.SCHEDULER_TASKS_QUEUE, taskQueueStr); + // verify task instance is null + if (verifyTaskInstanceIsNull(taskQueueStr)) { continue; } @@ -204,16 +204,18 @@ public class FetchTaskThread implements Runnable{ continue; } - // get local execute path - logger.info("task instance local execute path : {} ", getExecLocalPath()); + // local execute path + String execLocalPath = getExecLocalPath(); + + logger.info("task instance local execute path : {} ", execLocalPath); // init task taskInstance.init(OSUtils.getHost(), new Date(), - getExecLocalPath()); + execLocalPath); // check and create Linux users - FileUtils.createWorkDirAndUserIfAbsent(getExecLocalPath(), + FileUtils.createWorkDirAndUserIfAbsent(execLocalPath, tenant.getTenantCode(), logger); logger.info("task : {} ready to submit to task scheduler thread",taskInstId); @@ -232,6 +234,38 @@ public class FetchTaskThread implements Runnable{ } } + /** + * verify task instance is null + * @param taskQueueStr + * @return + */ + private boolean verifyTaskInstanceIsNull(String taskQueueStr) { + if (taskInstance == null ) { + logger.error("task instance is null. 
task id : {} ", taskInstId); + taskQueue.removeNode(Constants.SCHEDULER_TASKS_QUEUE, taskQueueStr); + return true; + } + return false; + } + + /** + * verify tenant is null + * @param taskQueueStr + * @param tenant + * @return + */ + private boolean verifyTenantIsNull(String taskQueueStr, Tenant tenant) { + if(tenant == null){ + logger.error("tenant not exists,process define id : {},process instance id : {},task instance id : {}", + taskInstance.getProcessDefine().getId(), + taskInstance.getProcessInstance().getId(), + taskInstance.getId()); + taskQueue.removeNode(Constants.SCHEDULER_TASKS_QUEUE, taskQueueStr); + return true; + } + return false; + } + /** * get execute local path * @return @@ -274,4 +308,14 @@ public class FetchTaskThread implements Runnable{ retryTimes--; } } + + /** + * get task instance id + * + * @param taskQueueStr + * @return + */ + private int getTaskInstanceId(String taskQueueStr){ + return Integer.parseInt(taskQueueStr.split(Constants.UNDERLINE)[3]); + } } \ No newline at end of file diff --git a/escheduler-server/src/main/java/cn/escheduler/server/worker/runner/TaskScheduleThread.java b/escheduler-server/src/main/java/cn/escheduler/server/worker/runner/TaskScheduleThread.java index 6265097930..c045b0713a 100644 --- a/escheduler-server/src/main/java/cn/escheduler/server/worker/runner/TaskScheduleThread.java +++ b/escheduler-server/src/main/java/cn/escheduler/server/worker/runner/TaskScheduleThread.java @@ -133,7 +133,7 @@ public class TaskScheduleThread implements Runnable { taskInstance.getId(), CommonUtils.getSystemEnvPath(), tenant.getTenantCode(), - tenant.getQueueName(), + tenant.getQueue(), taskInstance.getStartTime(), getGlobalParamsMap(), taskInstance.getDependency(),