diff --git a/NOTICE b/NOTICE
new file mode 100644
index 0000000000..26802e12b6
--- /dev/null
+++ b/NOTICE
@@ -0,0 +1,7 @@
+Easy Scheduler
+Copyright 2019 The Analysys Foundation
+
+This product includes software developed at
+The Analysys Foundation (https://www.analysys.cn/).
+
+
diff --git a/install.sh b/install.sh
new file mode 100644
index 0000000000..c968464f33
--- /dev/null
+++ b/install.sh
@@ -0,0 +1,350 @@
+#!/bin/bash
+
+workDir=`dirname $0`
+workDir=`cd ${workDir};pwd`
+source $workDir/conf/config/run_config.conf
+source $workDir/conf/config/install_config.conf
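+# the two files sourced above supply the current values; the variables below override
+# them, and step 1 writes the final values back into the matching .properties/.conf files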
+
+# MySQL configuration
+# MySQL host and port
+mysqlHost="192.168.xx.xx:3306"
+
+# MySQL database name
+mysqlDb="escheduler"
+
+# MySQL username
+mysqlUserName="xx"
+
+# MySQL password
+mysqlPassword="xx"
+
+
+# Hadoop configuration
+# namenode address; HA is supported, in which case core-site.xml and hdfs-site.xml must be placed under the conf directory
+namenodeFs="hdfs://mycluster:8020"
+
+# resourcemanager HA configuration; leave this empty for a single resourcemanager
+yarnHaIps="192.168.xx.xx,192.168.xx.xx"
+
+# for a single resourcemanager, configure only that hostname; for resourcemanager HA the default is fine
+singleYarnIp="ark1"
+
+
+# common configuration
+# program path
+programPath="/tmp/escheduler"
+
+# download path
+downloadPath="/tmp/escheduler/download"
+
+# task execution path
+execPath="/tmp/escheduler/exec"
+
+# HDFS root path
+hdfsPath="/escheduler"
+
+# whether HDFS is enabled: true to enable, false to disable
+hdfsStartupState="true"
+
+# path of the shell environment variables file
+shellEnvPath="/opt/.escheduler_env.sh"
+
+# path of the Python environment variables file
+pythonEnvPath="/opt/escheduler_env.py"
+
+# suffixes of resource files
+resSuffixs="txt,log,sh,conf,cfg,py,java,sql,hql,xml"
+
+# development state: if true, the wrapped shell scripts can be inspected under execPath; if false they are deleted as soon as execution finishes
+devState="true"
+
+
+# zk configuration
+# zk cluster
+zkQuorum="192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181"
+
+# zk root directory
+zkRoot="/escheduler"
+
+# zk directory used to record servers that have gone down
+zkDeadServers="/escheduler/dead-servers"
+
+# masters directory
+zkMasters="/escheduler/masters"
+
+# workers directory
+zkWorkers="/escheduler/workers"
+
+# zk master distributed lock
+mastersLock="/escheduler/lock/masters"
+
+# zk worker distributed lock
+workersLock="/escheduler/lock/workers"
+
+# zk master failover distributed lock
+mastersFailover="/escheduler/lock/failover/masters"
+
+# zk worker failover distributed lock
+workersFailover="/escheduler/lock/failover/workers"
+
+# zk session timeout
+zkSessionTimeout="300"
+
+# zk connection timeout
+zkConnectionTimeout="300"
+
+# zk retry interval
+zkRetrySleep="100"
+
+# zk max retries
+zkRetryMaxtime="5"
+
+
+# master configuration
+# max number of master execution threads, i.e. the max parallelism of process instances
+masterExecThreads="100"
+
+# max number of task execution threads per master, i.e. the max parallelism within each process instance
+masterExecTaskNum="20"
+
+# master heartbeat interval
+masterHeartbeatInterval="10"
+
+# master task commit retry times
+masterTaskCommitRetryTimes="5"
+
+# master task commit retry interval
+masterTaskCommitInterval="100"
+
+# max master CPU load average, used to decide whether the master still has capacity to execute
+masterMaxCpuLoadAvg="10"
+
+# master reserved memory, used to decide whether the master still has capacity to execute
+masterReservedMemory="1"
+
+
+# worker configuration
+# worker execution threads
+workerExecThreads="100"
+
+# worker heartbeat interval
+workerHeartbeatInterval="10"
+
+# number of tasks a worker fetches at a time
+workerFetchTaskNum="10"
+
+# max worker CPU load average, used to decide whether the worker still has capacity to execute
+workerMaxCpuLoadAvg="10"
+
+# worker reserved memory, used to decide whether the worker still has capacity to execute
+workerReservedMemory="1"
+
+
+# api configuration
+# api server port
+apiServerPort="12345"
+
+# api session timeout
+apiServerSessionTimeout="7200"
+
+# api context path
+apiServerContextPath="/escheduler/"
+
+# spring max file size
+springMaxFileSize="1024MB"
+
+# spring max request size
+springMaxRequestSize="1024MB"
+
+# api max http post size
+apiMaxHttpPostSize="5000000"
+
+
+
+# alert configuration
+
+# mail protocol
+mailProtocol="SMTP"
+
+# mail server host
+mailServerHost="smtp.exmail.qq.com"
+
+# mail server port
+mailServerPort="25"
+
+# sender
+mailSender="xxxxxxxxxx"
+
+# sender password
+mailPassword="xxxxxxxxxx"
+
+# Excel download path
+xlsFilePath="/opt/xls"
+
+# conf/config/install_config.conf settings
+# install path
+installPath="/data1_1T/escheduler"
+
+# deploy user
+deployUser="escheduler"
+
+# hosts to install on
+ips="ark0,ark1,ark2,ark3,ark4"
+
+
+# conf/config/run_config.conf settings
+# hosts running Master
+masters="ark0,ark1"
+
+# hosts running Worker
+workers="ark2,ark3,ark4"
+
+# host running Alert
+alertServer="ark3"
+
+# hosts running Api
+apiServers="ark1"
+
+
+# 1. replace config files
+echo "1. replace config files"
+sed -i "s#spring.datasource.url.*#spring.datasource.url=jdbc:mysql://${mysqlHost}/${mysqlDb}?characterEncoding=UTF-8#g" conf/dao/data_source.properties
+sed -i "s#spring.datasource.username.*#spring.datasource.username=${mysqlUserName}#g" conf/dao/data_source.properties
+sed -i "s#spring.datasource.password.*#spring.datasource.password=${mysqlPassword}#g" conf/dao/data_source.properties
+
+sed -i "s#org.quartz.dataSource.myDs.URL.*#org.quartz.dataSource.myDs.URL=jdbc:mysql://${mysqlHost}/${mysqlDb}?characterEncoding=UTF-8#g" conf/quartz.properties
+sed -i "s#org.quartz.dataSource.myDs.user.*#org.quartz.dataSource.myDs.user=${mysqlUserName}#g" conf/quartz.properties
+sed -i "s#org.quartz.dataSource.myDs.password.*#org.quartz.dataSource.myDs.password=${mysqlPassword}#g" conf/quartz.properties
+
+
+sed -i "s#fs.defaultFS.*#fs.defaultFS = ${namenodeFs}#g" conf/common/hadoop/hadoop.properties
+sed -i "s#yarn.resourcemanager.ha.rm.ids.*#yarn.resourcemanager.ha.rm.ids=${yarnHaIps}#g" conf/common/hadoop/hadoop.properties
+sed -i "s#yarn.application.status.address.*#yarn.application.status.address=http://${singleYarnIp}:8088/ws/v1/cluster/apps/%s#g" conf/common/hadoop/hadoop.properties
+
+sed -i "s#data.basedir.path.*#data.basedir.path=${programPath}#g" conf/common/common.properties
+sed -i "s#data.download.basedir.path.*#data.download.basedir.path=${downloadPath}#g" conf/common/common.properties
+sed -i "s#process.exec.basepath.*#process.exec.basepath=${execPath}#g" conf/common/common.properties
+sed -i "s#data.store2hdfs.basepath.*#data.store2hdfs.basepath=${hdfsPath}#g" conf/common/common.properties
+sed -i "s#hdfs.startup.state.*#hdfs.startup.state=${hdfsStartupSate}#g" conf/common/common.properties
+sed -i "s#escheduler.env.path.*#escheduler.env.path=${shellEnvPath}#g" conf/common/common.properties
+sed -i "s#escheduler.env.py.*#escheduler.env.py=${pythonEnvPath}#g" conf/common/common.properties
+sed -i "s#resource.view.suffixs.*#resource.view.suffixs=${resSuffixs}#g" conf/common/common.properties
+sed -i "s#development.state.*#development.state=${devState}#g" conf/common/common.properties
+
+sed -i "s#zookeeper.quorum.*#zookeeper.quorum=${zkQuorum}#g" conf/zookeeper.properties
+sed -i "s#zookeeper.escheduler.root.*#zookeeper.escheduler.root=${zkRoot}#g" conf/zookeeper.properties
+sed -i "s#zookeeper.escheduler.dead.servers.*#zookeeper.escheduler.dead.servers=${zkDeadServers}#g" conf/zookeeper.properties
+sed -i "s#zookeeper.escheduler.masters.*#zookeeper.escheduler.masters=${zkMasters}#g" conf/zookeeper.properties
+sed -i "s#zookeeper.escheduler.workers.*#zookeeper.escheduler.workers=${zkWorkers}#g" conf/zookeeper.properties
+sed -i "s#zookeeper.escheduler.lock.masters.*#zookeeper.escheduler.lock.masters=${mastersLock}#g" conf/zookeeper.properties
+sed -i "s#zookeeper.escheduler.lock.workers.*#zookeeper.escheduler.lock.workers=${workersLock}#g" conf/zookeeper.properties
+sed -i "s#zookeeper.escheduler.lock.masters.failover.*#zookeeper.escheduler.lock.masters.failover=${mastersFailover}#g" conf/zookeeper.properties
+sed -i "s#zookeeper.escheduler.lock.workers.failover.*#zookeeper.escheduler.lock.workers.failover=${workersFailover}#g" conf/zookeeper.properties
+sed -i "s#zookeeper.session.timeout.*#zookeeper.session.timeout=${zkSessionTimeout}#g" conf/zookeeper.properties
+sed -i "s#zookeeper.connection.timeout.*#zookeeper.connection.timeout=${zkConnectionTimeout}#g" conf/zookeeper.properties
+sed -i "s#zookeeper.retry.sleep.*#zookeeper.retry.sleep=${zkRetrySleep}#g" conf/zookeeper.properties
+sed -i "s#zookeeper.retry.maxtime.*#zookeeper.retry.maxtime=${zkRetryMaxtime}#g" conf/zookeeper.properties
+
+sed -i "s#master.exec.threads.*#master.exec.threads=${masterExecThreads}#g" conf/master.properties
+sed -i "s#master.exec.task.number.*#master.exec.task.number=${masterExecTaskNum}#g" conf/master.properties
+sed -i "s#master.heartbeat.interval.*#master.heartbeat.interval=${masterHeartbeatInterval}#g" conf/master.properties
+sed -i "s#master.task.commit.retryTimes.*#master.task.commit.retryTimes=${masterTaskCommitRetryTimes}#g" conf/master.properties
+sed -i "s#master.task.commit.interval.*#master.task.commit.interval=${masterTaskCommitInterval}#g" conf/master.properties
+sed -i "s#master.max.cpuload.avg.*#master.max.cpuload.avg=${masterMaxCupLoadAvg}#g" conf/master.properties
+sed -i "s#master.reserved.memory.*#master.reserved.memory=${masterReservedMemory}#g" conf/master.properties
+
+
+sed -i "s#worker.exec.threads.*#worker.exec.threads=${workerExecThreads}#g" conf/worker.properties
+sed -i "s#worker.heartbeat.interval.*#worker.heartbeat.interval=${workerHeartbeatInterval}#g" conf/worker.properties
+sed -i "s#worker.fetch.task.num.*#worker.fetch.task.num=${workerFetchTaskNum}#g" conf/worker.properties
+sed -i "s#worker.max.cpuload.avg.*#worker.max.cpuload.avg=${workerMaxCupLoadAvg}#g" conf/worker.properties
+sed -i "s#worker.reserved.memory.*#worker.reserved.memory=${workerReservedMemory}#g" conf/worker.properties
+
+
+sed -i "s#server.port.*#server.port=${apiServerPort}#g" conf/application.properties
+sed -i "s#server.session.timeout.*#server.session.timeout=${apiServerSessionTimeout}#g" conf/application.properties
+sed -i "s#server.context-path.*#server.context-path=${apiServerContextPath}#g" conf/application.properties
+sed -i "s#spring.http.multipart.max-file-size.*#spring.http.multipart.max-file-size=${springMaxFileSize}#g" conf/application.properties
+sed -i "s#spring.http.multipart.max-request-size.*#spring.http.multipart.max-request-size=${springMaxRequestSize}#g" conf/application.properties
+sed -i "s#server.max-http-post-size.*#server.max-http-post-size=${apiMaxHttpPostSize}#g" conf/application.properties
+
+
+sed -i "s#mail.protocol.*#mail.protocol=${mailProtocol}#g" conf/alert.properties
+sed -i "s#mail.server.host.*#mail.server.host=${mailServerHost}#g" conf/alert.properties
+sed -i "s#mail.server.port.*#mail.server.port=${mailServerPort}#g" conf/alert.properties
+sed -i "s#mail.sender.*#mail.sender=${mailSender}#g" conf/alert.properties
+sed -i "s#mail.passwd.*#mail.passwd=${mailPassword}#g" conf/alert.properties
+sed -i "s#xls.file.path.*#xls.file.path=${xlsFilePath}#g" conf/alert.properties
+
+
+sed -i "s#installPath.*#installPath=${installPath}#g" conf/config/install_config.conf
+sed -i "s#deployUser.*#deployUser=${deployUser}#g" conf/config/install_config.conf
+sed -i "s#ips.*#ips=${ips}#g" conf/config/install_config.conf
+
+
+sed -i "s#masters.*#masters=${masters}#g" conf/config/run_config.conf
+sed -i "s#workers.*#workers=${workers}#g" conf/config/run_config.conf
+sed -i "s#alertServer.*#alertServer=${alertServer}#g" conf/config/run_config.conf
+sed -i "s#apiServers.*#apiServers=${apiServers}#g" conf/config/run_config.conf
+
+
+
+
+# 2. create directories
+echo "2. create directories"
+
+if [ ! -d $installPath ];then
+ sudo mkdir -p $installPath
+ sudo chown -R $deployUser:$deployUser $installPath
+fi
+
+hostsArr=(${ips//,/ })
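+# ${ips//,/ } replaces every ',' with a space, so word splitting turns the comma-separated
+# host list into a bash array with one element per host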
+for host in ${hostsArr[@]}
+do
+
+# create programPath if it does not exist
+if ! ssh $host test -e $programPath; then
+ ssh $host "sudo mkdir -p $programPath;sudo chown -R $deployUser:$deployUser $programPath"
+fi
+
+# create downloadPath if it does not exist
+if ! ssh $host test -e $downloadPath; then
+ ssh $host "sudo mkdir -p $downloadPath;sudo chown -R $deployUser:$deployUser $downloadPath"
+fi
+
+# create $execPath if it does not exist
+if ! ssh $host test -e $execPath; then
+ ssh $host "sudo mkdir -p $execPath; sudo chown -R $deployUser:$deployUser $execPath"
+fi
+
+# create $xlsFilePath if it does not exist
+if ! ssh $host test -e $xlsFilePath; then
+ ssh $host "sudo mkdir -p $xlsFilePath; sudo chown -R $deployUser:$deployUser $xlsFilePath"
+fi
+
+done
+
+
+# 3. stop services
+echo "3. stop services"
+sh $workDir/script/stop_all.sh
+
+# 4. delete zk nodes
+echo "4. delete zk nodes"
+sleep 1
+python $workDir/script/del_zk_node.py $zkQuorum $zkRoot
+
+# 5. scp resources
+echo "5. scp resources"
+sh $workDir/script/scp_hosts.sh
+if [ $? -eq 0 ]
+then
+ echo 'scp copy succeeded'
+else
+ echo 'scp copy failed, exiting'
+ exit 1
+fi
+
+# 6. start services
+echo "6. start services"
+sh $workDir/script/start_all.sh
\ No newline at end of file
diff --git a/monitor_server.py b/monitor_server.py
new file mode 100644
index 0000000000..a0d1066661
--- /dev/null
+++ b/monitor_server.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python
+# -*- coding:utf-8 -*-
+# Author:qiaozhanwei
+
+'''
+install pip via yum:
+yum -y install python-pip
+
+install kazoo:
+pip install kazoo
+or: conda install -c conda-forge kazoo
+
+run this script with:
+nohup python -u monitor_server.py > nohup.out 2>&1 &
+'''
+
+import socket
+import os
+import sched
+import time
+from datetime import datetime
+from kazoo.client import KazooClient
+
+
+schedule = sched.scheduler(time.time, time.sleep)
+
+class ZkClient:
+ def __init__(self):
+        # hosts: the zookeeper cluster address list
+ self.zk = KazooClient(hosts='ark0:2181,ark1:2181,ark2:2181')
+ self.zk.start()
+
+    # read a config file and build a dict of key=value pairs
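+    # e.g. the line "masters=ark0,ark1" in run_config.conf yields {'masters': 'ark0,ark1'}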
+    def read_file(self,path):
+        with open(path, 'r') as f:
+            config = {}
+            for line in f.readlines():
+                arr = line.strip().split('=')
+                if (len(arr) == 2):
+                    config[arr[0]] = arr[1]
+        return config
+
+    # resolve a hostname to its ip address
+ def get_ip_by_hostname(self,hostname):
+ return socket.gethostbyname(hostname)
+
+    # restart any master/worker whose zk registration has disappeared
+ def restart_server(self,inc):
+ config_dict = self.read_file('/data1_1T/escheduler/conf/config/run_config.conf')
+
+ master_list = config_dict.get('masters').split(',')
+ master_list = list(map(lambda item : self.get_ip_by_hostname(item),master_list))
+
+ worker_list = config_dict.get('workers').split(',')
+ worker_list = list(map(lambda item: self.get_ip_by_hostname(item), worker_list))
+
+ if (self.zk.exists('/escheduler/masters')):
+ zk_master_list = []
+ zk_master_nodes = self.zk.get_children('/escheduler/masters')
+ for zk_master_node in zk_master_nodes:
+ zk_master_list.append(zk_master_node.split('_')[0])
+ restart_master_list = list(set(master_list) - set(zk_master_list))
+ if (len(restart_master_list) != 0):
+ for master in restart_master_list:
+ print("master " + self.get_ip_by_hostname(master) + " 服务已经掉了")
+ os.system('ssh ' + self.get_ip_by_hostname(master) + ' sh /data1_1T/escheduler/bin/escheduler-daemon.sh start master-server')
+
+ if (self.zk.exists('/escheduler/workers')):
+ zk_worker_list = []
+ zk_worker_nodes = self.zk.get_children('/escheduler/workers')
+ for zk_worker_node in zk_worker_nodes:
+ zk_worker_list.append(zk_worker_node.split('_')[0])
+ restart_worker_list = list(set(worker_list) - set(zk_worker_list))
+ if (len(restart_worker_list) != 0):
+ for worker in restart_worker_list:
+ print("worker " + self.get_ip_by_hostname(worker) + " 服务已经掉了")
+ os.system('ssh ' + self.get_ip_by_hostname(worker) + ' sh /data1_1T/escheduler/bin/escheduler-daemon.sh start worker-server')
+
+ print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
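+        # re-register this handler with sched so that it fires again after another 'inc' seconds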
+ schedule.enter(inc, 0, self.restart_server, (inc,))
+    # default interval: 60s
+ def main(self,inc=60):
+        # sched.enter takes four arguments: the delay, the priority (used to order events
+        # that fire at the same time), the callback to trigger, and its argument tuple
+ schedule.enter(0, 0, self.restart_server, (inc,))
+ schedule.run()
+if __name__ == '__main__':
+ zkClient = ZkClient()
+ zkClient.main(300)
\ No newline at end of file
diff --git a/package.xml b/package.xml
new file mode 100644
index 0000000000..d127415050
--- /dev/null
+++ b/package.xml
@@ -0,0 +1,72 @@
+<assembly
+        xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0"
+        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+        xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
+    <id>all</id>
+    <formats>
+        <format>tar.gz</format>
+        <format>dir</format>
+    </formats>
+    <includeBaseDirectory>false</includeBaseDirectory>
+
+    <fileSets>
+        <fileSet>
+            <directory>escheduler-server/target/escheduler-server-${project.version}</directory>
+            <includes>
+                <include>**/*.*</include>
+            </includes>
+            <outputDirectory>.</outputDirectory>
+        </fileSet>
+
+        <fileSet>
+            <directory>escheduler-api/target/escheduler-api-${project.version}</directory>
+            <includes>
+                <include>**/*.*</include>
+            </includes>
+            <outputDirectory>.</outputDirectory>
+        </fileSet>
+
+        <fileSet>
+            <directory>escheduler-alert/target/escheduler-alert-${project.version}</directory>
+            <includes>
+                <include>**/*.*</include>
+            </includes>
+            <outputDirectory>.</outputDirectory>
+        </fileSet>
+
+        <fileSet>
+            <directory>sql</directory>
+            <includes>
+                <include>**/*.*</include>
+            </includes>
+            <outputDirectory>./sql</outputDirectory>
+        </fileSet>
+
+        <fileSet>
+            <directory>script</directory>
+            <includes>
+                <include>*.*</include>
+            </includes>
+            <outputDirectory>./script</outputDirectory>
+        </fileSet>
+
+        <fileSet>
+            <directory>script</directory>
+            <includes>
+                <include>config/*.*</include>
+                <include>env/*.*</include>
+            </includes>
+            <outputDirectory>./conf</outputDirectory>
+        </fileSet>
+
+        <fileSet>
+            <directory>./</directory>
+            <includes>
+                <include>*.sh</include>
+                <include>*.py</include>
+            </includes>
+            <outputDirectory>.</outputDirectory>
+        </fileSet>
+    </fileSets>
+</assembly>
\ No newline at end of file
diff --git a/pom.xml b/pom.xml
new file mode 100644
index 0000000000..7c31791874
--- /dev/null
+++ b/pom.xml
@@ -0,0 +1,426 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <groupId>cn.analysys</groupId>
+    <artifactId>escheduler</artifactId>
+    <version>1.0.0</version>
+    <packaging>pom</packaging>
+    <name>escheduler</name>
+    <url>http://maven.apache.org</url>
+
+    <properties>
+        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+        <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
+        <curator.version>2.12.0</curator.version>
+        <spring.version>4.3.7.RELEASE</spring.version>
+        <spring.boot.version>1.4.5.RELEASE</spring.boot.version>
+        <java.version>1.8</java.version>
+        <logback.version>1.2.3</logback.version>
+        <hadoop.version>2.7.3</hadoop.version>
+        <quartz.version>2.2.3</quartz.version>
+    </properties>
+
+    <dependencyManagement>
+        <dependencies>
+            <dependency>
+                <groupId>org.mybatis.spring.boot</groupId>
+                <artifactId>mybatis-spring-boot-autoconfigure</artifactId>
+                <version>1.2.0</version>
+            </dependency>
+            <dependency>
+                <groupId>org.mybatis.spring.boot</groupId>
+                <artifactId>mybatis-spring-boot-starter</artifactId>
+                <version>1.2.0</version>
+            </dependency>
+
+            <dependency>
+                <groupId>org.quartz-scheduler</groupId>
+                <artifactId>quartz</artifactId>
+                <version>${quartz.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>org.quartz-scheduler</groupId>
+                <artifactId>quartz-jobs</artifactId>
+                <version>${quartz.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>com.cronutils</groupId>
+                <artifactId>cron-utils</artifactId>
+                <version>5.0.5</version>
+            </dependency>
+            <dependency>
+                <groupId>org.mybatis</groupId>
+                <artifactId>mybatis</artifactId>
+                <version>3.4.2</version>
+            </dependency>
+            <dependency>
+                <groupId>org.mybatis</groupId>
+                <artifactId>mybatis-spring</artifactId>
+                <version>1.3.1</version>
+            </dependency>
+            <dependency>
+                <groupId>org.springframework</groupId>
+                <artifactId>spring-tx</artifactId>
+                <version>${spring.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>org.springframework</groupId>
+                <artifactId>spring-jdbc</artifactId>
+                <version>${spring.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>com.alibaba</groupId>
+                <artifactId>fastjson</artifactId>
+                <version>1.2.29</version>
+            </dependency>
+            <dependency>
+                <groupId>org.springframework.boot</groupId>
+                <artifactId>spring-boot-starter-jetty</artifactId>
+                <version>${spring.boot.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>org.springframework</groupId>
+                <artifactId>spring-core</artifactId>
+                <version>${spring.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>org.springframework</groupId>
+                <artifactId>spring-context</artifactId>
+                <version>${spring.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>org.springframework</groupId>
+                <artifactId>spring-beans</artifactId>
+                <version>${spring.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>org.springframework</groupId>
+                <artifactId>spring-test</artifactId>
+                <version>${spring.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>org.springframework.boot</groupId>
+                <artifactId>spring-boot-starter-parent</artifactId>
+                <version>${spring.boot.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>org.springframework.boot</groupId>
+                <artifactId>spring-boot-starter-web</artifactId>
+                <version>${spring.boot.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>org.springframework.boot</groupId>
+                <artifactId>spring-boot-starter-test</artifactId>
+                <version>${spring.boot.version}</version>
+                <scope>test</scope>
+            </dependency>
+            <dependency>
+                <groupId>org.springframework.boot</groupId>
+                <artifactId>spring-boot-starter-aop</artifactId>
+                <version>${spring.boot.version}</version>
+            </dependency>
+
+            <dependency>
+                <groupId>cn.analysys</groupId>
+                <artifactId>escheduler-common</artifactId>
+                <version>${project.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>cn.analysys</groupId>
+                <artifactId>escheduler-dao</artifactId>
+                <version>${project.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>cn.analysys</groupId>
+                <artifactId>escheduler-api</artifactId>
+                <version>${project.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>cn.analysys</groupId>
+                <artifactId>escheduler-rpc</artifactId>
+                <version>${project.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>cn.analysys</groupId>
+                <artifactId>escheduler-alert</artifactId>
+                <version>${project.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>org.apache.curator</groupId>
+                <artifactId>curator-framework</artifactId>
+                <version>${curator.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>org.apache.curator</groupId>
+                <artifactId>curator-recipes</artifactId>
+                <version>${curator.version}</version>
+            </dependency>
+
+            <dependency>
+                <groupId>commons-codec</groupId>
+                <artifactId>commons-codec</artifactId>
+                <version>1.6</version>
+            </dependency>
+            <dependency>
+                <groupId>commons-logging</groupId>
+                <artifactId>commons-logging</artifactId>
+                <version>1.1.1</version>
+            </dependency>
+            <dependency>
+                <groupId>org.apache.httpcomponents</groupId>
+                <artifactId>httpclient</artifactId>
+                <version>4.3.2</version>
+            </dependency>
+            <dependency>
+                <groupId>org.apache.httpcomponents</groupId>
+                <artifactId>httpcore</artifactId>
+                <version>4.3.2</version>
+            </dependency>
+            <dependency>
+                <groupId>com.fasterxml.jackson.core</groupId>
+                <artifactId>jackson-annotations</artifactId>
+                <version>2.8.6</version>
+            </dependency>
+            <dependency>
+                <groupId>com.fasterxml.jackson.core</groupId>
+                <artifactId>jackson-databind</artifactId>
+                <version>2.8.6</version>
+            </dependency>
+            <dependency>
+                <groupId>com.fasterxml.jackson.core</groupId>
+                <artifactId>jackson-core</artifactId>
+                <version>2.8.6</version>
+            </dependency>
+
+            <dependency>
+                <groupId>junit</groupId>
+                <artifactId>junit</artifactId>
+                <version>4.12</version>
+            </dependency>
+            <dependency>
+                <groupId>mysql</groupId>
+                <artifactId>mysql-connector-java</artifactId>
+                <version>5.1.34</version>
+            </dependency>
+            <dependency>
+                <groupId>org.slf4j</groupId>
+                <artifactId>slf4j-api</artifactId>
+                <version>1.7.5</version>
+            </dependency>
+            <dependency>
+                <groupId>org.slf4j</groupId>
+                <artifactId>slf4j-log4j12</artifactId>
+                <version>1.7.5</version>
+            </dependency>
+            <dependency>
+                <groupId>commons-collections</groupId>
+                <artifactId>commons-collections</artifactId>
+                <version>3.2.2</version>
+            </dependency>
+            <dependency>
+                <groupId>commons-lang</groupId>
+                <artifactId>commons-lang</artifactId>
+                <version>2.3</version>
+            </dependency>
+            <dependency>
+                <groupId>org.apache.commons</groupId>
+                <artifactId>commons-lang3</artifactId>
+                <version>3.5</version>
+            </dependency>
+            <dependency>
+                <groupId>commons-httpclient</groupId>
+                <artifactId>commons-httpclient</artifactId>
+                <version>3.0.1</version>
+            </dependency>
+            <dependency>
+                <groupId>commons-beanutils</groupId>
+                <artifactId>commons-beanutils</artifactId>
+                <version>1.7.0</version>
+            </dependency>
+            <dependency>
+                <groupId>commons-configuration</groupId>
+                <artifactId>commons-configuration</artifactId>
+                <version>1.10</version>
+            </dependency>
+            <dependency>
+                <groupId>com.alibaba</groupId>
+                <artifactId>druid</artifactId>
+                <version>1.1.14</version>
+            </dependency>
+
+            <dependency>
+                <groupId>ch.qos.logback</groupId>
+                <artifactId>logback-classic</artifactId>
+                <version>${logback.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>ch.qos.logback</groupId>
+                <artifactId>logback-core</artifactId>
+                <version>${logback.version}</version>
+            </dependency>
+
+            <dependency>
+                <groupId>org.apache.commons</groupId>
+                <artifactId>commons-email</artifactId>
+                <version>1.2</version>
+            </dependency>
+            <dependency>
+                <groupId>javax.mail</groupId>
+                <artifactId>mail</artifactId>
+                <version>1.4.5</version>
+            </dependency>
+            <dependency>
+                <groupId>org.apache.poi</groupId>
+                <artifactId>poi</artifactId>
+                <version>3.17</version>
+            </dependency>
+            <dependency>
+                <groupId>org.freemarker</groupId>
+                <artifactId>freemarker</artifactId>
+                <version>2.3.21</version>
+            </dependency>
+
+            <dependency>
+                <groupId>org.apache.hadoop</groupId>
+                <artifactId>hadoop-common</artifactId>
+                <version>${hadoop.version}</version>
+                <exclusions>
+                    <exclusion>
+                        <groupId>org.slf4j</groupId>
+                        <artifactId>slf4j-log4j12</artifactId>
+                    </exclusion>
+                    <exclusion>
+                        <groupId>com.sun.jersey</groupId>
+                        <artifactId>jersey-json</artifactId>
+                    </exclusion>
+                </exclusions>
+            </dependency>
+            <dependency>
+                <groupId>org.apache.hadoop</groupId>
+                <artifactId>hadoop-client</artifactId>
+                <version>${hadoop.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>org.apache.hadoop</groupId>
+                <artifactId>hadoop-hdfs</artifactId>
+                <version>${hadoop.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>org.apache.hadoop</groupId>
+                <artifactId>hadoop-yarn-common</artifactId>
+                <version>${hadoop.version}</version>
+            </dependency>
+
+            <dependency>
+                <groupId>javax.servlet</groupId>
+                <artifactId>javax.servlet-api</artifactId>
+                <version>3.1.0</version>
+            </dependency>
+            <dependency>
+                <groupId>org.apache.commons</groupId>
+                <artifactId>commons-collections4</artifactId>
+                <version>4.1</version>
+            </dependency>
+            <dependency>
+                <groupId>com.google.guava</groupId>
+                <artifactId>guava</artifactId>
+                <version>19.0</version>
+            </dependency>
+            <dependency>
+                <groupId>org.postgresql</groupId>
+                <artifactId>postgresql</artifactId>
+                <version>42.1.4</version>
+            </dependency>
+            <dependency>
+                <groupId>org.apache.hive</groupId>
+                <artifactId>hive-jdbc</artifactId>
+                <version>2.1.0</version>
+            </dependency>
+            <dependency>
+                <groupId>commons-io</groupId>
+                <artifactId>commons-io</artifactId>
+                <version>2.4</version>
+            </dependency>
+            <dependency>
+                <groupId>com.github.oshi</groupId>
+                <artifactId>oshi-core</artifactId>
+                <version>3.5.0</version>
+            </dependency>
+        </dependencies>
+    </dependencyManagement>
+
+    <build>
+        <plugins>
+            <plugin>
+                <artifactId>maven-assembly-plugin</artifactId>
+                <version>2.6</version>
+                <configuration>
+                    <descriptors>
+                        <descriptor>package.xml</descriptor>
+                    </descriptors>
+                    <appendAssemblyId>false</appendAssemblyId>
+                </configuration>
+                <executions>
+                    <execution>
+                        <id>make-assembly</id>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>single</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-compiler-plugin</artifactId>
+                <version>2.3.2</version>
+                <configuration>
+                    <source>${java.version}</source>
+                    <target>${java.version}</target>
+                    <encoding>${project.build.sourceEncoding}</encoding>
+                    <skip>false</skip>
+                </configuration>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-surefire-plugin</artifactId>
+                <version>2.19.1</version>
+                <configuration>
+                    <includes>
+                        <include>**/*Test*.java</include>
+                    </includes>
+                    <skip>true</skip>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+
+    <modules>
+        <module>escheduler-server</module>
+        <module>escheduler-common</module>
+        <module>escheduler-api</module>
+        <module>escheduler-dao</module>
+        <module>escheduler-alert</module>
+        <module>escheduler-rpc</module>
+    </modules>
+</project>
\ No newline at end of file
diff --git a/script/config/install_config.conf b/script/config/install_config.conf
new file mode 100644
index 0000000000..43b955d4f1
--- /dev/null
+++ b/script/config/install_config.conf
@@ -0,0 +1,3 @@
+installPath=/data1_1T/escheduler
+deployUser=escheduler
+ips=ark0,ark1,ark2,ark3,ark4
diff --git a/script/config/run_config.conf b/script/config/run_config.conf
new file mode 100644
index 0000000000..f4cfd832c4
--- /dev/null
+++ b/script/config/run_config.conf
@@ -0,0 +1,4 @@
+masters=ark0,ark1
+workers=ark2,ark3,ark4
+alertServer=ark3
+apiServers=ark1
\ No newline at end of file
diff --git a/script/del_zk_node.py b/script/del_zk_node.py
new file mode 100644
index 0000000000..8396f27c55
--- /dev/null
+++ b/script/del_zk_node.py
@@ -0,0 +1,17 @@
+import time
+import sys
+from kazoo.client import KazooClient
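+
+# usage: python del_zk_node.py <zk_hosts> <node_path>
+# install.sh invokes it as: python script/del_zk_node.py $zkQuorum $zkRoot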
+
+class ZkClient:
+ def __init__(self):
+ self.zk = KazooClient(hosts=sys.argv[1])
+ self.zk.start()
+ def del_node(self):
+ self.zk.delete(sys.argv[2], recursive=True)
+        print('deleted successfully')
+ def __del__(self):
+ self.zk.stop()
+if __name__ == '__main__':
+ zkclient = ZkClient()
+ zkclient.del_node()
+ time.sleep(2)
diff --git a/script/env/.escheduler_env.sh b/script/env/.escheduler_env.sh
new file mode 100644
index 0000000000..7a258cc75d
--- /dev/null
+++ b/script/env/.escheduler_env.sh
@@ -0,0 +1,9 @@
+export HADOOP_HOME=/opt/soft/hadoop
+export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop
+export SPARK_HOME1=/opt/soft/spark1
+export SPARK_HOME2=/opt/soft/spark2
+export PYTHON_HOME=/opt/soft/python
+export JAVA_HOME=/opt/soft/java
+export HIVE_HOME=/opt/soft/hive
+
+export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME/bin:$JAVA_HOME/bin:$HIVE_HOME/bin:$PATH
\ No newline at end of file
diff --git a/script/env/escheduler_env.py b/script/env/escheduler_env.py
new file mode 100644
index 0000000000..e1d0afef4a
--- /dev/null
+++ b/script/env/escheduler_env.py
@@ -0,0 +1,12 @@
+import os
+
+HADOOP_HOME="/opt/soft/hadoop"
+SPARK_HOME1="/opt/soft/spark1"
+SPARK_HOME2="/opt/soft/spark2"
+PYTHON_HOME="/opt/soft/python"
+JAVA_HOME="/opt/soft/java"
+HIVE_HOME="/opt/soft/hive"
+PATH=os.environ['PATH']
+PATH="%s/bin:%s/bin:%s/bin:%s/bin:%s/bin:%s/bin:%s"%(HIVE_HOME,HADOOP_HOME,SPARK_HOME1,SPARK_HOME2,JAVA_HOME,PYTHON_HOME,PATH)
+
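+# note: os.putenv updates the environment passed to child processes spawned by this
+# interpreter, but unlike assigning to os.environ it does not update os.environ itself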
+os.putenv('PATH','%s'%PATH)
\ No newline at end of file
diff --git a/script/scp_hosts.sh b/script/scp_hosts.sh
new file mode 100644
index 0000000000..f77d535775
--- /dev/null
+++ b/script/scp_hosts.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+workDir=`dirname $0`
+workDir=`cd ${workDir};pwd`
+source $workDir/../conf/config/run_config.conf
+source $workDir/../conf/config/install_config.conf
+
+tar -zxvf $workDir/../escheduler-1.0.0-PREVIEW.tar.gz -C $installPath
+
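+# the tarball is unpacked into the local install path above; the loop below then pushes
+# bin/, conf/, lib/, script/, sql/ and install.sh to every host listed in $ips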
+hostsArr=(${ips//,/ })
+for host in ${hostsArr[@]}
+do
+
+ if ! ssh $host test -e $installPath; then
+ ssh $host "sudo mkdir -p $installPath; sudo chown -R $deployUser:$deployUser $installPath"
+ fi
+
+ ssh $host "cd $installPath/; rm -rf bin/ conf/ lib/ script/ sql/"
+ scp -r $workDir/../bin $host:$installPath
+ scp -r $workDir/../conf $host:$installPath
+ scp -r $workDir/../lib $host:$installPath
+ scp -r $workDir/../script $host:$installPath
+ scp -r $workDir/../sql $host:$installPath
+ scp $workDir/../install.sh $host:$installPath
+done
diff --git a/script/start_all.sh b/script/start_all.sh
new file mode 100644
index 0000000000..f0e8ecb8cd
--- /dev/null
+++ b/script/start_all.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+workDir=`dirname $0`
+workDir=`cd ${workDir};pwd`
+source $workDir/../conf/config/run_config.conf
+source $workDir/../conf/config/install_config.conf
+
+mastersHost=(${masters//,/ })
+for master in ${mastersHost[@]}
+do
+ echo $master
+ ssh $master "cd $installPath/; sh bin/escheduler-daemon.sh start master-server;"
+
+done
+
+workersHost=(${workers//,/ })
+for worker in ${workersHost[@]}
+do
+ echo $worker
+
+ ssh $worker "cd $installPath/; sh bin/escheduler-daemon.sh start worker-server;"
+ ssh $worker "cd $installPath/; sh bin/escheduler-daemon.sh start logger-server;"
+done
+
+ssh $alertServer "cd $installPath/; sh bin/escheduler-daemon.sh start alert-server;"
+
+apiServersHost=(${apiServers//,/ })
+for apiServer in ${apiServersHost[@]}
+do
+ echo $apiServer
+
+ ssh $apiServer "cd $installPath/; sh bin/escheduler-daemon.sh start api-server;"
+done
+
diff --git a/script/stop_all.sh b/script/stop_all.sh
new file mode 100644
index 0000000000..a9f92a072f
--- /dev/null
+++ b/script/stop_all.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+workDir=`dirname $0`
+workDir=`cd ${workDir};pwd`
+source $workDir/../conf/config/run_config.conf
+source $workDir/../conf/config/install_config.conf
+
+mastersHost=(${masters//,/ })
+for master in ${mastersHost[@]}
+do
+ echo $master
+ ssh $master "cd $installPath/; sh bin/escheduler-daemon.sh stop master-server;"
+
+done
+
+workersHost=(${workers//,/ })
+for worker in ${workersHost[@]}
+do
+ echo $worker
+
+ ssh $worker "cd $installPath/; sh bin/escheduler-daemon.sh stop worker-server;"
+ ssh $worker "cd $installPath/; sh bin/escheduler-daemon.sh stop logger-server;"
+done
+
+ssh $alertServer "cd $installPath/; sh bin/escheduler-daemon.sh stop alert-server;"
+
+apiServersHost=(${apiServers//,/ })
+for apiServer in ${apiServersHost[@]}
+do
+ echo $apiServer
+
+ ssh $apiServer "cd $installPath/; sh bin/escheduler-daemon.sh stop api-server;"
+done
+
diff --git a/sql/escheduler.sql b/sql/escheduler.sql
new file mode 100644
index 0000000000..7dd5b874d7
--- /dev/null
+++ b/sql/escheduler.sql
@@ -0,0 +1,436 @@
+/*
+Navicat MySQL Data Transfer
+
+Source Server : xx.xx
+Source Server Version : 50725
+Source Host : 192.168.xx.xx:3306
+Source Database : escheduler
+
+Target Server Type : MYSQL
+Target Server Version : 50725
+File Encoding : 65001
+
+Date: 2019-03-23 11:47:30
+*/
+
+SET FOREIGN_KEY_CHECKS=0;
+
+-- ----------------------------
+-- Table structure for t_escheduler_alert
+-- ----------------------------
+DROP TABLE IF EXISTS `t_escheduler_alert`;
+CREATE TABLE `t_escheduler_alert` (
+  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'primary key',
+  `title` varchar(64) DEFAULT NULL COMMENT 'message title',
+  `show_type` tinyint(4) DEFAULT NULL COMMENT 'send format: 0 TABLE, 1 TEXT',
+  `content` text COMMENT 'message content (mail or SMS; mail is stored as a JSON map, SMS as a plain string)',
+  `alert_type` tinyint(4) DEFAULT NULL COMMENT '0 mail, 1 SMS',
+  `alert_status` tinyint(4) DEFAULT '0' COMMENT '0 waiting to execute, 1 executed successfully, 2 execution failed',
+  `log` text COMMENT 'execution log',
+  `alertgroup_id` int(11) DEFAULT NULL COMMENT 'alert group id',
+  `receivers` text COMMENT 'receivers',
+  `receivers_cc` text COMMENT 'cc receivers',
+  `create_time` datetime DEFAULT NULL COMMENT 'create time',
+  `update_time` datetime DEFAULT NULL COMMENT 'update time',
+  PRIMARY KEY (`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+-- ----------------------------
+-- Table structure for t_escheduler_alertgroup
+-- ----------------------------
+DROP TABLE IF EXISTS `t_escheduler_alertgroup`;
+CREATE TABLE `t_escheduler_alertgroup` (
+  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'primary key',
+  `group_name` varchar(255) DEFAULT NULL COMMENT 'group name',
+  `group_type` tinyint(4) DEFAULT NULL COMMENT 'group type (0 mail, 1 SMS...)',
+  `desc` varchar(255) DEFAULT NULL COMMENT 'remark',
+  `create_time` datetime DEFAULT NULL COMMENT 'create time',
+  `update_time` datetime DEFAULT NULL COMMENT 'update time',
+  PRIMARY KEY (`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+-- ----------------------------
+-- Table structure for t_escheduler_command
+-- ----------------------------
+DROP TABLE IF EXISTS `t_escheduler_command`;
+CREATE TABLE `t_escheduler_command` (
+  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'primary key',
+  `command_type` tinyint(4) DEFAULT NULL COMMENT 'command type: 0 start workflow, 1 execute from current node, 2 recover fault-tolerant workflow, 3 recover paused process, 4 execute from failed node, 5 complement data, 6 schedule, 7 rerun, 8 pause, 9 stop, 10 recover waiting thread',
+  `process_definition_id` int(11) DEFAULT NULL COMMENT 'process definition id',
+  `command_param` text COMMENT 'command parameters (json)',
+  `task_depend_type` tinyint(4) DEFAULT NULL COMMENT 'node dependency type: 0 current node, 1 forward, 2 backward',
+  `failure_strategy` tinyint(4) DEFAULT '0' COMMENT 'failure strategy: 0 end, 1 continue',
+  `warning_type` tinyint(4) DEFAULT '0' COMMENT 'alert type: 0 none, 1 on process success, 2 on process failure, 3 on both success and failure',
+  `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group',
+  `schedule_time` datetime DEFAULT NULL COMMENT 'scheduled run time',
+  `start_time` datetime DEFAULT NULL COMMENT 'start time',
+  `executor_id` int(11) DEFAULT NULL COMMENT 'executor user id',
+  `dependence` varchar(255) DEFAULT NULL COMMENT 'dependence field',
+  `update_time` datetime DEFAULT NULL COMMENT 'update time',
+  `process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority: 0 Highest, 1 High, 2 Medium, 3 Low, 4 Lowest',
+  PRIMARY KEY (`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+-- ----------------------------
+-- Table structure for t_escheduler_datasource
+-- ----------------------------
+DROP TABLE IF EXISTS `t_escheduler_datasource`;
+CREATE TABLE `t_escheduler_datasource` (
+  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'primary key',
+  `name` varchar(64) NOT NULL COMMENT 'data source name',
+  `note` varchar(256) DEFAULT NULL COMMENT 'description',
+  `type` tinyint(4) NOT NULL COMMENT 'data source type: 0 mysql, 1 postgresql, 2 hive, 3 spark',
+  `user_id` int(11) NOT NULL COMMENT 'creator user id',
+  `connection_params` text NOT NULL COMMENT 'connection parameters (json)',
+  `create_time` datetime NOT NULL COMMENT 'create time',
+  `update_time` datetime DEFAULT NULL COMMENT 'update time',
+  PRIMARY KEY (`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+-- ----------------------------
+-- Table structure for t_escheduler_master_server
+-- ----------------------------
+DROP TABLE IF EXISTS `t_escheduler_master_server`;
+CREATE TABLE `t_escheduler_master_server` (
+  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'primary key',
+  `host` varchar(45) DEFAULT NULL COMMENT 'ip',
+  `port` int(11) DEFAULT NULL COMMENT 'process number',
+  `zk_directory` varchar(64) DEFAULT NULL COMMENT 'zk registration directory',
+  `res_info` varchar(256) DEFAULT NULL COMMENT 'cluster resource info, json: {"cpu":xxx,"memroy":xxx}',
+  `create_time` datetime DEFAULT NULL COMMENT 'create time',
+  `last_heartbeat_time` datetime DEFAULT NULL COMMENT 'last heartbeat time',
+  PRIMARY KEY (`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+-- ----------------------------
+-- Table structure for t_escheduler_process_definition
+-- ----------------------------
+DROP TABLE IF EXISTS `t_escheduler_process_definition`;
+CREATE TABLE `t_escheduler_process_definition` (
+  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'primary key',
+  `name` varchar(255) DEFAULT NULL COMMENT 'process definition name',
+  `version` int(11) DEFAULT NULL COMMENT 'process definition version',
+  `release_state` tinyint(4) DEFAULT NULL COMMENT 'release state of the process definition: 0 offline, 1 online',
+  `project_id` int(11) DEFAULT NULL COMMENT 'project id',
+  `user_id` int(11) DEFAULT NULL COMMENT 'id of the user owning the process definition',
+  `process_definition_json` longtext COMMENT 'process definition json',
+  `desc` text COMMENT 'process definition description',
+  `global_params` text COMMENT 'global parameters',
+  `flag` tinyint(4) DEFAULT NULL COMMENT 'whether the process is available: 0 unavailable, 1 available',
+  `locations` text COMMENT 'node location information',
+  `connects` text COMMENT 'node connection information',
+  `receivers` text COMMENT 'receivers',
+  `receivers_cc` text COMMENT 'cc receivers',
+  `create_time` datetime DEFAULT NULL COMMENT 'create time',
+  `update_time` datetime DEFAULT NULL COMMENT 'update time',
+  PRIMARY KEY (`id`),
+  KEY `process_definition_index` (`project_id`,`id`) USING BTREE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+-- ----------------------------
+-- Table structure for t_escheduler_process_instance
+-- ----------------------------
+DROP TABLE IF EXISTS `t_escheduler_process_instance`;
+CREATE TABLE `t_escheduler_process_instance` (
+  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'primary key',
+  `name` varchar(255) DEFAULT NULL COMMENT 'process instance name',
+  `process_definition_id` int(11) DEFAULT NULL COMMENT 'process definition id',
+  `state` tinyint(4) DEFAULT NULL COMMENT 'process instance state: 0 submitted, 1 running, 2 preparing to pause, 3 paused, 4 preparing to stop, 5 stopped, 6 failed, 7 succeeded, 8 needs fault tolerance, 9 killed, 10 waiting for thread, 11 waiting for dependency',
+  `recovery` tinyint(4) DEFAULT NULL COMMENT 'process instance fault-tolerance flag: 0 normal, 1 needs fault-tolerant restart',
+  `start_time` datetime DEFAULT NULL COMMENT 'process instance start time',
+  `end_time` datetime DEFAULT NULL COMMENT 'process instance end time',
+  `run_times` int(11) DEFAULT NULL COMMENT 'process instance run count',
+  `host` varchar(45) DEFAULT NULL COMMENT 'host the process instance runs on',
+  `command_type` tinyint(4) DEFAULT NULL COMMENT 'command type: 0 start workflow, 1 execute from current node, 2 recover fault-tolerant workflow, 3 recover paused process, 4 execute from failed node, 5 complement data, 6 schedule, 7 rerun, 8 pause, 9 stop, 10 recover waiting thread',
+  `command_param` text COMMENT 'command parameters (json)',
+  `task_depend_type` tinyint(4) DEFAULT NULL COMMENT 'node dependency type: 0 current node, 1 forward, 2 backward',
+  `max_try_times` tinyint(4) DEFAULT '0' COMMENT 'max retry times',
+  `failure_strategy` tinyint(4) DEFAULT '0' COMMENT 'failure strategy: 0 end on failure, 1 continue on failure',
+  `warning_type` tinyint(4) DEFAULT '0' COMMENT 'alert type: 0 none, 1 on process success, 2 on process failure, 3 on both success and failure',
+  `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id',
+  `schedule_time` datetime DEFAULT NULL COMMENT 'scheduled run time',
+  `command_start_time` datetime DEFAULT NULL COMMENT 'command start time',
+  `global_params` text COMMENT 'global parameters (fixed from the process definition)',
+  `process_instance_json` longtext COMMENT 'process instance json (a copy of the process definition json)',
+  `flag` tinyint(4) DEFAULT '1' COMMENT 'availability: 1 available, 0 unavailable',
+  `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+  `is_sub_process` int(11) DEFAULT '0' COMMENT 'whether it is a sub-workflow: 1 yes, 0 no',
+  `executor_id` int(11) NOT NULL COMMENT 'command executor',
+  `locations` text COMMENT 'node location information',
+  `connects` text COMMENT 'node connection information',
+  `history_cmd` text COMMENT 'history commands; records every operation on the process instance',
+  `dependence_schedule_times` text COMMENT 'estimated schedule times of dependent nodes',
+  `process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority: 0 Highest, 1 High, 2 Medium, 3 Low, 4 Lowest',
+  PRIMARY KEY (`id`),
+  KEY `process_instance_index` (`process_definition_id`,`id`) USING BTREE,
+  KEY `start_time_index` (`start_time`) USING BTREE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+-- ----------------------------
+-- Table structure for t_escheduler_project
+-- ----------------------------
+DROP TABLE IF EXISTS `t_escheduler_project`;
+CREATE TABLE `t_escheduler_project` (
+  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'primary key',
+  `name` varchar(100) DEFAULT NULL COMMENT 'project name',
+  `desc` varchar(200) DEFAULT NULL COMMENT 'project description',
+  `user_id` int(11) DEFAULT NULL COMMENT 'owner user id',
+  `flag` tinyint(4) DEFAULT '1' COMMENT 'availability: 1 available, 0 unavailable',
+  `create_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT 'create time',
+  `update_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT 'update time',
+  PRIMARY KEY (`id`),
+  KEY `user_id_index` (`user_id`) USING BTREE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+-- ----------------------------
+-- Table structure for t_escheduler_queue
+-- ----------------------------
+DROP TABLE IF EXISTS `t_escheduler_queue`;
+CREATE TABLE `t_escheduler_queue` (
+  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'primary key',
+  `queue_name` varchar(64) DEFAULT NULL COMMENT 'queue name',
+  `queue` varchar(64) DEFAULT NULL COMMENT 'yarn queue name',
+  PRIMARY KEY (`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+-- ----------------------------
+-- Table structure for t_escheduler_relation_datasource_user
+-- ----------------------------
+DROP TABLE IF EXISTS `t_escheduler_relation_datasource_user`;
+CREATE TABLE `t_escheduler_relation_datasource_user` (
+  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'primary key',
+  `user_id` int(11) NOT NULL COMMENT 'user id',
+  `datasource_id` int(11) DEFAULT NULL COMMENT 'data source id',
+  `perm` int(11) DEFAULT '1' COMMENT 'permission',
+  `create_time` datetime DEFAULT NULL COMMENT 'create time',
+  `update_time` datetime DEFAULT NULL COMMENT 'update time',
+  PRIMARY KEY (`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+-- ----------------------------
+-- Table structure for t_escheduler_relation_process_instance
+-- ----------------------------
+DROP TABLE IF EXISTS `t_escheduler_relation_process_instance`;
+CREATE TABLE `t_escheduler_relation_process_instance` (
+  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'primary key',
+  `parent_process_instance_id` int(11) DEFAULT NULL COMMENT 'parent process instance id',
+  `parent_task_instance_id` int(11) DEFAULT NULL COMMENT 'parent task instance id',
+  `process_instance_id` int(11) DEFAULT NULL COMMENT 'child process instance id',
+  PRIMARY KEY (`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+-- ----------------------------
+-- Table structure for t_escheduler_relation_project_user
+-- ----------------------------
+DROP TABLE IF EXISTS `t_escheduler_relation_project_user`;
+CREATE TABLE `t_escheduler_relation_project_user` (
+  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'primary key',
+  `user_id` int(11) NOT NULL COMMENT 'user id',
+  `project_id` int(11) DEFAULT NULL COMMENT 'project id',
+  `perm` int(11) DEFAULT '1' COMMENT 'permission',
+  `create_time` datetime DEFAULT NULL COMMENT 'create time',
+  `update_time` datetime DEFAULT NULL COMMENT 'update time',
+  PRIMARY KEY (`id`),
+  KEY `user_id_index` (`user_id`) USING BTREE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+-- ----------------------------
+-- Table structure for t_escheduler_relation_resources_user
+-- ----------------------------
+DROP TABLE IF EXISTS `t_escheduler_relation_resources_user`;
+CREATE TABLE `t_escheduler_relation_resources_user` (
+  `id` int(11) NOT NULL AUTO_INCREMENT,
+  `user_id` int(11) NOT NULL COMMENT 'user id',
+  `resources_id` int(11) DEFAULT NULL COMMENT 'resource id',
+  `perm` int(11) DEFAULT '1' COMMENT 'permission',
+  `create_time` datetime DEFAULT NULL COMMENT 'create time',
+  `update_time` datetime DEFAULT NULL COMMENT 'update time',
+  PRIMARY KEY (`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+-- ----------------------------
+-- Table structure for t_escheduler_relation_udfs_user
+-- ----------------------------
+DROP TABLE IF EXISTS `t_escheduler_relation_udfs_user`;
+CREATE TABLE `t_escheduler_relation_udfs_user` (
+  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'primary key',
+  `user_id` int(11) NOT NULL COMMENT 'user id',
+  `udf_id` int(11) DEFAULT NULL COMMENT 'udf id',
+  `perm` int(11) DEFAULT '1' COMMENT 'permission',
+  `create_time` datetime DEFAULT NULL COMMENT 'create time',
+  `update_time` datetime DEFAULT NULL COMMENT 'update time',
+  PRIMARY KEY (`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+-- ----------------------------
+-- Table structure for t_escheduler_relation_user_alertgroup
+-- ----------------------------
+DROP TABLE IF EXISTS `t_escheduler_relation_user_alertgroup`;
+CREATE TABLE `t_escheduler_relation_user_alertgroup` (
+  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'primary key',
+  `alertgroup_id` int(11) DEFAULT NULL COMMENT 'alert group id',
+  `user_id` int(11) DEFAULT NULL COMMENT 'user id',
+  `create_time` datetime DEFAULT NULL COMMENT 'create time',
+  `update_time` datetime DEFAULT NULL COMMENT 'update time',
+  PRIMARY KEY (`id`)
+) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8;
+
+-- ----------------------------
+-- Table structure for t_escheduler_resources
+-- ----------------------------
+DROP TABLE IF EXISTS `t_escheduler_resources`;
+CREATE TABLE `t_escheduler_resources` (
+  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'primary key',
+  `alias` varchar(64) DEFAULT NULL COMMENT 'alias',
+  `file_name` varchar(64) DEFAULT NULL COMMENT 'file name',
+  `desc` varchar(256) DEFAULT NULL COMMENT 'description',
+  `user_id` int(11) DEFAULT NULL COMMENT 'user id',
+  `type` tinyint(4) DEFAULT NULL COMMENT 'resource type: 0 FILE, 1 UDF',
+  `size` bigint(20) DEFAULT NULL COMMENT 'resource size',
+  `create_time` datetime DEFAULT NULL COMMENT 'create time',
+  `update_time` datetime DEFAULT NULL COMMENT 'update time',
+  PRIMARY KEY (`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+-- ----------------------------
+-- Table structure for t_escheduler_schedules
+-- ----------------------------
+DROP TABLE IF EXISTS `t_escheduler_schedules`;
+CREATE TABLE `t_escheduler_schedules` (
+  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'primary key',
+  `process_definition_id` int(11) NOT NULL COMMENT 'process definition id',
+  `start_time` datetime NOT NULL COMMENT 'schedule start time',
+  `end_time` datetime NOT NULL COMMENT 'schedule end time',
+  `crontab` varchar(256) NOT NULL COMMENT 'crontab expression',
+  `failure_strategy` tinyint(4) NOT NULL COMMENT 'failure strategy: 0 end, 1 continue',
+  `user_id` int(11) NOT NULL COMMENT 'user id',
+  `release_state` tinyint(4) NOT NULL COMMENT 'state: 0 offline, 1 online',
+  `warning_type` tinyint(4) NOT NULL COMMENT 'alert type: 0 none, 1 on process success, 2 on process failure, 3 on both success and failure',
+  `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id',
+  `process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority: 0 Highest, 1 High, 2 Medium, 3 Low, 4 Lowest',
+  `create_time` datetime NOT NULL COMMENT 'create time',
+  `update_time` datetime NOT NULL COMMENT 'update time',
+  PRIMARY KEY (`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+-- ----------------------------
+-- Table structure for t_escheduler_session
+-- ----------------------------
+DROP TABLE IF EXISTS `t_escheduler_session`;
+CREATE TABLE `t_escheduler_session` (
+  `id` varchar(64) NOT NULL COMMENT 'primary key',
+  `user_id` int(11) DEFAULT NULL COMMENT 'user id',
+  `ip` varchar(45) DEFAULT NULL COMMENT 'login ip',
+  `last_login_time` datetime DEFAULT NULL COMMENT 'last login time',
+  PRIMARY KEY (`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+-- ----------------------------
+-- Table structure for t_escheduler_task_instance
+-- ----------------------------
+DROP TABLE IF EXISTS `t_escheduler_task_instance`;
+CREATE TABLE `t_escheduler_task_instance` (
+  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'primary key',
+  `name` varchar(255) DEFAULT NULL COMMENT 'task name',
+  `task_type` varchar(64) DEFAULT NULL COMMENT 'task type',
+  `process_definition_id` int(11) DEFAULT NULL COMMENT 'process definition id',
+  `process_instance_id` int(11) DEFAULT NULL COMMENT 'process instance id',
+  `task_json` longtext COMMENT 'task node json',
+  `state` tinyint(4) DEFAULT NULL COMMENT 'task instance state: 0 submitted, 1 running, 2 preparing to pause, 3 paused, 4 preparing to stop, 5 stopped, 6 failed, 7 succeeded, 8 needs fault tolerance, 9 killed, 10 waiting for thread, 11 waiting for dependency',
+  `submit_time` datetime DEFAULT NULL COMMENT 'task submit time',
+  `start_time` datetime DEFAULT NULL COMMENT 'task start time',
+  `end_time` datetime DEFAULT NULL COMMENT 'task end time',
+  `host` varchar(45) DEFAULT NULL COMMENT 'host executing the task',
+  `execute_path` varchar(200) DEFAULT NULL COMMENT 'task execution path',
+  `log_path` varchar(200) DEFAULT NULL COMMENT 'task log path',
+  `alert_flag` tinyint(4) DEFAULT NULL COMMENT 'whether to alert',
+  `retry_times` int(4) DEFAULT '0' COMMENT 'retry times',
+  `pid` int(4) DEFAULT NULL COMMENT 'process pid',
+  `app_link` varchar(255) DEFAULT NULL COMMENT 'yarn app id',
+  `flag` tinyint(4) DEFAULT '1' COMMENT 'availability: 0 unavailable, 1 available',
+  `retry_interval` int(4) DEFAULT NULL COMMENT 'retry interval',
+  `max_retry_times` int(2) DEFAULT NULL COMMENT 'max retry times',
+  `task_instance_priority` int(11) DEFAULT NULL COMMENT 'task instance priority: 0 Highest, 1 High, 2 Medium, 3 Low, 4 Lowest',
+  PRIMARY KEY (`id`),
+  KEY `process_instance_id` (`process_instance_id`) USING BTREE,
+  KEY `task_instance_index` (`process_definition_id`,`process_instance_id`) USING BTREE,
+  CONSTRAINT `foreign_key_instance_id` FOREIGN KEY (`process_instance_id`) REFERENCES `t_escheduler_process_instance` (`id`) ON DELETE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+-- ----------------------------
+-- Table structure for t_escheduler_tenant
+-- ----------------------------
+DROP TABLE IF EXISTS `t_escheduler_tenant`;
+CREATE TABLE `t_escheduler_tenant` (
+  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'primary key',
+  `tenant_code` varchar(64) DEFAULT NULL COMMENT 'tenant code',
+  `tenant_name` varchar(64) DEFAULT NULL COMMENT 'tenant name',
+  `desc` varchar(256) DEFAULT NULL COMMENT 'description',
+  `queue_id` int(11) DEFAULT NULL COMMENT 'queue id',
+  `create_time` datetime DEFAULT NULL COMMENT 'create time',
+  `update_time` datetime DEFAULT NULL COMMENT 'update time',
+  PRIMARY KEY (`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+-- ----------------------------
+-- Table structure for t_escheduler_udfs
+-- ----------------------------
+DROP TABLE IF EXISTS `t_escheduler_udfs`;
+CREATE TABLE `t_escheduler_udfs` (
+  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'primary key',
+  `user_id` int(11) NOT NULL COMMENT 'user id',
+  `func_name` varchar(100) NOT NULL COMMENT 'UDF function name',
+  `class_name` varchar(255) NOT NULL COMMENT 'class name',
+  `type` tinyint(4) NOT NULL COMMENT 'UDF function type',
+  `arg_types` varchar(255) DEFAULT NULL COMMENT 'arguments',
+  `database` varchar(255) DEFAULT NULL COMMENT 'database name',
+  `desc` varchar(255) DEFAULT NULL COMMENT 'description',
+  `resource_id` int(11) NOT NULL COMMENT 'resource id',
+  `resource_name` varchar(255) NOT NULL COMMENT 'resource name',
+  `create_time` datetime NOT NULL COMMENT 'create time',
+  `update_time` datetime NOT NULL COMMENT 'update time',
+  PRIMARY KEY (`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+-- ----------------------------
+-- Table structure for t_escheduler_user
+-- ----------------------------
+DROP TABLE IF EXISTS `t_escheduler_user`;
+CREATE TABLE `t_escheduler_user` (
+  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'user id',
+  `user_name` varchar(64) DEFAULT NULL COMMENT 'username',
+  `user_password` varchar(64) DEFAULT NULL COMMENT 'user password',
+  `user_type` tinyint(4) DEFAULT NULL COMMENT 'user type: 0 administrator, 1 ordinary user',
+  `email` varchar(64) DEFAULT NULL COMMENT 'email',
+  `phone` varchar(11) DEFAULT NULL COMMENT 'phone',
+  `tenant_id` int(11) DEFAULT NULL COMMENT '0 for administrators; for ordinary users, the id of the tenant they belong to',
+  `create_time` datetime DEFAULT NULL COMMENT 'create time',
+  `update_time` datetime DEFAULT NULL COMMENT 'update time',
+  PRIMARY KEY (`id`),
+  UNIQUE KEY `user_name_unique` (`user_name`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+-- ----------------------------
+-- Table structure for t_escheduler_worker_server
+-- ----------------------------
+DROP TABLE IF EXISTS `t_escheduler_worker_server`;
+CREATE TABLE `t_escheduler_worker_server` (
+  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'primary key',
+  `host` varchar(45) DEFAULT NULL COMMENT 'ip',
+  `port` int(11) DEFAULT NULL COMMENT 'process number',
+  `zk_directory` varchar(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL COMMENT 'zk registration directory',
+  `res_info` varchar(255) DEFAULT NULL COMMENT 'cluster resource info, json: {"cpu":xxx,"memroy":xxx}',
+  `create_time` datetime DEFAULT NULL COMMENT 'create time',
+  `last_heartbeat_time` datetime DEFAULT NULL COMMENT 'last heartbeat time',
+  PRIMARY KEY (`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+-- Records of t_escheduler_user, user: admin, password: escheduler123
+INSERT INTO `t_escheduler_user` VALUES ('1', 'admin', '055a97b5fcd6d120372ad1976518f371', '0', '825193156@qq.com', '15001335629', '0', '2018-03-27 15:48:50', '2018-10-24 17:40:22');
+INSERT INTO `t_escheduler_alertgroup` VALUES (1, 'escheduler admin alert group', '0', 'escheduler admin alert group', '2018-11-29 10:20:39', '2018-11-29 10:20:39');
+INSERT INTO `t_escheduler_relation_user_alertgroup` VALUES ('1', '1', '1', '2018-11-29 10:22:33', '2018-11-29 10:22:33');
+
+-- Records of t_escheduler_queue, default queue name: default
+INSERT INTO `t_escheduler_queue` VALUES ('1', 'default', 'default');
+
+
diff --git a/sql/quartz.sql b/sql/quartz.sql
new file mode 100644
index 0000000000..22754b39dc
--- /dev/null
+++ b/sql/quartz.sql
@@ -0,0 +1,179 @@
+ #
+ # In your Quartz properties file, you'll need to set
+ # org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.StdJDBCDelegate
+ #
+ #
+ # By: Ron Cordell - roncordell
+ # I didn't see this anywhere, so I thought I'd post it here. This is the script from Quartz to create the tables in a MySQL database, modified to use INNODB instead of MYISAM.
+
+ DROP TABLE IF EXISTS QRTZ_FIRED_TRIGGERS;
+ DROP TABLE IF EXISTS QRTZ_PAUSED_TRIGGER_GRPS;
+ DROP TABLE IF EXISTS QRTZ_SCHEDULER_STATE;
+ DROP TABLE IF EXISTS QRTZ_LOCKS;
+ DROP TABLE IF EXISTS QRTZ_SIMPLE_TRIGGERS;
+ DROP TABLE IF EXISTS QRTZ_SIMPROP_TRIGGERS;
+ DROP TABLE IF EXISTS QRTZ_CRON_TRIGGERS;
+ DROP TABLE IF EXISTS QRTZ_BLOB_TRIGGERS;
+ DROP TABLE IF EXISTS QRTZ_TRIGGERS;
+ DROP TABLE IF EXISTS QRTZ_JOB_DETAILS;
+ DROP TABLE IF EXISTS QRTZ_CALENDARS;
+
+ CREATE TABLE QRTZ_JOB_DETAILS(
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ JOB_NAME VARCHAR(200) NOT NULL,
+ JOB_GROUP VARCHAR(200) NOT NULL,
+ DESCRIPTION VARCHAR(250) NULL,
+ JOB_CLASS_NAME VARCHAR(250) NOT NULL,
+ IS_DURABLE VARCHAR(1) NOT NULL,
+ IS_NONCONCURRENT VARCHAR(1) NOT NULL,
+ IS_UPDATE_DATA VARCHAR(1) NOT NULL,
+ REQUESTS_RECOVERY VARCHAR(1) NOT NULL,
+ JOB_DATA BLOB NULL,
+ PRIMARY KEY (SCHED_NAME,JOB_NAME,JOB_GROUP))
+ ENGINE=InnoDB;
+
+ CREATE TABLE QRTZ_TRIGGERS (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ JOB_NAME VARCHAR(200) NOT NULL,
+ JOB_GROUP VARCHAR(200) NOT NULL,
+ DESCRIPTION VARCHAR(250) NULL,
+ NEXT_FIRE_TIME BIGINT(13) NULL,
+ PREV_FIRE_TIME BIGINT(13) NULL,
+ PRIORITY INTEGER NULL,
+ TRIGGER_STATE VARCHAR(16) NOT NULL,
+ TRIGGER_TYPE VARCHAR(8) NOT NULL,
+ START_TIME BIGINT(13) NOT NULL,
+ END_TIME BIGINT(13) NULL,
+ CALENDAR_NAME VARCHAR(200) NULL,
+ MISFIRE_INSTR SMALLINT(2) NULL,
+ JOB_DATA BLOB NULL,
+ PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ FOREIGN KEY (SCHED_NAME,JOB_NAME,JOB_GROUP)
+ REFERENCES QRTZ_JOB_DETAILS(SCHED_NAME,JOB_NAME,JOB_GROUP))
+ ENGINE=InnoDB;
+
+ CREATE TABLE QRTZ_SIMPLE_TRIGGERS (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ REPEAT_COUNT BIGINT(7) NOT NULL,
+ REPEAT_INTERVAL BIGINT(12) NOT NULL,
+ TIMES_TRIGGERED BIGINT(10) NOT NULL,
+ PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+ REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP))
+ ENGINE=InnoDB;
+
+ CREATE TABLE QRTZ_CRON_TRIGGERS (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ CRON_EXPRESSION VARCHAR(120) NOT NULL,
+ TIME_ZONE_ID VARCHAR(80),
+ PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+ REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP))
+ ENGINE=InnoDB;
+
+ CREATE TABLE QRTZ_SIMPROP_TRIGGERS
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ STR_PROP_1 VARCHAR(512) NULL,
+ STR_PROP_2 VARCHAR(512) NULL,
+ STR_PROP_3 VARCHAR(512) NULL,
+ INT_PROP_1 INT NULL,
+ INT_PROP_2 INT NULL,
+ LONG_PROP_1 BIGINT NULL,
+ LONG_PROP_2 BIGINT NULL,
+ DEC_PROP_1 NUMERIC(13,4) NULL,
+ DEC_PROP_2 NUMERIC(13,4) NULL,
+ BOOL_PROP_1 VARCHAR(1) NULL,
+ BOOL_PROP_2 VARCHAR(1) NULL,
+ PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+ REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP))
+ ENGINE=InnoDB;
+
+ CREATE TABLE QRTZ_BLOB_TRIGGERS (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ BLOB_DATA BLOB NULL,
+ PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ INDEX (SCHED_NAME,TRIGGER_NAME, TRIGGER_GROUP),
+ FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+ REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP))
+ ENGINE=InnoDB;
+
+ CREATE TABLE QRTZ_CALENDARS (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ CALENDAR_NAME VARCHAR(200) NOT NULL,
+ CALENDAR BLOB NOT NULL,
+ PRIMARY KEY (SCHED_NAME,CALENDAR_NAME))
+ ENGINE=InnoDB;
+
+ CREATE TABLE QRTZ_PAUSED_TRIGGER_GRPS (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ PRIMARY KEY (SCHED_NAME,TRIGGER_GROUP))
+ ENGINE=InnoDB;
+
+ CREATE TABLE QRTZ_FIRED_TRIGGERS (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ ENTRY_ID VARCHAR(95) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ INSTANCE_NAME VARCHAR(200) NOT NULL,
+ FIRED_TIME BIGINT(13) NOT NULL,
+ SCHED_TIME BIGINT(13) NOT NULL,
+ PRIORITY INTEGER NOT NULL,
+ STATE VARCHAR(16) NOT NULL,
+ JOB_NAME VARCHAR(200) NULL,
+ JOB_GROUP VARCHAR(200) NULL,
+ IS_NONCONCURRENT VARCHAR(1) NULL,
+ REQUESTS_RECOVERY VARCHAR(1) NULL,
+ PRIMARY KEY (SCHED_NAME,ENTRY_ID))
+ ENGINE=InnoDB;
+
+ CREATE TABLE QRTZ_SCHEDULER_STATE (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ INSTANCE_NAME VARCHAR(200) NOT NULL,
+ LAST_CHECKIN_TIME BIGINT(13) NOT NULL,
+ CHECKIN_INTERVAL BIGINT(13) NOT NULL,
+ PRIMARY KEY (SCHED_NAME,INSTANCE_NAME))
+ ENGINE=InnoDB;
+
+ CREATE TABLE QRTZ_LOCKS (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ LOCK_NAME VARCHAR(40) NOT NULL,
+ PRIMARY KEY (SCHED_NAME,LOCK_NAME))
+ ENGINE=InnoDB;
+
+ CREATE INDEX IDX_QRTZ_J_REQ_RECOVERY ON QRTZ_JOB_DETAILS(SCHED_NAME,REQUESTS_RECOVERY);
+ CREATE INDEX IDX_QRTZ_J_GRP ON QRTZ_JOB_DETAILS(SCHED_NAME,JOB_GROUP);
+
+ CREATE INDEX IDX_QRTZ_T_J ON QRTZ_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP);
+ CREATE INDEX IDX_QRTZ_T_JG ON QRTZ_TRIGGERS(SCHED_NAME,JOB_GROUP);
+ CREATE INDEX IDX_QRTZ_T_C ON QRTZ_TRIGGERS(SCHED_NAME,CALENDAR_NAME);
+ CREATE INDEX IDX_QRTZ_T_G ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP);
+ CREATE INDEX IDX_QRTZ_T_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE);
+ CREATE INDEX IDX_QRTZ_T_N_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_STATE);
+ CREATE INDEX IDX_QRTZ_T_N_G_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP,TRIGGER_STATE);
+ CREATE INDEX IDX_QRTZ_T_NEXT_FIRE_TIME ON QRTZ_TRIGGERS(SCHED_NAME,NEXT_FIRE_TIME);
+ CREATE INDEX IDX_QRTZ_T_NFT_ST ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE,NEXT_FIRE_TIME);
+ CREATE INDEX IDX_QRTZ_T_NFT_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME);
+ CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE);
+ CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE_GRP ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE);
+
+ CREATE INDEX IDX_QRTZ_FT_TRIG_INST_NAME ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME);
+ CREATE INDEX IDX_QRTZ_FT_INST_JOB_REQ_RCVRY ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME,REQUESTS_RECOVERY);
+ CREATE INDEX IDX_QRTZ_FT_J_G ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP);
+ CREATE INDEX IDX_QRTZ_FT_JG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_GROUP);
+ CREATE INDEX IDX_QRTZ_FT_T_G ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
+ CREATE INDEX IDX_QRTZ_FT_TG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_GROUP);
+
+ commit;
\ No newline at end of file