
Merge remote-tracking branch 'upstream/dev-1.1.0' into dev-1.1.0

pull/2/head
lidongdai committed 5 years ago (commit 497cb97bc0)

24 changed files (change counts in parentheses):

  1. escheduler-api/src/main/java/cn/escheduler/api/controller/MonitorController.java (4)
  2. escheduler-api/src/main/java/cn/escheduler/api/enums/Status.java (1)
  3. escheduler-api/src/main/java/cn/escheduler/api/service/DataSourceService.java (9)
  4. escheduler-api/src/main/java/cn/escheduler/api/service/MonitorService.java (41)
  5. escheduler-api/src/main/java/cn/escheduler/api/service/SchedulerService.java (10)
  6. escheduler-api/src/main/java/cn/escheduler/api/service/TenantService.java (30)
  7. escheduler-api/src/main/java/cn/escheduler/api/service/UsersService.java (40)
  8. escheduler-api/src/main/java/cn/escheduler/api/utils/ZookeeperMonitor.java (39)
  9. escheduler-api/src/test/java/cn/escheduler/api/utils/ZookeeperMonitorUtilsTest.java (29)
  10. escheduler-common/src/main/java/cn/escheduler/common/Constants.java (4)
  11. escheduler-common/src/main/java/cn/escheduler/common/queue/TaskQueueZkImpl.java (10)
  12. escheduler-common/src/main/java/cn/escheduler/common/utils/IpUtils.java (6)
  13. escheduler-common/src/main/java/cn/escheduler/common/zk/AbstractZKClient.java (87)
  14. escheduler-common/src/test/java/cn/escheduler/common/queue/TaskQueueImplTest.java (4)
  15. escheduler-common/src/test/java/cn/escheduler/common/utils/IpUtilsTest.java (41)
  16. escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/CreateEscheduler.java (8)
  17. escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/UpgradeEscheduler.java (2)
  18. escheduler-server/src/main/java/cn/escheduler/server/ResInfo.java (35)
  19. escheduler-server/src/main/java/cn/escheduler/server/zk/ZKMasterClient.java (67)
  20. escheduler-server/src/main/java/cn/escheduler/server/zk/ZKWorkerClient.java (31)
  21. escheduler-server/src/test/java/cn/escheduler/server/zk/ZKWorkerClientTest.java (5)
  22. install.sh (4)
  23. sql/escheduler.sql (436)
  24. sql/quartz.sql (179)

escheduler-api/src/main/java/cn/escheduler/api/controller/MonitorController.java (4 changes)

@@ -66,7 +66,7 @@ public class MonitorController extends BaseController{
 logger.info("login user: {}, query all master", loginUser.getUserName());
 try{
     logger.info("list master, user:{}", loginUser.getUserName());
-    Map<String, Object> result = serverService.queryMaster(loginUser);
+    Map<String, Object> result = monitorService.queryMaster(loginUser);
     return returnDataList(result);
 }catch (Exception e){
     logger.error(LIST_MASTERS_ERROR.getMsg(),e);
@@ -86,7 +86,7 @@ public class MonitorController extends BaseController{
 public Result listWorker(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser) {
     logger.info("login user: {}, query all workers", loginUser.getUserName());
     try{
-        Map<String, Object> result = serverService.queryWorker(loginUser);
+        Map<String, Object> result = monitorService.queryWorker(loginUser);
         return returnDataList(result);
     }catch (Exception e){
         logger.error(LIST_WORKERS_ERROR.getMsg(),e);

escheduler-api/src/main/java/cn/escheduler/api/enums/Status.java (1 change)

@@ -163,6 +163,7 @@ public enum Status {
 BATCH_DELETE_PROCESS_INSTANCE_BY_IDS_ERROR(10117,"batch delete process instance by ids {0} error"),
 PREVIEW_SCHEDULE_ERROR(10139,"preview schedule error"),
 PARSE_TO_CRON_EXPRESSION_ERROR(10140,"parse cron to cron expression error"),
+SCHEDULE_START_TIME_END_TIME_SAME(10141,"The start time must not be the same as the end"),
 UDF_FUNCTION_NOT_EXIST(20001, "UDF function not found"),

escheduler-api/src/main/java/cn/escheduler/api/service/DataSourceService.java (9 changes)

@@ -67,14 +67,10 @@ public class DataSourceService extends BaseService{
 public static final String PASSWORD = cn.escheduler.common.Constants.PASSWORD;
 public static final String OTHER = "other";
-@Autowired
-private ProjectMapper projectMapper;
 @Autowired
 private DataSourceMapper dataSourceMapper;
-@Autowired
-private ProjectService projectService;
 @Autowired
 private DatasourceUserMapper datasourceUserMapper;
@@ -522,7 +518,10 @@ public class DataSourceService extends BaseService{
 parameterMap.put(Constants.JDBC_URL, jdbcUrl);
 parameterMap.put(Constants.USER, userName);
 parameterMap.put(Constants.PASSWORD, password);
-parameterMap.put(Constants.PRINCIPAL,principal);
+if (CommonUtils.getKerberosStartupState() &&
+        (type == DbType.HIVE || type == DbType.SPARK)){
+    parameterMap.put(Constants.PRINCIPAL,principal);
+}
 if (other != null && !"".equals(other)) {
     Map map = JSONObject.parseObject(other, new TypeReference<LinkedHashMap<String, String>>() {
 });
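
Note: the hunk above means the datasource connection-parameter map only carries a Kerberos principal when Kerberos is enabled and the datasource type is Hive or Spark. A minimal standalone sketch of that rule follows; the enum and the kerberosEnabled flag are stand-ins for illustration, not the project's real CommonUtils/DbType API.

    import java.util.HashMap;
    import java.util.Map;

    public class PrincipalParamSketch {
        // stand-in for the DbType enum referenced in the diff above
        enum DbType { MYSQL, POSTGRESQL, HIVE, SPARK }

        static Map<String, String> buildParams(DbType type, boolean kerberosEnabled, String principal) {
            Map<String, String> parameterMap = new HashMap<>();
            // principal is only attached for Hive/Spark when Kerberos is switched on,
            // mirroring the condition introduced in the hunk above
            if (kerberosEnabled && (type == DbType.HIVE || type == DbType.SPARK)) {
                parameterMap.put("principal", principal);
            }
            return parameterMap;
        }

        public static void main(String[] args) {
            System.out.println(buildParams(DbType.HIVE, true, "hive/host@REALM"));  // {principal=hive/host@REALM}
            System.out.println(buildParams(DbType.MYSQL, true, "hive/host@REALM")); // {}
        }
    }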

escheduler-api/src/main/java/cn/escheduler/api/service/MonitorService.java (41 changes)

@@ -18,13 +18,16 @@ package cn.escheduler.api.service;
 import cn.escheduler.api.enums.Status;
 import cn.escheduler.api.utils.Constants;
-import cn.escheduler.api.utils.ZookeeperMonitorUtils;
+import cn.escheduler.api.utils.ZookeeperMonitor;
 import cn.escheduler.dao.MonitorDBDao;
+import cn.escheduler.dao.model.MasterServer;
 import cn.escheduler.dao.model.MonitorRecord;
 import cn.escheduler.dao.model.User;
 import cn.escheduler.dao.model.ZookeeperRecord;
+import org.apache.hadoop.mapred.Master;
 import org.springframework.stereotype.Service;
+import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -52,6 +55,22 @@ public class MonitorService extends BaseService{
 }
+/**
+ * query master list
+ *
+ * @param loginUser
+ * @return
+ */
+public Map<String,Object> queryMaster(User loginUser) {
+    Map<String, Object> result = new HashMap<>(5);
+    List<MasterServer> masterServers = new ZookeeperMonitor().getMasterServers();
+    result.put(Constants.DATA_LIST, masterServers);
+    putMsg(result,Status.SUCCESS);
+    return result;
+}
 /**
  * query zookeeper state
@@ -61,7 +80,7 @@ public class MonitorService extends BaseService{
 public Map<String,Object> queryZookeeperState(User loginUser) {
     Map<String, Object> result = new HashMap<>(5);
-    List<ZookeeperRecord> zookeeperRecordList = ZookeeperMonitorUtils.zookeeperInfoList();
+    List<ZookeeperRecord> zookeeperRecordList = ZookeeperMonitor.zookeeperInfoList();
     result.put(Constants.DATA_LIST, zookeeperRecordList);
     putMsg(result, Status.SUCCESS);
@@ -69,4 +88,22 @@ public class MonitorService extends BaseService{
     return result;
 }
+/**
+ * query master list
+ *
+ * @param loginUser
+ * @return
+ */
+public Map<String,Object> queryWorker(User loginUser) {
+    Map<String, Object> result = new HashMap<>(5);
+    List<MasterServer> workerServers = new ZookeeperMonitor().getWorkerServers();
+    result.put(Constants.DATA_LIST, workerServers);
+    putMsg(result,Status.SUCCESS);
+    return result;
+}
 }

escheduler-api/src/main/java/cn/escheduler/api/service/SchedulerService.java (10 changes)

@@ -119,6 +119,11 @@ public class SchedulerService extends BaseService {
 scheduleObj.setProcessDefinitionName(processDefinition.getName());
 ScheduleParam scheduleParam = JSONUtils.parseObject(schedule, ScheduleParam.class);
+if (DateUtils.differSec(scheduleParam.getStartTime(),scheduleParam.getEndTime()) == 0) {
+    logger.warn("The start time must not be the same as the end");
+    putMsg(result,Status.SCHEDULE_START_TIME_END_TIME_SAME);
+    return result;
+}
 scheduleObj.setStartTime(scheduleParam.getStartTime());
 scheduleObj.setEndTime(scheduleParam.getEndTime());
 if (!org.quartz.CronExpression.isValidExpression(scheduleParam.getCrontab())) {
@@ -205,6 +210,11 @@ public class SchedulerService extends BaseService {
 // updateProcessInstance param
 if (StringUtils.isNotEmpty(scheduleExpression)) {
     ScheduleParam scheduleParam = JSONUtils.parseObject(scheduleExpression, ScheduleParam.class);
+    if (DateUtils.differSec(scheduleParam.getStartTime(),scheduleParam.getEndTime()) == 0) {
+        logger.warn("The start time must not be the same as the end");
+        putMsg(result,Status.SCHEDULE_START_TIME_END_TIME_SAME);
+        return result;
+    }
     schedule.setStartTime(scheduleParam.getStartTime());
     schedule.setEndTime(scheduleParam.getEndTime());
     if (!org.quartz.CronExpression.isValidExpression(scheduleParam.getCrontab())) {
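
Note: the two hunks above add the same guard to both the create and update paths, so a schedule whose start time equals its end time is rejected with SCHEDULE_START_TIME_END_TIME_SAME. A minimal sketch of the check in isolation; differSec here is an assumed helper returning the difference in whole seconds, and the real DateUtils.differSec may differ in detail.

    import java.util.Date;

    public class ScheduleTimeGuardSketch {
        // assumed helper mirroring DateUtils.differSec from the diff: difference in whole seconds
        static long differSec(Date start, Date end) {
            return Math.abs(end.getTime() - start.getTime()) / 1000;
        }

        static boolean isValidRange(Date start, Date end) {
            // the added guard: identical start and end times are rejected
            return differSec(start, end) != 0;
        }

        public static void main(String[] args) {
            Date now = new Date();
            System.out.println(isValidRange(now, now));                              // false -> SCHEDULE_START_TIME_END_TIME_SAME
            System.out.println(isValidRange(now, new Date(now.getTime() + 60_000))); // true
        }
    }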

escheduler-api/src/main/java/cn/escheduler/api/service/TenantService.java (30 changes)

@@ -239,24 +239,25 @@ public class TenantService extends BaseService{
 if (PropertyUtils.getResUploadStartupState()){
     String tenantPath = HadoopUtils.getHdfsDataBasePath() + "/" + tenant.getTenantCode();
+    if (HadoopUtils.getInstance().exists(tenantPath)){
     String resourcePath = HadoopUtils.getHdfsDir(tenant.getTenantCode());
     FileStatus[] fileStatus = HadoopUtils.getInstance().listFileStatus(resourcePath);
     if (fileStatus.length > 0) {
         putMsg(result, Status.HDFS_TERANT_RESOURCES_FILE_EXISTS);
         return result;
     }
     fileStatus = HadoopUtils.getInstance().listFileStatus(HadoopUtils.getHdfsUdfDir(tenant.getTenantCode()));
     if (fileStatus.length > 0) {
         putMsg(result, Status.HDFS_TERANT_UDFS_FILE_EXISTS);
         return result;
     }
     HadoopUtils.getInstance().delete(tenantPath, true);
+    }
 }
 tenantMapper.deleteById(id);
 putMsg(result, Status.SUCCESS);
 return result;
 }
@@ -269,9 +270,6 @@ public class TenantService extends BaseService{
 public Map<String, Object> queryTenantList(User loginUser) {
     Map<String, Object> result = new HashMap<>(5);
-    // if (checkAdmin(loginUser, result)) {
-    //     return result;
-    // }
     List<Tenant> resourceList = tenantMapper.queryAllTenant();
     result.put(Constants.DATA_LIST, resourceList);

escheduler-api/src/main/java/cn/escheduler/api/service/UsersService.java (40 changes)

@@ -245,35 +245,35 @@ public class UsersService extends BaseService {
 Tenant newTenant = tenantMapper.queryById(tenantId);
 if (newTenant != null) {
     // if hdfs startup
-    if (PropertyUtils.getResUploadStartupState()){
+    if (PropertyUtils.getResUploadStartupState() && oldTenant != null){
         String newTenantCode = newTenant.getTenantCode();
         String oldResourcePath = HadoopUtils.getHdfsDataBasePath() + "/" + oldTenant.getTenantCode() + "/resources";
         String oldUdfsPath = HadoopUtils.getHdfsUdfDir(oldTenant.getTenantCode());
+        if (HadoopUtils.getInstance().exists(oldResourcePath)){
         String newResourcePath = HadoopUtils.getHdfsDataBasePath() + "/" + newTenantCode + "/resources";
         String newUdfsPath = HadoopUtils.getHdfsUdfDir(newTenantCode);
         //file resources list
         List<Resource> fileResourcesList = resourceMapper.queryResourceCreatedByUser(userId, 0);
         if (CollectionUtils.isNotEmpty(fileResourcesList)) {
             for (Resource resource : fileResourcesList) {
                 HadoopUtils.getInstance().copy(oldResourcePath + "/" + resource.getAlias(), newResourcePath, false, true);
             }
         }
         //udf resources
         List<Resource> udfResourceList = resourceMapper.queryResourceCreatedByUser(userId, 1);
         if (CollectionUtils.isNotEmpty(udfResourceList)) {
             for (Resource resource : udfResourceList) {
                 HadoopUtils.getInstance().copy(oldUdfsPath + "/" + resource.getAlias(), newUdfsPath, false, true);
             }
         }
         //Delete the user from the old tenant directory
         String oldUserPath = HadoopUtils.getHdfsDataBasePath() + "/" + oldTenant.getTenantCode() + "/home/" + userId;
         HadoopUtils.getInstance().delete(oldUserPath, true);
+        }
     }
 //create user in the new tenant directory
 String newUserPath = HadoopUtils.getHdfsDataBasePath() + "/" + newTenant.getTenantCode() + "/home/" + user.getId();

escheduler-api/src/main/java/cn/escheduler/api/utils/ZookeeperMonitorUtils.java → escheduler-api/src/main/java/cn/escheduler/api/utils/ZookeeperMonitor.java (39 changes)

@@ -1,7 +1,9 @@
 package cn.escheduler.api.utils;
 import cn.escheduler.common.zk.AbstractZKClient;
+import cn.escheduler.dao.model.MasterServer;
 import cn.escheduler.dao.model.ZookeeperRecord;
+import cn.escheduler.server.ResInfo;
 import org.apache.commons.lang3.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -9,14 +11,15 @@ import org.slf4j.LoggerFactory;
 import java.util.ArrayList;
 import java.util.Date;
 import java.util.List;
+import java.util.Map;
 /**
  * monitor zookeeper info
  */
-public class ZookeeperMonitorUtils {
-    private static final Logger LOG = LoggerFactory.getLogger(ZookeeperMonitorUtils.class);
+public class ZookeeperMonitor extends AbstractZKClient{
+    private static final Logger LOG = LoggerFactory.getLogger(ZookeeperMonitor.class);
     private static final String zookeeperList = AbstractZKClient.getZookeeperQuorum();
 /**
@@ -33,6 +36,38 @@ public class ZookeeperMonitorUtils {
     return null;
 }
+/**
+ * get server list.
+ * @param isMaster
+ * @return
+ */
+public List<MasterServer> getServers(boolean isMaster){
+    List<MasterServer> masterServers = new ArrayList<>();
+    Map<String, String> masterMap = getServerList(isMaster);
+    String parentPath = isMaster ? getMasterZNodeParentPath() : getWorkerZNodeParentPath();
+    for(String path : masterMap.keySet()){
+        MasterServer masterServer = ResInfo.parseHeartbeatForZKInfo(masterMap.get(path));
+        masterServer.setZkDirectory( parentPath + "/"+ path);
+        masterServers.add(masterServer);
+    }
+    return masterServers;
+}
+/**
+ * get master servers
+ * @return
+ */
+public List<MasterServer> getMasterServers(){
+    return getServers(true);
+}
+/**
+ * master construct is the same with worker, use the master instead
+ * @return
+ */
+public List<MasterServer> getWorkerServers(){
+    return getServers(false);
+}
 private static List<ZookeeperRecord> zookeeperInfoList(String zookeeperServers) {

escheduler-api/src/test/java/cn/escheduler/api/utils/ZookeeperMonitorUtilsTest.java (29 changes, new file)

@@ -0,0 +1,29 @@
package cn.escheduler.api.utils;
import cn.escheduler.dao.model.MasterServer;
import org.junit.Assert;
import org.junit.Test;
import java.util.List;
public class ZookeeperMonitorUtilsTest {
@Test
public void testGetMasterLsit(){
ZookeeperMonitor zookeeperMonitor = new ZookeeperMonitor();
List<MasterServer> masterServerList = zookeeperMonitor.getMasterServers();
List<MasterServer> workerServerList = zookeeperMonitor.getWorkerServers();
Assert.assertEquals(masterServerList.size(), 1);
Assert.assertEquals(workerServerList.size(), 1);
}
}

escheduler-common/src/main/java/cn/escheduler/common/Constants.java (4 changes)

@@ -337,7 +337,7 @@ public final class Constants {
 /**
  * email regex
  */
-public static final Pattern REGEX_MAIL_NAME = Pattern.compile("^[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+(\\.[a-zA-Z0-9_-]+)+$");
+public static final Pattern REGEX_MAIL_NAME = Pattern.compile("^([a-z0-9A-Z]+[-|\\.]?)+[a-z0-9A-Z]@([a-z0-9A-Z]+(-[a-z0-9A-Z]+)?\\.)+[a-zA-Z]{2,}$");
 /**
  * read permission
@@ -490,7 +490,7 @@ public final class Constants {
 public static final String DEFAULT = "Default";
 public static final String PASSWORD = "password";
-public static final String XXXXXX = "xxxxxx";
+public static final String XXXXXX = "******";
 public static String TASK_RECORD_TABLE_HIVE_LOG = "eamp_hive_log_hd";
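
Note: the tightened REGEX_MAIL_NAME above mainly affects addresses with dots or hyphens in the local part, which the old character-class pattern rejected. A quick check against both patterns (copied verbatim from the hunk above; class and constant names here are just for illustration):

    import java.util.regex.Pattern;

    public class MailRegexCheck {
        // old pattern from the left-hand side of the hunk
        static final Pattern OLD = Pattern.compile("^[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+(\\.[a-zA-Z0-9_-]+)+$");
        // new pattern from the right-hand side of the hunk
        static final Pattern NEW = Pattern.compile("^([a-z0-9A-Z]+[-|\\.]?)+[a-z0-9A-Z]@([a-z0-9A-Z]+(-[a-z0-9A-Z]+)?\\.)+[a-zA-Z]{2,}$");

        public static void main(String[] args) {
            String dotted = "first.last@example.com";
            System.out.println(OLD.matcher(dotted).matches()); // false: '.' is not allowed in the old local-part class
            System.out.println(NEW.matcher(dotted).matches()); // true
        }
    }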

escheduler-common/src/main/java/cn/escheduler/common/queue/TaskQueueZkImpl.java (10 changes)

@@ -417,16 +417,6 @@ public class TaskQueueZkImpl extends AbstractZKClient implements ITaskQueue {
 }
 }
-/**
- * get zookeeper client of CuratorFramework
- * @return
- */
-public CuratorFramework getZkClient() {
-    return zkClient;
-}
 /**
  * Get the task queue path
  * @param key task queue name

escheduler-common/src/main/java/cn/escheduler/common/utils/IpUtils.java (6 changes)

@@ -61,10 +61,4 @@ public class IpUtils {
     return sb.toString();
 }
-public static void main(String[] args){
-    long ipLong = ipToLong("11.3.4.5");
-    logger.info(longToIp(ipLong));
-}
 }

escheduler-common/src/main/java/cn/escheduler/common/zk/AbstractZKClient.java (87 changes)

@@ -30,13 +30,12 @@ import org.apache.curator.framework.imps.CuratorFrameworkState;
 import org.apache.curator.framework.state.ConnectionState;
 import org.apache.curator.framework.state.ConnectionStateListener;
 import org.apache.curator.retry.ExponentialBackoffRetry;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
 import org.apache.zookeeper.CreateMode;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import java.util.ArrayList;
-import java.util.Date;
-import java.util.List;
+import java.util.*;
 import static cn.escheduler.common.Constants.*;
@@ -213,9 +212,9 @@
 protected void initSystemZNode(){
     try {
         // read master node parent path from conf
-        masterZNodeParentPath = conf.getString(Constants.ZOOKEEPER_ESCHEDULER_MASTERS);
+        masterZNodeParentPath = getMasterZNodeParentPath();
         // read worker node parent path from conf
-        workerZNodeParentPath = conf.getString(Constants.ZOOKEEPER_ESCHEDULER_WORKERS);
+        workerZNodeParentPath = getWorkerZNodeParentPath();
         // read server node parent path from conf
         deadServerZNodeParentPath = conf.getString(ZOOKEEPER_ESCHEDULER_DEAD_SERVERS);
@@ -243,6 +242,7 @@
 }
 }
 public void removeDeadServerByHost(String host, String serverType) throws Exception {
     List<String> deadServers = zkClient.getChildren().forPath(deadServerZNodeParentPath);
     for(String serverPath : deadServers){
@@ -291,6 +291,8 @@
 }
 /**
  * for stop server
  * @param serverStoppable
@@ -339,6 +341,81 @@
     return sb.toString();
 }
/**
* get master server list map.
* result : {host : resource info}
* @return
*/
public Map<String, String> getServerList(boolean isMaster ){
Map<String, String> masterMap = new HashMap<>();
try {
String path = isMaster ? getMasterZNodeParentPath() : getWorkerZNodeParentPath();
List<String> serverList = getZkClient().getChildren().forPath(path);
for(String server : serverList){
byte[] bytes = getZkClient().getData().forPath(path + "/" + server);
masterMap.putIfAbsent(server, new String(bytes));
}
} catch (Exception e) {
e.printStackTrace();
}
return masterMap;
}
/**
* get zkclient
* @return
*/
public CuratorFramework getZkClient() {
return zkClient;
}
/**
* get worker node parent path
* @return
*/
protected String getWorkerZNodeParentPath(){return conf.getString(Constants.ZOOKEEPER_ESCHEDULER_WORKERS);};
/**
* get master node parent path
* @return
*/
protected String getMasterZNodeParentPath(){return conf.getString(Constants.ZOOKEEPER_ESCHEDULER_MASTERS);}
/**
* get master lock path
* @return
*/
public String getMasterLockPath(){
return conf.getString(Constants.ZOOKEEPER_ESCHEDULER_LOCK_MASTERS);
}
/**
* get master start up lock path
* @return
*/
public String getMasterStartUpLockPath(){
return conf.getString(Constants.ZOOKEEPER_ESCHEDULER_LOCK_FAILOVER_STARTUP_MASTERS);
}
/**
* get master failover lock path
* @return
*/
public String getMasterFailoverLockPath(){
return conf.getString(Constants.ZOOKEEPER_ESCHEDULER_LOCK_FAILOVER_MASTERS);
}
/**
* get worker failover lock path
* @return
*/
public String getWorkerFailoverLockPath(){
return conf.getString(Constants.ZOOKEEPER_ESCHEDULER_LOCK_FAILOVER_WORKERS);
}
 @Override
 public String toString() {
     return "AbstractZKClient{" +

escheduler-common/src/test/java/cn/escheduler/common/queue/TaskQueueImplTest.java (4 changes)

@@ -20,13 +20,11 @@ import cn.escheduler.common.Constants;
 import cn.escheduler.common.utils.IpUtils;
 import cn.escheduler.common.utils.OSUtils;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import java.util.Arrays;
 import java.util.List;
 import java.util.Random;
@@ -84,8 +82,6 @@ public class TaskQueueImplTest {
     return;
 }
-String node2 = tasks.get(0);
 }

escheduler-common/src/test/java/cn/escheduler/common/utils/IpUtilsTest.java (41 changes, new file)

@@ -0,0 +1,41 @@
package cn.escheduler.common.utils;
import org.junit.Assert;
import org.junit.Test;
import static org.junit.Assert.*;
public class IpUtilsTest {
@Test
public void ipToLong() {
String ip = "192.168.110.1";
String ip2 = "0.0.0.0";
long longNumber = IpUtils.ipToLong(ip);
long longNumber2 = IpUtils.ipToLong(ip2);
System.out.println(longNumber);
Assert.assertEquals(longNumber, 3232263681L);
Assert.assertEquals(longNumber2, 0L);
String ip3 = "255.255.255.255";
long longNumber3 = IpUtils.ipToLong(ip3);
System.out.println(longNumber3);
Assert.assertEquals(longNumber3, 4294967295L);
}
@Test
public void longToIp() {
String ip = "192.168.110.1";
String ip2 = "0.0.0.0";
long longNum = 3232263681L;
String i1 = IpUtils.longToIp(longNum);
String i2 = IpUtils.longToIp(0);
Assert.assertEquals(ip, i1);
Assert.assertEquals(ip2, i2);
}
}

escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/CreateEscheduler.java (8 changes)

@@ -30,13 +30,15 @@ public class CreateEscheduler {
 public static void main(String[] args) {
     EschedulerManager eschedulerManager = new EschedulerManager();
-    eschedulerManager.initEscheduler();
-    logger.info("init escheduler finished");
     try {
+        eschedulerManager.initEscheduler();
+        logger.info("init escheduler finished");
         eschedulerManager.upgradeEscheduler();
         logger.info("upgrade escheduler finished");
+        logger.info("create escheduler success");
     } catch (Exception e) {
-        logger.error("upgrade escheduler failed",e);
+        logger.error("create escheduler failed",e);
     }
 }

escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/UpgradeEscheduler.java (2 changes)

@@ -30,7 +30,7 @@ public class UpgradeEscheduler {
 EschedulerManager eschedulerManager = new EschedulerManager();
 try {
     eschedulerManager.upgradeEscheduler();
-    logger.info("upgrade escheduler finished");
+    logger.info("upgrade escheduler success");
 } catch (Exception e) {
     logger.error(e.getMessage(),e);
     logger.info("Upgrade escheduler failed");

escheduler-server/src/main/java/cn/escheduler/server/ResInfo.java (35 changes)

@@ -17,8 +17,12 @@
 package cn.escheduler.server;
 import cn.escheduler.common.Constants;
+import cn.escheduler.common.utils.DateUtils;
 import cn.escheduler.common.utils.JSONUtils;
 import cn.escheduler.common.utils.OSUtils;
+import cn.escheduler.dao.model.MasterServer;
+import java.util.Date;
 /**
  * heartbeat for ZK reigster res info
@@ -98,6 +102,16 @@ public class ResInfo {
 }
+public static String getHeartBeatInfo(Date now){
+    return buildHeartbeatForZKInfo(OSUtils.getHost(),
+            OSUtils.getProcessID(),
+            OSUtils.cpuUsage(),
+            OSUtils.memoryUsage(),
+            DateUtils.dateToString(now),
+            DateUtils.dateToString(now));
+}
 /**
  * build heartbeat info for zk
  * @param host
@@ -119,4 +133,25 @@ public class ResInfo {
     + lastHeartbeatTime;
 }
+/**
+ * parse heartbeat info for zk
+ * @param heartBeatInfo
+ * @return
+ */
+public static MasterServer parseHeartbeatForZKInfo(String heartBeatInfo){
+    MasterServer masterServer = null;
+    String[] masterArray = heartBeatInfo.split(Constants.COMMA);
+    if(masterArray.length != 6){
+        return masterServer;
+    }
+    masterServer = new MasterServer();
+    masterServer.setHost(masterArray[0]);
+    masterServer.setPort(Integer.parseInt(masterArray[1]));
+    masterServer.setResInfo(getResInfoJson(Double.parseDouble(masterArray[2]), Double.parseDouble(masterArray[3])));
+    masterServer.setCreateTime(DateUtils.stringToDate(masterArray[4]));
+    masterServer.setLastHeartbeatTime(DateUtils.stringToDate(masterArray[5]));
+    return masterServer;
+}
 }
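
Note: parseHeartbeatForZKInfo above implies the heartbeat payload written to the master/worker znodes is a string with exactly six comma-separated fields, in the order host, process id, CPU usage, memory usage, create time, last-heartbeat time (assuming Constants.COMMA is a plain comma). A small illustration of that format; the sample values and the SimpleDateFormat pattern are assumptions for illustration, not taken from the project's DateUtils:

    import java.text.SimpleDateFormat;
    import java.util.Date;

    public class HeartbeatFormatSketch {
        public static void main(String[] args) {
            SimpleDateFormat fmt = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); // assumed date format
            String now = fmt.format(new Date());

            // six comma-separated fields, in the order parseHeartbeatForZKInfo reads them:
            // host, process id, cpu usage, memory usage, create time, last heartbeat time
            String heartbeat = "192.168.0.1,12345,0.35,0.52," + now + "," + now;

            String[] parts = heartbeat.split(",");
            if (parts.length != 6) {             // same length check as in the diff above
                throw new IllegalStateException("unexpected heartbeat format");
            }
            System.out.println("host=" + parts[0] + ", pid=" + parts[1]
                    + ", cpu=" + Double.parseDouble(parts[2])
                    + ", mem=" + Double.parseDouble(parts[3]));
        }
    }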

escheduler-server/src/main/java/cn/escheduler/server/zk/ZKMasterClient.java (67 changes)

@@ -204,7 +204,7 @@ public class ZKMasterClient extends AbstractZKClient {
 }
 // specify the format of stored data in ZK nodes
-String heartbeatZKInfo = getOsInfo(now);
+String heartbeatZKInfo = ResInfo.getHeartBeatInfo(now);
 // create temporary sequence nodes for master znode
 masterZNode = zkClient.create().withMode(CreateMode.EPHEMERAL_SEQUENTIAL).forPath(
         masterZNodeParentPath + "/" + OSUtils.getHost() + "_", heartbeatZKInfo.getBytes());
@@ -259,10 +259,10 @@ public class ZKMasterClient extends AbstractZKClient {
     return false;
 }
-List<String> masterZNodeList = null;
-masterZNodeList = zkClient.getChildren().forPath(path);
-if (CollectionUtils.isNotEmpty(masterZNodeList)){
-    for (String masterZNode : masterZNodeList){
+List<String> serverList = null;
+serverList = zkClient.getChildren().forPath(path);
+if (CollectionUtils.isNotEmpty(serverList)){
+    for (String masterZNode : serverList){
         if (masterZNode.startsWith(host)){
             return true;
         }
@@ -423,22 +423,6 @@ public class ZKMasterClient extends AbstractZKClient {
 }
-/**
- * get os info
- * @param now
- * @return
- */
-private String getOsInfo(Date now) {
-    return ResInfo.buildHeartbeatForZKInfo(OSUtils.getHost(),
-            OSUtils.getProcessID(),
-            OSUtils.cpuUsage(),
-            OSUtils.memoryUsage(),
-            DateUtils.dateToString(now),
-            DateUtils.dateToString(now));
-}
 /**
  * get master znode
  * @return
@@ -448,45 +432,6 @@ public class ZKMasterClient extends AbstractZKClient {
 }
-/**
- * get master lock path
- * @return
- */
-public String getMasterLockPath(){
-    return conf.getString(Constants.ZOOKEEPER_ESCHEDULER_LOCK_MASTERS);
-}
-/**
- * get master start up lock path
- * @return
- */
-public String getMasterStartUpLockPath(){
-    return conf.getString(Constants.ZOOKEEPER_ESCHEDULER_LOCK_FAILOVER_STARTUP_MASTERS);
-}
-/**
- * get master failover lock path
- * @return
- */
-public String getMasterFailoverLockPath(){
-    return conf.getString(Constants.ZOOKEEPER_ESCHEDULER_LOCK_FAILOVER_MASTERS);
-}
-/**
- * get worker failover lock path
- * @return
- */
-public String getWorkerFailoverLockPath(){
-    return conf.getString(Constants.ZOOKEEPER_ESCHEDULER_LOCK_FAILOVER_WORKERS);
-}
-/**
- * get zkclient
- * @return
- */
-public CuratorFramework getZkClient() {
-    return zkClient;
-}
 /**
@@ -580,7 +525,7 @@ public class ZKMasterClient extends AbstractZKClient {
 }
 /**
- * get host ip
+ * get host ip, string format: masterParentPath/ip_000001/value
  * @param path
  * @return
 */

escheduler-server/src/main/java/cn/escheduler/server/zk/ZKWorkerClient.java (31 changes)

@@ -116,11 +116,10 @@ public class ZKWorkerClient extends AbstractZKClient {
 public String initWorkZNode() throws Exception {
-    Date now = new Date();
-    String heartbeatZKInfo = getOsInfo(now);
+    String heartbeatZKInfo = ResInfo.getHeartBeatInfo(new Date());
     workerZNode = workerZNodeParentPath + "/" + OSUtils.getHost() + "_";
     workerZNode = zkClient.create().withMode(CreateMode.EPHEMERAL_SEQUENTIAL).forPath(workerZNode,
             heartbeatZKInfo.getBytes());
     logger.info("register worker node {} success", workerZNode);
@@ -141,7 +140,6 @@ public class ZKWorkerClient extends AbstractZKClient {
 workerZNode = workerZNodeParentPath + "/" + OSUtils.getHost() + "_";
 List<String> workerZNodeList = zkClient.getChildren().forPath(workerZNodeParentPath);
 if (CollectionUtils.isNotEmpty(workerZNodeList)){
     boolean flag = false;
     for (String workerZNode : workerZNodeList){
@@ -241,21 +239,6 @@ public class ZKWorkerClient extends AbstractZKClient {
 }
-/**
- * get os info
- * @param now
- * @return
- */
-private String getOsInfo(Date now) {
-    return ResInfo.buildHeartbeatForZKInfo(OSUtils.getHost(),
-            OSUtils.getProcessID(),
-            OSUtils.cpuUsage(),
-            OSUtils.memoryUsage(),
-            DateUtils.dateToString(now),
-            DateUtils.dateToString(now));
-}
 /**
  * get worker znode
  * @return
@@ -264,16 +247,6 @@ public class ZKWorkerClient extends AbstractZKClient {
     return workerZNode;
 }
-/**
- * get zkclient
- * @return
- */
-public CuratorFramework getZkClient() {
-    return zkClient;
-}
 /**
  * get worker lock path
  * @return

escheduler-server/src/test/java/cn/escheduler/server/zk/ZKWorkerClientTest.java (5 changes)

@@ -1,6 +1,7 @@
 package cn.escheduler.server.zk;
 import cn.escheduler.common.Constants;
+import cn.escheduler.common.zk.AbstractZKClient;
 import org.junit.Test;
 import java.util.Arrays;
@@ -17,8 +18,8 @@ public class ZKWorkerClientTest {
 public void getZKWorkerClient() throws Exception {
-    ZKWorkerClient zkWorkerClient = ZKWorkerClient.getZKWorkerClient();
-    zkWorkerClient.removeDeadServerByHost("127.0.0.1", Constants.WORKER_PREFIX);
+    // ZKWorkerClient zkWorkerClient = ZKWorkerClient.getZKWorkerClient();
+    // zkWorkerClient.removeDeadServerByHost("127.0.0.1", Constants.WORKER_PREFIX);
 }

install.sh (4 changes)

@@ -134,7 +134,7 @@ s3Endpoint="http://192.168.199.91:9010"
 s3AccessKey="A3DXS30FO22544RE"
 s3SecretKey="OloCLq3n+8+sdPHUhJ21XrSxTC+JK"
-# resourcemanager HA配置,如果是单resourcemanager,这里为空即可
+# resourcemanager HA配置,如果是单resourcemanager,这里为yarnHaIps=""
 yarnHaIps="192.168.xx.xx,192.168.xx.xx"
 # 如果是单 resourcemanager,只需要配置一个主机名称,如果是resourcemanager HA,则默认配置就好
@@ -144,7 +144,7 @@ singleYarnIp="ark1"
 hdfsPath="/escheduler"
 # 拥有在hdfs根路径/下创建目录权限的用户
-# 注意:如果开启了kerberos,则直接hdfsRootUser=,就可以
+# 注意:如果开启了kerberos,则直接hdfsRootUser="",就可以
 hdfsRootUser="hdfs"
 # common 配置

sql/escheduler.sql (436 changes, file deleted)

@@ -1,436 +0,0 @@
/*
Navicat MySQL Data Transfer
Source Server : xx.xx
Source Server Version : 50725
Source Host : 192.168.xx.xx:3306
Source Database : escheduler
Target Server Type : MYSQL
Target Server Version : 50725
File Encoding : 65001
Date: 2019-03-23 11:47:30
*/
SET FOREIGN_KEY_CHECKS=0;
-- ----------------------------
-- Table structure for t_escheduler_alert
-- ----------------------------
DROP TABLE IF EXISTS `t_escheduler_alert`;
CREATE TABLE `t_escheduler_alert` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`title` varchar(64) DEFAULT NULL COMMENT '消息标题',
`show_type` tinyint(4) DEFAULT NULL COMMENT '发送格式,0是TABLE,1是TEXT',
`content` text COMMENT '消息内容(可以是邮件,可以是短信。邮件是JSON Map存放,短信是字符串)',
`alert_type` tinyint(4) DEFAULT NULL COMMENT '0是邮件,1是短信',
`alert_status` tinyint(4) DEFAULT '0' COMMENT '0是待执行,1是执行成功,2执行失败',
`log` text COMMENT '执行日志',
`alertgroup_id` int(11) DEFAULT NULL COMMENT '发送组',
`receivers` text COMMENT '收件人',
`receivers_cc` text COMMENT '抄送人',
`create_time` datetime DEFAULT NULL COMMENT '创建时间',
`update_time` datetime DEFAULT NULL COMMENT '更新时间',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_escheduler_alertgroup
-- ----------------------------
DROP TABLE IF EXISTS `t_escheduler_alertgroup`;
CREATE TABLE `t_escheduler_alertgroup` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`group_name` varchar(255) DEFAULT NULL COMMENT '组名称',
`group_type` tinyint(4) DEFAULT NULL COMMENT '组类型(邮件0,短信1...)',
`desc` varchar(255) DEFAULT NULL COMMENT '备注',
`create_time` datetime DEFAULT NULL COMMENT '创建时间',
`update_time` datetime DEFAULT NULL COMMENT '更新时间',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_escheduler_command
-- ----------------------------
DROP TABLE IF EXISTS `t_escheduler_command`;
CREATE TABLE `t_escheduler_command` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`command_type` tinyint(4) DEFAULT NULL COMMENT '命令类型:0 启动工作流,1 从当前节点开始执行,2 恢复被容错的工作流,3 恢复暂停流程,4 从失败节点开始执行,5 补数,6 调度,7 重跑,8 暂停,9 停止,10 恢复等待线程',
`process_definition_id` int(11) DEFAULT NULL COMMENT '流程定义id',
`command_param` text COMMENT '命令的参数(json格式)',
`task_depend_type` tinyint(4) DEFAULT NULL COMMENT '节点依赖类型:0 当前节点,1 向前执行,2 向后执行',
`failure_strategy` tinyint(4) DEFAULT '0' COMMENT '失败策略:0结束,1继续',
`warning_type` tinyint(4) DEFAULT '0' COMMENT '告警类型:0 不发,1 流程成功发,2 流程失败发,3 成功失败都发',
`warning_group_id` int(11) DEFAULT NULL COMMENT '告警组',
`schedule_time` datetime DEFAULT NULL COMMENT '预期运行时间',
`start_time` datetime DEFAULT NULL COMMENT '开始时间',
`executor_id` int(11) DEFAULT NULL COMMENT '执行用户id',
`dependence` varchar(255) DEFAULT NULL COMMENT '依赖字段',
`update_time` datetime DEFAULT NULL COMMENT '更新时间',
`process_instance_priority` int(11) DEFAULT NULL COMMENT '流程实例优先级:0 Highest,1 High,2 Medium,3 Low,4 Lowest',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_escheduler_datasource
-- ----------------------------
DROP TABLE IF EXISTS `t_escheduler_datasource`;
CREATE TABLE `t_escheduler_datasource` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`name` varchar(64) NOT NULL COMMENT '数据源名称',
`note` varchar(256) DEFAULT NULL COMMENT '描述',
`type` tinyint(4) NOT NULL COMMENT '数据源类型:0 mysql,1 postgresql,2 hive,3 spark',
`user_id` int(11) NOT NULL COMMENT '创建用户id',
`connection_params` text NOT NULL COMMENT '连接参数(json格式)',
`create_time` datetime NOT NULL COMMENT '创建时间',
`update_time` datetime DEFAULT NULL COMMENT '更新时间',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_escheduler_master_server
-- ----------------------------
DROP TABLE IF EXISTS `t_escheduler_master_server`;
CREATE TABLE `t_escheduler_master_server` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`host` varchar(45) DEFAULT NULL COMMENT 'ip',
`port` int(11) DEFAULT NULL COMMENT '进程号',
`zk_directory` varchar(64) DEFAULT NULL COMMENT 'zk注册目录',
`res_info` varchar(256) DEFAULT NULL COMMENT '集群资源信息:json格式{"cpu":xxx,"memroy":xxx}',
`create_time` datetime DEFAULT NULL COMMENT '创建时间',
`last_heartbeat_time` datetime DEFAULT NULL COMMENT '最后心跳时间',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_escheduler_process_definition
-- ----------------------------
DROP TABLE IF EXISTS `t_escheduler_process_definition`;
CREATE TABLE `t_escheduler_process_definition` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`name` varchar(255) DEFAULT NULL COMMENT '流程定义名称',
`version` int(11) DEFAULT NULL COMMENT '流程定义版本',
`release_state` tinyint(4) DEFAULT NULL COMMENT '流程定义的发布状态:0 未上线 1已上线',
`project_id` int(11) DEFAULT NULL COMMENT '项目id',
`user_id` int(11) DEFAULT NULL COMMENT '流程定义所属用户id',
`process_definition_json` longtext COMMENT '流程定义json串',
`desc` text COMMENT '流程定义描述',
`global_params` text COMMENT '全局参数',
`flag` tinyint(4) DEFAULT NULL COMMENT '流程是否可用\r\n:0 不可用\r\n,1 可用',
`locations` text COMMENT '节点坐标信息',
`connects` text COMMENT '节点连线信息',
`receivers` text COMMENT '收件人',
`receivers_cc` text COMMENT '抄送人',
`create_time` datetime DEFAULT NULL COMMENT '创建时间',
`update_time` datetime DEFAULT NULL COMMENT '更新时间',
PRIMARY KEY (`id`),
KEY `process_definition_index` (`project_id`,`id`) USING BTREE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_escheduler_process_instance
-- ----------------------------
DROP TABLE IF EXISTS `t_escheduler_process_instance`;
CREATE TABLE `t_escheduler_process_instance` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`name` varchar(255) DEFAULT NULL COMMENT '流程实例名称',
`process_definition_id` int(11) DEFAULT NULL COMMENT '流程定义id',
`state` tinyint(4) DEFAULT NULL COMMENT '流程实例状态:0 提交成功,1 正在运行,2 准备暂停,3 暂停,4 准备停止,5 停止,6 失败,7 成功,8 需要容错,9 kill,10 等待线程,11 等待依赖完成',
`recovery` tinyint(4) DEFAULT NULL COMMENT '流程实例容错标识:0 正常,1 需要被容错重启',
`start_time` datetime DEFAULT NULL COMMENT '流程实例开始时间',
`end_time` datetime DEFAULT NULL COMMENT '流程实例结束时间',
`run_times` int(11) DEFAULT NULL COMMENT '流程实例运行次数',
`host` varchar(45) DEFAULT NULL COMMENT '流程实例所在的机器',
`command_type` tinyint(4) DEFAULT NULL COMMENT '命令类型:0 启动工作流,1 从当前节点开始执行,2 恢复被容错的工作流,3 恢复暂停流程,4 从失败节点开始执行,5 补数,6 调度,7 重跑,8 暂停,9 停止,10 恢复等待线程',
`command_param` text COMMENT '命令的参数(json格式)',
`task_depend_type` tinyint(4) DEFAULT NULL COMMENT '节点依赖类型:0 当前节点,1 向前执行,2 向后执行',
`max_try_times` tinyint(4) DEFAULT '0' COMMENT '最大重试次数',
`failure_strategy` tinyint(4) DEFAULT '0' COMMENT '失败策略 0 失败后结束,1 失败后继续',
`warning_type` tinyint(4) DEFAULT '0' COMMENT '告警类型:0 不发,1 流程成功发,2 流程失败发,3 成功失败都发',
`warning_group_id` int(11) DEFAULT NULL COMMENT '告警组id',
`schedule_time` datetime DEFAULT NULL COMMENT '预期运行时间',
`command_start_time` datetime DEFAULT NULL COMMENT '开始命令时间',
`global_params` text COMMENT '全局参数(固化流程定义的参数)',
`process_instance_json` longtext COMMENT '流程实例json(copy的流程定义的json)',
`flag` tinyint(4) DEFAULT '1' COMMENT '是否可用,1 可用,0不可用',
`update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
`is_sub_process` int(11) DEFAULT '0' COMMENT '是否是子工作流 1 是,0 不是',
`executor_id` int(11) NOT NULL COMMENT '命令执行者',
`locations` text COMMENT '节点坐标信息',
`connects` text COMMENT '节点连线信息',
`history_cmd` text COMMENT '历史命令,记录所有对流程实例的操作',
`dependence_schedule_times` text COMMENT '依赖节点的预估时间',
`process_instance_priority` int(11) DEFAULT NULL COMMENT '流程实例优先级:0 Highest,1 High,2 Medium,3 Low,4 Lowest',
PRIMARY KEY (`id`),
KEY `process_instance_index` (`process_definition_id`,`id`) USING BTREE,
KEY `start_time_index` (`start_time`) USING BTREE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_escheduler_project
-- ----------------------------
DROP TABLE IF EXISTS `t_escheduler_project`;
CREATE TABLE `t_escheduler_project` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`name` varchar(100) DEFAULT NULL COMMENT '项目名称',
`desc` varchar(200) DEFAULT NULL COMMENT '项目描述',
`user_id` int(11) DEFAULT NULL COMMENT '所属用户',
`flag` tinyint(4) DEFAULT '1' COMMENT '是否可用 1 可用,0 不可用',
`create_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
PRIMARY KEY (`id`),
KEY `user_id_index` (`user_id`) USING BTREE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_escheduler_queue
-- ----------------------------
DROP TABLE IF EXISTS `t_escheduler_queue`;
CREATE TABLE `t_escheduler_queue` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`queue_name` varchar(64) DEFAULT NULL COMMENT '队列名称',
`queue` varchar(64) DEFAULT NULL COMMENT 'yarn队列名称',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_escheduler_relation_datasource_user
-- ----------------------------
DROP TABLE IF EXISTS `t_escheduler_relation_datasource_user`;
CREATE TABLE `t_escheduler_relation_datasource_user` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`user_id` int(11) NOT NULL COMMENT '用户id',
`datasource_id` int(11) DEFAULT NULL COMMENT '数据源id',
`perm` int(11) DEFAULT '1' COMMENT '权限',
`create_time` datetime DEFAULT NULL COMMENT '创建时间',
`update_time` datetime DEFAULT NULL COMMENT '更新时间',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_escheduler_relation_process_instance
-- ----------------------------
DROP TABLE IF EXISTS `t_escheduler_relation_process_instance`;
CREATE TABLE `t_escheduler_relation_process_instance` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`parent_process_instance_id` int(11) DEFAULT NULL COMMENT '父流程实例id',
`parent_task_instance_id` int(11) DEFAULT NULL COMMENT '父任务实例id',
`process_instance_id` int(11) DEFAULT NULL COMMENT '子流程实例id',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_escheduler_relation_project_user
-- ----------------------------
DROP TABLE IF EXISTS `t_escheduler_relation_project_user`;
CREATE TABLE `t_escheduler_relation_project_user` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`user_id` int(11) NOT NULL COMMENT '用户id',
`project_id` int(11) DEFAULT NULL COMMENT '项目id',
`perm` int(11) DEFAULT '1' COMMENT '权限',
`create_time` datetime DEFAULT NULL COMMENT '创建时间',
`update_time` datetime DEFAULT NULL COMMENT '更新时间',
PRIMARY KEY (`id`),
KEY `user_id_index` (`user_id`) USING BTREE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_escheduler_relation_resources_user
-- ----------------------------
DROP TABLE IF EXISTS `t_escheduler_relation_resources_user`;
CREATE TABLE `t_escheduler_relation_resources_user` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`user_id` int(11) NOT NULL COMMENT '用户id',
`resources_id` int(11) DEFAULT NULL COMMENT '资源id',
`perm` int(11) DEFAULT '1' COMMENT '权限',
`create_time` datetime DEFAULT NULL COMMENT '创建时间',
`update_time` datetime DEFAULT NULL COMMENT '更新时间',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_escheduler_relation_udfs_user
-- ----------------------------
DROP TABLE IF EXISTS `t_escheduler_relation_udfs_user`;
CREATE TABLE `t_escheduler_relation_udfs_user` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`user_id` int(11) NOT NULL COMMENT '用户id',
`udf_id` int(11) DEFAULT NULL COMMENT 'udf id',
`perm` int(11) DEFAULT '1' COMMENT '权限',
`create_time` datetime DEFAULT NULL COMMENT '创建时间',
`update_time` datetime DEFAULT NULL COMMENT '更新时间',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_escheduler_relation_user_alertgroup
-- ----------------------------
DROP TABLE IF EXISTS `t_escheduler_relation_user_alertgroup`;
CREATE TABLE `t_escheduler_relation_user_alertgroup` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`alertgroup_id` int(11) DEFAULT NULL COMMENT '组消息id',
`user_id` int(11) DEFAULT NULL COMMENT '用户id',
`create_time` datetime DEFAULT NULL COMMENT '创建时间',
`update_time` datetime DEFAULT NULL COMMENT '更新时间',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_escheduler_resources
-- ----------------------------
DROP TABLE IF EXISTS `t_escheduler_resources`;
CREATE TABLE `t_escheduler_resources` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`alias` varchar(64) DEFAULT NULL COMMENT '别名',
`file_name` varchar(64) DEFAULT NULL COMMENT '文件名',
`desc` varchar(256) DEFAULT NULL COMMENT '描述',
`user_id` int(11) DEFAULT NULL COMMENT '用户id',
`type` tinyint(4) DEFAULT NULL COMMENT '资源类型,0 FILE,1 UDF',
`size` bigint(20) DEFAULT NULL COMMENT '资源大小',
`create_time` datetime DEFAULT NULL COMMENT '创建时间',
`update_time` datetime DEFAULT NULL COMMENT '更新时间',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_escheduler_schedules
-- ----------------------------
DROP TABLE IF EXISTS `t_escheduler_schedules`;
CREATE TABLE `t_escheduler_schedules` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`process_definition_id` int(11) NOT NULL COMMENT '流程定义id',
`start_time` datetime NOT NULL COMMENT '调度开始时间',
`end_time` datetime NOT NULL COMMENT '调度结束时间',
`crontab` varchar(256) NOT NULL COMMENT 'crontab 表达式',
`failure_strategy` tinyint(4) NOT NULL COMMENT '失败策略: 0 结束,1 继续',
`user_id` int(11) NOT NULL COMMENT '用户id',
`release_state` tinyint(4) NOT NULL COMMENT '状态:0 未上线,1 上线',
`warning_type` tinyint(4) NOT NULL COMMENT '告警类型:0 不发,1 流程成功发,2 流程失败发,3 成功失败都发',
`warning_group_id` int(11) DEFAULT NULL COMMENT '告警组id',
`process_instance_priority` int(11) DEFAULT NULL COMMENT '流程实例优先级:0 Highest,1 High,2 Medium,3 Low,4 Lowest',
`create_time` datetime NOT NULL COMMENT '创建时间',
`update_time` datetime NOT NULL COMMENT '更新时间',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_escheduler_session
-- ----------------------------
DROP TABLE IF EXISTS `t_escheduler_session`;
CREATE TABLE `t_escheduler_session` (
`id` varchar(64) NOT NULL COMMENT '主键',
`user_id` int(11) DEFAULT NULL COMMENT '用户id',
`ip` varchar(45) DEFAULT NULL COMMENT '登录ip',
`last_login_time` datetime DEFAULT NULL COMMENT '最后登录时间',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_escheduler_task_instance
-- ----------------------------
DROP TABLE IF EXISTS `t_escheduler_task_instance`;
CREATE TABLE `t_escheduler_task_instance` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`name` varchar(255) DEFAULT NULL COMMENT '任务名称',
`task_type` varchar(64) DEFAULT NULL COMMENT '任务类型',
`process_definition_id` int(11) DEFAULT NULL COMMENT '流程定义id',
`process_instance_id` int(11) DEFAULT NULL COMMENT '流程实例id',
`task_json` longtext COMMENT '任务节点json',
`state` tinyint(4) DEFAULT NULL COMMENT '任务实例状态:0 提交成功,1 正在运行,2 准备暂停,3 暂停,4 准备停止,5 停止,6 失败,7 成功,8 需要容错,9 kill,10 等待线程,11 等待依赖完成',
`submit_time` datetime DEFAULT NULL COMMENT '任务提交时间',
`start_time` datetime DEFAULT NULL COMMENT '任务开始时间',
`end_time` datetime DEFAULT NULL COMMENT '任务结束时间',
`host` varchar(45) DEFAULT NULL COMMENT '执行任务的机器',
`execute_path` varchar(200) DEFAULT NULL COMMENT '任务执行路径',
`log_path` varchar(200) DEFAULT NULL COMMENT '任务日志路径',
`alert_flag` tinyint(4) DEFAULT NULL COMMENT '是否告警',
`retry_times` int(4) DEFAULT '0' COMMENT '重试次数',
`pid` int(4) DEFAULT NULL COMMENT '进程pid',
`app_link` varchar(255) DEFAULT NULL COMMENT 'yarn app id',
`flag` tinyint(4) DEFAULT '1' COMMENT '是否可用:0 不可用,1 可用',
`retry_interval` int(4) DEFAULT NULL COMMENT '重试间隔',
`max_retry_times` int(2) DEFAULT NULL COMMENT '最大重试次数',
`task_instance_priority` int(11) DEFAULT NULL COMMENT '任务实例优先级:0 Highest,1 High,2 Medium,3 Low,4 Lowest',
PRIMARY KEY (`id`),
KEY `process_instance_id` (`process_instance_id`) USING BTREE,
KEY `task_instance_index` (`process_definition_id`,`process_instance_id`) USING BTREE,
CONSTRAINT `foreign_key_instance_id` FOREIGN KEY (`process_instance_id`) REFERENCES `t_escheduler_process_instance` (`id`) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_escheduler_tenant
-- ----------------------------
DROP TABLE IF EXISTS `t_escheduler_tenant`;
CREATE TABLE `t_escheduler_tenant` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`tenant_code` varchar(64) DEFAULT NULL COMMENT '租户编码',
`tenant_name` varchar(64) DEFAULT NULL COMMENT '租户名称',
`desc` varchar(256) DEFAULT NULL COMMENT '描述',
`queue_id` int(11) DEFAULT NULL COMMENT '队列id',
`create_time` datetime DEFAULT NULL COMMENT '创建时间',
`update_time` datetime DEFAULT NULL COMMENT '更新时间',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_escheduler_udfs
-- ----------------------------
DROP TABLE IF EXISTS `t_escheduler_udfs`;
CREATE TABLE `t_escheduler_udfs` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`user_id` int(11) NOT NULL COMMENT '用户id',
`func_name` varchar(100) NOT NULL COMMENT 'UDF函数名',
`class_name` varchar(255) NOT NULL COMMENT '类名',
`type` tinyint(4) NOT NULL COMMENT 'Udf函数类型',
`arg_types` varchar(255) DEFAULT NULL COMMENT '参数',
`database` varchar(255) DEFAULT NULL COMMENT '库名',
`desc` varchar(255) DEFAULT NULL COMMENT '描述',
`resource_id` int(11) NOT NULL COMMENT '资源id',
`resource_name` varchar(255) NOT NULL COMMENT '资源名称',
`create_time` datetime NOT NULL COMMENT '创建时间',
`update_time` datetime NOT NULL COMMENT '更新时间',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_escheduler_user
-- ----------------------------
DROP TABLE IF EXISTS `t_escheduler_user`;
CREATE TABLE `t_escheduler_user` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '用户id',
`user_name` varchar(64) DEFAULT NULL COMMENT '用户名',
`user_password` varchar(64) DEFAULT NULL COMMENT '用户密码',
`user_type` tinyint(4) DEFAULT NULL COMMENT '用户类型:0 管理员,1 普通用户',
`email` varchar(64) DEFAULT NULL COMMENT '邮箱',
`phone` varchar(11) DEFAULT NULL COMMENT '手机',
`tenant_id` int(11) DEFAULT NULL COMMENT '管理员0,普通用户所属租户id',
`create_time` datetime DEFAULT NULL COMMENT '创建时间',
`update_time` datetime DEFAULT NULL COMMENT '更新时间',
PRIMARY KEY (`id`),
UNIQUE KEY `user_name_unique` (`user_name`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_escheduler_worker_server
-- ----------------------------
DROP TABLE IF EXISTS `t_escheduler_worker_server`;
CREATE TABLE `t_escheduler_worker_server` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`host` varchar(45) DEFAULT NULL COMMENT 'ip',
`port` int(11) DEFAULT NULL COMMENT '进程号',
`zk_directory` varchar(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL COMMENT 'zk注册目录',
`res_info` varchar(255) DEFAULT NULL COMMENT '集群资源信息:json格式{"cpu":xxx,"memroy":xxx}',
`create_time` datetime DEFAULT NULL COMMENT '创建时间',
`last_heartbeat_time` datetime DEFAULT NULL COMMENT '更新时间',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- Records of t_escheduler_user,user : admin , password : escheduler123
INSERT INTO `t_escheduler_user` VALUES ('1', 'admin', '055a97b5fcd6d120372ad1976518f371', '0', 'xxx@qq.com', 'xxxx', '0', '2018-03-27 15:48:50', '2018-10-24 17:40:22');
INSERT INTO `t_escheduler_alertgroup` VALUES (1, 'escheduler管理员告警组', '0', 'escheduler管理员告警组','2018-11-29 10:20:39', '2018-11-29 10:20:39');
INSERT INTO `t_escheduler_relation_user_alertgroup` VALUES ('1', '1', '1', '2018-11-29 10:22:33', '2018-11-29 10:22:33');
-- Records of t_escheduler_queue,default queue name : default
INSERT INTO `t_escheduler_queue` VALUES ('1', 'default', 'default');

sql/quartz.sql (179 changes, file deleted)

@@ -1,179 +0,0 @@
#
# In your Quartz properties file, you'll need to set
# org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.StdJDBCDelegate
#
#
# By: Ron Cordell - roncordell
# I didn't see this anywhere, so I thought I'd post it here. This is the script from Quartz to create the tables in a MySQL database, modified to use INNODB instead of MYISAM.
DROP TABLE IF EXISTS QRTZ_FIRED_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_PAUSED_TRIGGER_GRPS;
DROP TABLE IF EXISTS QRTZ_SCHEDULER_STATE;
DROP TABLE IF EXISTS QRTZ_LOCKS;
DROP TABLE IF EXISTS QRTZ_SIMPLE_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_SIMPROP_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_CRON_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_BLOB_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_JOB_DETAILS;
DROP TABLE IF EXISTS QRTZ_CALENDARS;
CREATE TABLE QRTZ_JOB_DETAILS(
SCHED_NAME VARCHAR(120) NOT NULL,
JOB_NAME VARCHAR(200) NOT NULL,
JOB_GROUP VARCHAR(200) NOT NULL,
DESCRIPTION VARCHAR(250) NULL,
JOB_CLASS_NAME VARCHAR(250) NOT NULL,
IS_DURABLE VARCHAR(1) NOT NULL,
IS_NONCONCURRENT VARCHAR(1) NOT NULL,
IS_UPDATE_DATA VARCHAR(1) NOT NULL,
REQUESTS_RECOVERY VARCHAR(1) NOT NULL,
JOB_DATA BLOB NULL,
PRIMARY KEY (SCHED_NAME,JOB_NAME,JOB_GROUP))
ENGINE=InnoDB;
CREATE TABLE QRTZ_TRIGGERS (
SCHED_NAME VARCHAR(120) NOT NULL,
TRIGGER_NAME VARCHAR(200) NOT NULL,
TRIGGER_GROUP VARCHAR(200) NOT NULL,
JOB_NAME VARCHAR(200) NOT NULL,
JOB_GROUP VARCHAR(200) NOT NULL,
DESCRIPTION VARCHAR(250) NULL,
NEXT_FIRE_TIME BIGINT(13) NULL,
PREV_FIRE_TIME BIGINT(13) NULL,
PRIORITY INTEGER NULL,
TRIGGER_STATE VARCHAR(16) NOT NULL,
TRIGGER_TYPE VARCHAR(8) NOT NULL,
START_TIME BIGINT(13) NOT NULL,
END_TIME BIGINT(13) NULL,
CALENDAR_NAME VARCHAR(200) NULL,
MISFIRE_INSTR SMALLINT(2) NULL,
JOB_DATA BLOB NULL,
PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
FOREIGN KEY (SCHED_NAME,JOB_NAME,JOB_GROUP)
REFERENCES QRTZ_JOB_DETAILS(SCHED_NAME,JOB_NAME,JOB_GROUP))
ENGINE=InnoDB;
CREATE TABLE QRTZ_SIMPLE_TRIGGERS (
SCHED_NAME VARCHAR(120) NOT NULL,
TRIGGER_NAME VARCHAR(200) NOT NULL,
TRIGGER_GROUP VARCHAR(200) NOT NULL,
REPEAT_COUNT BIGINT(7) NOT NULL,
REPEAT_INTERVAL BIGINT(12) NOT NULL,
TIMES_TRIGGERED BIGINT(10) NOT NULL,
PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP))
ENGINE=InnoDB;
CREATE TABLE QRTZ_CRON_TRIGGERS (
SCHED_NAME VARCHAR(120) NOT NULL,
TRIGGER_NAME VARCHAR(200) NOT NULL,
TRIGGER_GROUP VARCHAR(200) NOT NULL,
CRON_EXPRESSION VARCHAR(120) NOT NULL,
TIME_ZONE_ID VARCHAR(80),
PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP))
ENGINE=InnoDB;
CREATE TABLE QRTZ_SIMPROP_TRIGGERS
(
SCHED_NAME VARCHAR(120) NOT NULL,
TRIGGER_NAME VARCHAR(200) NOT NULL,
TRIGGER_GROUP VARCHAR(200) NOT NULL,
STR_PROP_1 VARCHAR(512) NULL,
STR_PROP_2 VARCHAR(512) NULL,
STR_PROP_3 VARCHAR(512) NULL,
INT_PROP_1 INT NULL,
INT_PROP_2 INT NULL,
LONG_PROP_1 BIGINT NULL,
LONG_PROP_2 BIGINT NULL,
DEC_PROP_1 NUMERIC(13,4) NULL,
DEC_PROP_2 NUMERIC(13,4) NULL,
BOOL_PROP_1 VARCHAR(1) NULL,
BOOL_PROP_2 VARCHAR(1) NULL,
PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP))
ENGINE=InnoDB;
CREATE TABLE QRTZ_BLOB_TRIGGERS (
SCHED_NAME VARCHAR(120) NOT NULL,
TRIGGER_NAME VARCHAR(200) NOT NULL,
TRIGGER_GROUP VARCHAR(200) NOT NULL,
BLOB_DATA BLOB NULL,
PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
INDEX (SCHED_NAME,TRIGGER_NAME, TRIGGER_GROUP),
FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP))
ENGINE=InnoDB;
CREATE TABLE QRTZ_CALENDARS (
SCHED_NAME VARCHAR(120) NOT NULL,
CALENDAR_NAME VARCHAR(200) NOT NULL,
CALENDAR BLOB NOT NULL,
PRIMARY KEY (SCHED_NAME,CALENDAR_NAME))
ENGINE=InnoDB;
CREATE TABLE QRTZ_PAUSED_TRIGGER_GRPS (
SCHED_NAME VARCHAR(120) NOT NULL,
TRIGGER_GROUP VARCHAR(200) NOT NULL,
PRIMARY KEY (SCHED_NAME,TRIGGER_GROUP))
ENGINE=InnoDB;
CREATE TABLE QRTZ_FIRED_TRIGGERS (
SCHED_NAME VARCHAR(120) NOT NULL,
ENTRY_ID VARCHAR(95) NOT NULL,
TRIGGER_NAME VARCHAR(200) NOT NULL,
TRIGGER_GROUP VARCHAR(200) NOT NULL,
INSTANCE_NAME VARCHAR(200) NOT NULL,
FIRED_TIME BIGINT(13) NOT NULL,
SCHED_TIME BIGINT(13) NOT NULL,
PRIORITY INTEGER NOT NULL,
STATE VARCHAR(16) NOT NULL,
JOB_NAME VARCHAR(200) NULL,
JOB_GROUP VARCHAR(200) NULL,
IS_NONCONCURRENT VARCHAR(1) NULL,
REQUESTS_RECOVERY VARCHAR(1) NULL,
PRIMARY KEY (SCHED_NAME,ENTRY_ID))
ENGINE=InnoDB;
CREATE TABLE QRTZ_SCHEDULER_STATE (
SCHED_NAME VARCHAR(120) NOT NULL,
INSTANCE_NAME VARCHAR(200) NOT NULL,
LAST_CHECKIN_TIME BIGINT(13) NOT NULL,
CHECKIN_INTERVAL BIGINT(13) NOT NULL,
PRIMARY KEY (SCHED_NAME,INSTANCE_NAME))
ENGINE=InnoDB;
CREATE TABLE QRTZ_LOCKS (
SCHED_NAME VARCHAR(120) NOT NULL,
LOCK_NAME VARCHAR(40) NOT NULL,
PRIMARY KEY (SCHED_NAME,LOCK_NAME))
ENGINE=InnoDB;
CREATE INDEX IDX_QRTZ_J_REQ_RECOVERY ON QRTZ_JOB_DETAILS(SCHED_NAME,REQUESTS_RECOVERY);
CREATE INDEX IDX_QRTZ_J_GRP ON QRTZ_JOB_DETAILS(SCHED_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_J ON QRTZ_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_JG ON QRTZ_TRIGGERS(SCHED_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_C ON QRTZ_TRIGGERS(SCHED_NAME,CALENDAR_NAME);
CREATE INDEX IDX_QRTZ_T_G ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP);
CREATE INDEX IDX_QRTZ_T_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_N_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_N_G_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_NEXT_FIRE_TIME ON QRTZ_TRIGGERS(SCHED_NAME,NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_ST ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE,NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE_GRP ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_FT_TRIG_INST_NAME ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME);
CREATE INDEX IDX_QRTZ_FT_INST_JOB_REQ_RCVRY ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME,REQUESTS_RECOVERY);
CREATE INDEX IDX_QRTZ_FT_J_G ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_FT_JG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_FT_T_G ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE INDEX IDX_QRTZ_FT_TG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_GROUP);
commit;