From a6e2d1eb82043b6f9654d22e0ff841caaeb38405 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E4=B9=94=E5=8D=A0=E5=8D=AB?= <825193156@qq.com>
Date: Mon, 9 Sep 2019 14:34:35 +0800
Subject: [PATCH 01/23] the process instance is deleted, the task corresponding
to the zk queue still exists, and the task is squeezed. #754 bug fix (#775)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
* task log disorder, bug #751 fix
* the log path of the task and the log path of the task instance are different. The log cannot be viewed. #723
* the log path of the task and the log path of the task instance are different. The log cannot be viewed. #723 bug fix
* after starting kerberos authentication, tgt expires after one day, bug #742 fix
* log pattern modify
* LoggerServer remove comment code and ShellCommandExecutor modify
* PythonCommandExecutor modify
* Concurrent task log bug #730 fix
* remove invalid commit
* The process instance is deleted, the task corresponding to the zk queue still exists, and the task is squeezed. #754 fix bug
* The process instance is deleted, the task corresponding to the zk queue still exists, and the task is squeezed. #754 re fix
* The process instance is deleted, the task corresponding to the zk queue still exists, and the task is squeezed. #754 bug fix
---
.../api/service/ProcessInstanceService.java | 68 ++++++++++++++-----
1 file changed, 50 insertions(+), 18 deletions(-)
diff --git a/escheduler-api/src/main/java/cn/escheduler/api/service/ProcessInstanceService.java b/escheduler-api/src/main/java/cn/escheduler/api/service/ProcessInstanceService.java
index 3171f26dcd..0e05d944ae 100644
--- a/escheduler-api/src/main/java/cn/escheduler/api/service/ProcessInstanceService.java
+++ b/escheduler-api/src/main/java/cn/escheduler/api/service/ProcessInstanceService.java
@@ -32,10 +32,7 @@ import cn.escheduler.common.model.TaskNodeRelation;
import cn.escheduler.common.process.Property;
import cn.escheduler.common.queue.ITaskQueue;
import cn.escheduler.common.queue.TaskQueueFactory;
-import cn.escheduler.common.utils.CollectionUtils;
-import cn.escheduler.common.utils.DateUtils;
-import cn.escheduler.common.utils.JSONUtils;
-import cn.escheduler.common.utils.ParameterUtils;
+import cn.escheduler.common.utils.*;
import cn.escheduler.common.utils.placeholder.BusinessTimeUtils;
import cn.escheduler.dao.ProcessDao;
import cn.escheduler.dao.mapper.*;
@@ -493,29 +490,64 @@ public class ProcessInstanceService extends BaseDAGService {
return result;
}
- int delete = processDao.deleteWorkProcessInstanceById(processInstanceId);
- processDao.deleteAllSubWorkProcessByParentId(processInstanceId);
- processDao.deleteWorkProcessMapByParentId(processInstanceId);
+ // delete zk queue
+ if (CollectionUtils.isNotEmpty(taskInstanceList)){
+ for (TaskInstance taskInstance : taskInstanceList){
+ // task instance priority
+ int taskInstancePriority = taskInstance.getTaskInstancePriority().ordinal();
+
+ StringBuilder nodeValueSb = new StringBuilder(100);
+ nodeValueSb.append(processInstancePriority)
+ .append(UNDERLINE)
+ .append(processInstanceId)
+ .append(UNDERLINE)
+ .append(taskInstancePriority)
+ .append(UNDERLINE)
+ .append(taskInstance.getId())
+ .append(UNDERLINE);
+
+ int taskWorkerGroupId = processDao.getTaskWorkerGroupId(taskInstance);
+ WorkerGroup workerGroup = workerGroupMapper.queryById(taskWorkerGroupId);
+
+ if(workerGroup == null){
+ nodeValueSb.append(DEFAULT_WORKER_ID);
+ }else {
+
+ String ips = workerGroup.getIpList();
+ StringBuilder ipSb = new StringBuilder(100);
+ String[] ipArray = ips.split(COMMA);
+
+ for (String ip : ipArray) {
+ long ipLong = IpUtils.ipToLong(ip);
+ ipSb.append(ipLong).append(COMMA);
+ }
- if (delete > 0) {
- if (CollectionUtils.isNotEmpty(taskInstanceList)){
- for (TaskInstance taskInstance : taskInstanceList){
- // task instance priority
- int taskInstancePriority = taskInstance.getTaskInstancePriority().ordinal();
- String nodeValue=processInstancePriority + "_" + processInstanceId + "_" +taskInstancePriority + "_" + taskInstance.getId();
- try {
- logger.info("delete task queue node : {}",nodeValue);
- tasksQueue.removeNode(cn.escheduler.common.Constants.SCHEDULER_TASKS_QUEUE, nodeValue);
- }catch (Exception e){
- logger.error("delete task queue node : {}", nodeValue);
+ if(ipSb.length() > 0) {
+ ipSb.deleteCharAt(ipSb.length() - 1);
}
+ nodeValueSb.append(ipSb);
+ }
+
+ try {
+ logger.info("delete task queue node : {}",nodeValueSb.toString());
+ tasksQueue.removeNode(cn.escheduler.common.Constants.SCHEDULER_TASKS_QUEUE, nodeValueSb.toString());
+ }catch (Exception e){
+ logger.error("delete task queue node : {}", nodeValueSb.toString());
}
}
+ }
+ // delete database cascade
+ int delete = processDao.deleteWorkProcessInstanceById(processInstanceId);
+ processDao.deleteAllSubWorkProcessByParentId(processInstanceId);
+ processDao.deleteWorkProcessMapByParentId(processInstanceId);
+
+ if (delete > 0) {
putMsg(result, Status.SUCCESS);
} else {
putMsg(result, Status.DELETE_PROCESS_INSTANCE_BY_ID_ERROR);
}
+
return result;
}
From 0877e35efaee6afb04dd5519e332a2ef08031c7e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E4=B9=94=E5=8D=A0=E5=8D=AB?= <825193156@qq.com>
Date: Thu, 12 Sep 2019 11:24:55 +0800
Subject: [PATCH 02/23] service start exception modify (#787)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
* service start exception modify
* master,worker start modify
* .env update
---
.../src/main/java/cn/escheduler/dao/DaoFactory.java | 8 ++++----
.../java/cn/escheduler/server/master/MasterServer.java | 6 +++---
.../java/cn/escheduler/server/worker/WorkerServer.java | 6 +++---
.../src/main/resources/application_master.properties | 3 +++
.../src/main/resources/application_worker.properties | 4 ++++
script/escheduler-daemon.sh | 2 +-
6 files changed, 18 insertions(+), 11 deletions(-)
create mode 100644 escheduler-server/src/main/resources/application_worker.properties
diff --git a/escheduler-dao/src/main/java/cn/escheduler/dao/DaoFactory.java b/escheduler-dao/src/main/java/cn/escheduler/dao/DaoFactory.java
index 97e65115d0..8237a85805 100644
--- a/escheduler-dao/src/main/java/cn/escheduler/dao/DaoFactory.java
+++ b/escheduler-dao/src/main/java/cn/escheduler/dao/DaoFactory.java
@@ -48,10 +48,10 @@ public class DaoFactory {
synchronized (daoMap) {
if (!daoMap.containsKey(className)) {
try {
- T t = BeanContext.getBean(clazz);
-// T t = clazz.getConstructor().newInstance();
-// // 实例初始化
-// t.init();
+// T t = BeanContext.getBean(clazz);
+ T t = clazz.getConstructor().newInstance();
+ // 实例初始化
+ t.init();
daoMap.put(className, t);
} catch (Exception e) {
logger.error(e.getMessage(), e);
diff --git a/escheduler-server/src/main/java/cn/escheduler/server/master/MasterServer.java b/escheduler-server/src/main/java/cn/escheduler/server/master/MasterServer.java
index 231273e2a1..d68b181660 100644
--- a/escheduler-server/src/main/java/cn/escheduler/server/master/MasterServer.java
+++ b/escheduler-server/src/main/java/cn/escheduler/server/master/MasterServer.java
@@ -33,6 +33,7 @@ import org.quartz.SchedulerException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.SpringApplication;
import org.springframework.boot.WebApplicationType;
import org.springframework.boot.builder.SpringApplicationBuilder;
import org.springframework.context.annotation.ComponentScan;
@@ -90,10 +91,9 @@ public class MasterServer extends AbstractServer {
* master server not use web service
*/
public static void main(String[] args) {
- SpringApplicationBuilder app = new SpringApplicationBuilder(MasterServer.class);
+ SpringApplication app = new SpringApplication(MasterServer.class);
- app.web(WebApplicationType.NONE)
- .run(args);
+ app.run(args);
}
diff --git a/escheduler-server/src/main/java/cn/escheduler/server/worker/WorkerServer.java b/escheduler-server/src/main/java/cn/escheduler/server/worker/WorkerServer.java
index af9a8ee997..61ca0b309d 100644
--- a/escheduler-server/src/main/java/cn/escheduler/server/worker/WorkerServer.java
+++ b/escheduler-server/src/main/java/cn/escheduler/server/worker/WorkerServer.java
@@ -39,6 +39,7 @@ import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.SpringApplication;
import org.springframework.boot.WebApplicationType;
import org.springframework.boot.builder.SpringApplicationBuilder;
import org.springframework.context.annotation.ComponentScan;
@@ -121,10 +122,9 @@ public class WorkerServer extends AbstractServer {
*/
public static void main(String[] args) {
- SpringApplicationBuilder app = new SpringApplicationBuilder(WorkerServer.class);
+ SpringApplication app = new SpringApplication(WorkerServer.class);
- app.web(WebApplicationType.NONE)
- .run(args);
+ app.run(args);
}
diff --git a/escheduler-server/src/main/resources/application_master.properties b/escheduler-server/src/main/resources/application_master.properties
index cc4774ae94..68fe3dd02c 100644
--- a/escheduler-server/src/main/resources/application_master.properties
+++ b/escheduler-server/src/main/resources/application_master.properties
@@ -1 +1,4 @@
logging.config=classpath:master_logback.xml
+
+# server port
+server.port=5566
diff --git a/escheduler-server/src/main/resources/application_worker.properties b/escheduler-server/src/main/resources/application_worker.properties
new file mode 100644
index 0000000000..39983e4fba
--- /dev/null
+++ b/escheduler-server/src/main/resources/application_worker.properties
@@ -0,0 +1,4 @@
+logging.config=classpath:master_logback.xml
+
+# server port
+server.port=7788
diff --git a/script/escheduler-daemon.sh b/script/escheduler-daemon.sh
index 99f0dee444..d54272c886 100644
--- a/script/escheduler-daemon.sh
+++ b/script/escheduler-daemon.sh
@@ -47,7 +47,7 @@ elif [ "$command" = "master-server" ]; then
LOG_FILE="-Dspring.config.location=conf/application_master.properties -Ddruid.mysql.usePingMethod=false"
CLASS=cn.escheduler.server.master.MasterServer
elif [ "$command" = "worker-server" ]; then
- LOG_FILE="-Dlogback.configurationFile=conf/worker_logback.xml -Ddruid.mysql.usePingMethod=false"
+ LOG_FILE="-Dspring.config.location=conf/application_worker.properties -Ddruid.mysql.usePingMethod=false"
CLASS=cn.escheduler.server.worker.WorkerServer
elif [ "$command" = "alert-server" ]; then
LOG_FILE="-Dlogback.configurationFile=conf/alert_logback.xml"
From 0daf0b0239aaba3e7e6cb11efb733bd8de280a36 Mon Sep 17 00:00:00 2001
From: break60 <790061044@qq.com>
Date: Thu, 12 Sep 2019 17:39:12 +0800
Subject: [PATCH 03/23] package (#789)
---
escheduler-ui/package.json | 8 --------
1 file changed, 8 deletions(-)
diff --git a/escheduler-ui/package.json b/escheduler-ui/package.json
index acfa498a54..c1cd0c483e 100644
--- a/escheduler-ui/package.json
+++ b/escheduler-ui/package.json
@@ -79,14 +79,6 @@
"devDependencies": {
"jasmine-core": "^3.2.1",
"jquery": "1.12.4",
- "karma": "^3.0.0",
- "karma-browserstack-launcher": "^1.3.0",
- "karma-chrome-launcher": "^2.2.0",
- "karma-coverage": "^1.1.2",
- "karma-jasmine": "^1.1.2",
- "karma-sourcemap-loader": "^0.3.7",
- "karma-spec-reporter": "^0.0.32",
- "karma-webpack": "^3.0.0",
"vue": "^2.5.17",
"vue-router": "2.7.0",
"vuex": "^3.0.0"
From f6ef6e2f9d545c0e89608e16a0041dc631a9e461 Mon Sep 17 00:00:00 2001
From: Kevin Ratnasekera
Date: Mon, 16 Sep 2019 08:35:57 +0530
Subject: [PATCH 04/23] Refactor hardcoded versions in parent pom (#773)
---
pom.xml | 103 +++++++++++++++++++++++++++++++++++++-------------------
1 file changed, 68 insertions(+), 35 deletions(-)
diff --git a/pom.xml b/pom.xml
index f8f64313d8..3ec0b4f27e 100644
--- a/pom.xml
+++ b/pom.xml
@@ -20,6 +20,39 @@
2.7.32.2.32.9.8
+ 3.5.1
+ 2.0.1
+ 5.0.5
+ 1.2.29
+ 1.1.14
+ 1.3.163
+ 1.6
+ 1.1.1
+ 4.4.1
+ 4.4.1
+ 4.12
+ 5.1.34
+ 1.7.5
+ 1.7.5
+ 3.2.2
+ 2.3
+ 3.5
+ 3.0.1
+ 1.7.0
+ 1.10
+ 1.5
+ 3.17
+ 2.3.21
+ 3.1.0
+ 4.1
+ 20.0
+ 42.1.4
+ 2.1.0
+ 2.4
+ 3.5.0
+ 0.1.52
+ 6.1.0.jre8
+ 6.1.14
@@ -28,22 +61,22 @@
org.mybatismybatis
- 3.5.1
+ ${mybatis.version}org.mybatismybatis-spring
- 2.0.1
+ ${mybatis.spring.version}org.mybatis.spring.bootmybatis-spring-boot-autoconfigure
- 2.0.1
+ ${mybatis.spring.version}org.mybatis.spring.bootmybatis-spring-boot-starter
- 2.0.1
+ ${mybatis.spring.version}
@@ -60,18 +93,18 @@
com.cronutilscron-utils
- 5.0.5
+ ${cron.utils.version}com.alibabafastjson
- 1.2.29
+ ${fastjson.version}com.alibabadruid
- 1.1.14
+ ${druid.version}
@@ -116,7 +149,7 @@
com.h2databaseh2
- 1.3.163
+ ${h2.version}test
@@ -165,22 +198,22 @@
commons-codeccommons-codec
- 1.6
+ ${commons.codec.version}commons-loggingcommons-logging
- 1.1.1
+ ${commons.logging.version}org.apache.httpcomponentshttpclient
- 4.4.1
+ ${httpclient.version}org.apache.httpcomponentshttpcore
- 4.4.1
+ ${httpcore.version}com.fasterxml.jackson.core
@@ -201,56 +234,56 @@
junitjunit
- 4.12
+ ${junit.version}mysqlmysql-connector-java
- 5.1.34
+ ${mysql.connector.version}org.slf4jslf4j-api
- 1.7.5
+ ${slf4j.api.version}org.slf4jslf4j-log4j12
- 1.7.5
+ ${slf4j.log4j12.version}commons-collectionscommons-collections
- 3.2.2
+ ${commons.collections.version}commons-langcommons-lang
- 2.3
+ ${commons.lang.version}org.apache.commonscommons-lang3
- 3.5
+ ${commons.lang3.version}commons-httpclientcommons-httpclient
- 3.0.1
+ ${commons.httpclient}commons-beanutilscommons-beanutils
- 1.7.0
+ ${commons.beanutils.version}commons-configurationcommons-configuration
- 1.10
+ ${commons.configuration.version}
@@ -268,20 +301,20 @@
org.apache.commonscommons-email
- 1.5
+ ${commons.email.version}org.apache.poipoi
- 3.17
+ ${poi.version}org.freemarkerfreemarker
- 2.3.21
+ ${freemarker.version}
@@ -325,61 +358,61 @@
javax.servletjavax.servlet-api
- 3.1.0
+ ${javax.servlet.api.version}org.apache.commonscommons-collections4
- 4.1
+ ${commons.collections4.version}com.google.guavaguava
- 20.0
+ ${guava.version}org.postgresqlpostgresql
- 42.1.4
+ ${postgresql.version}org.apache.hivehive-jdbc
- 2.1.0
+ ${hive.jdbc.version}commons-iocommons-io
- 2.4
+ ${commons.io.version}com.github.oshioshi-core
- 3.5.0
+ ${oshi.core.version}ru.yandex.clickhouseclickhouse-jdbc
- 0.1.52
+ ${clickhouse.jdbc.version}com.microsoft.sqlservermssql-jdbc
- 6.1.0.jre8
+ ${mssql.jdbc.version}org.mortbay.jettyjsp-2.1
- 6.1.14
+ ${jsp.version}
From fc41a8f51986c4d012c667dd32f625e0d1e0c9c1 Mon Sep 17 00:00:00 2001
From: break60 <790061044@qq.com>
Date: Tue, 17 Sep 2019 13:59:32 +0800
Subject: [PATCH 05/23] instance/pages/list/_source/list.vue && package (#796)
* package
* Modify the workflow instance page status to run, prohibit deletion, delete the package.json package babel-runtime and jasmine-core
---
escheduler-ui/package.json | 2 --
.../pages/projects/pages/instance/pages/list/_source/list.vue | 1 +
escheduler-ui/src/lib/@analysys/ans-ui/package.json | 1 -
escheduler-ui/src/lib/@fedor/io/package.json | 1 -
4 files changed, 1 insertion(+), 4 deletions(-)
diff --git a/escheduler-ui/package.json b/escheduler-ui/package.json
index c1cd0c483e..c6a2a92e40 100644
--- a/escheduler-ui/package.json
+++ b/escheduler-ui/package.json
@@ -27,7 +27,6 @@
"babel-plugin-transform-runtime": "^6.23.0",
"babel-plugin-transform-vue-jsx": "^3.5.0",
"babel-preset-env": "^1.6.1",
- "babel-runtime": "^6.26.0",
"bootstrap": "3.3.7",
"canvg": "1.5",
"clipboard": "^2.0.1",
@@ -77,7 +76,6 @@
]
},
"devDependencies": {
- "jasmine-core": "^3.2.1",
"jquery": "1.12.4",
"vue": "^2.5.17",
"vue-router": "2.7.0",
diff --git a/escheduler-ui/src/js/conf/home/pages/projects/pages/instance/pages/list/_source/list.vue b/escheduler-ui/src/js/conf/home/pages/projects/pages/instance/pages/list/_source/list.vue
index 619407a61a..9efc9f721a 100644
--- a/escheduler-ui/src/js/conf/home/pages/projects/pages/instance/pages/list/_source/list.vue
+++ b/escheduler-ui/src/js/conf/home/pages/projects/pages/instance/pages/list/_source/list.vue
@@ -123,6 +123,7 @@
shape="circle"
size="xsmall"
data-toggle="tooltip"
+ :disabled="item.state === 'RUNNING_EXEUTION'"
:title="$t('delete')">
diff --git a/escheduler-ui/src/lib/@analysys/ans-ui/package.json b/escheduler-ui/src/lib/@analysys/ans-ui/package.json
index cddb561eb3..061f54a7ad 100644
--- a/escheduler-ui/src/lib/@analysys/ans-ui/package.json
+++ b/escheduler-ui/src/lib/@analysys/ans-ui/package.json
@@ -53,7 +53,6 @@
"babel-plugin-transform-runtime": "^6.23.0",
"babel-plugin-transform-vue-jsx": "^3.7.0",
"babel-preset-env": "^1.5.2",
- "babel-runtime": "^6.26.0",
"cross-env": "^5.2.0",
"css-loader": "0.28.8",
"cssnano": "^4.0.3",
diff --git a/escheduler-ui/src/lib/@fedor/io/package.json b/escheduler-ui/src/lib/@fedor/io/package.json
index 0066dfa8f0..20563bedeb 100644
--- a/escheduler-ui/src/lib/@fedor/io/package.json
+++ b/escheduler-ui/src/lib/@fedor/io/package.json
@@ -23,7 +23,6 @@
"babel-plugin-transform-class-properties": "^6.24.1",
"babel-plugin-transform-runtime": "^6.23.0",
"babel-preset-env": "^1.5.2",
- "babel-runtime": "^6.26.0",
"body-parser": "^1.17.2",
"chai": "^4.1.1",
"cors": "^2.8.4",
From d5f17f579cc598ff94aa7ae00b0f0f61ab85529c Mon Sep 17 00:00:00 2001
From: lgcareer <18610854716@163.com>
Date: Tue, 17 Sep 2019 19:04:45 +0800
Subject: [PATCH 06/23] add support for postgresql in upgrade database (#801)
* Remove useless dependencies and add jsp-2.1-6.1.14
* Determine if principal is empty in getJdbcUrl method
* fix bug when register worker but create master node in zookeeper
* Remove useless dependencies such as hive-shims and log4j
* upgrade commons-email from 1.2 to 1.5 and remove mail-1.4.5.jar
* add support for postgresql in upgrade database
* add support for postgresql in upgrade database
---
.../{MysqlUtils.java => ConnectionUtils.java} | 14 +-
...ager.java => DolphinSchedulerManager.java} | 48 +-
.../dao/upgrade/MysqlUpgradeDao.java | 104 +++
.../dao/upgrade/PostgresqlUpgradeDao.java | 132 +++
.../cn/escheduler/dao/upgrade/UpgradeDao.java | 131 ++-
...duler.java => CreateDolphinScheduler.java} | 22 +-
...heduler.java => InitDolphinScheduler.java} | 16 +-
...uler.java => UpgradeDolphinScheduler.java} | 18 +-
script/create_escheduler.sh | 2 +-
script/upgrade_escheduler.sh | 2 +-
...duler_ddl.sql => dolphinscheduler_ddl.sql} | 0
...duler_dml.sql => dolphinscheduler_dml.sql} | 0
.../postgresql/dolphinscheduler_ddl.sql | 804 ++++++++++++++++++
.../postgresql/dolphinscheduler_dml.sql | 8 +
sql/soft_version | 2 +-
...duler_ddl.sql => dolphinscheduler_ddl.sql} | 0
...duler_dml.sql => dolphinscheduler_dml.sql} | 0
...duler_ddl.sql => dolphinscheduler_ddl.sql} | 0
...duler_dml.sql => dolphinscheduler_dml.sql} | 0
...duler_ddl.sql => dolphinscheduler_ddl.sql} | 0
...duler_dml.sql => dolphinscheduler_dml.sql} | 0
.../mysql/dolphinscheduler_ddl.sql | 0
.../mysql/dolphinscheduler_dml.sql | 0
.../postgresql/dolphinscheduler_ddl.sql | 0
.../postgresql/dolphinscheduler_dml.sql | 0
25 files changed, 1180 insertions(+), 123 deletions(-)
rename escheduler-common/src/main/java/cn/escheduler/common/utils/{MysqlUtils.java => ConnectionUtils.java} (87%)
rename escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/{EschedulerManager.java => DolphinSchedulerManager.java} (67%)
create mode 100644 escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/MysqlUpgradeDao.java
create mode 100644 escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/PostgresqlUpgradeDao.java
rename escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/{CreateEscheduler.java => CreateDolphinScheduler.java} (66%)
rename escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/{InitEscheduler.java => InitDolphinScheduler.java} (72%)
rename escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/{UpgradeEscheduler.java => UpgradeDolphinScheduler.java} (73%)
rename sql/create/release-1.0.0_schema/mysql/{escheduler_ddl.sql => dolphinscheduler_ddl.sql} (100%)
rename sql/create/release-1.0.0_schema/mysql/{escheduler_dml.sql => dolphinscheduler_dml.sql} (100%)
create mode 100644 sql/create/release-1.2.0_schema/postgresql/dolphinscheduler_ddl.sql
create mode 100644 sql/create/release-1.2.0_schema/postgresql/dolphinscheduler_dml.sql
rename sql/upgrade/1.0.1_schema/mysql/{escheduler_ddl.sql => dolphinscheduler_ddl.sql} (100%)
rename sql/upgrade/1.0.1_schema/mysql/{escheduler_dml.sql => dolphinscheduler_dml.sql} (100%)
rename sql/upgrade/1.0.2_schema/mysql/{escheduler_ddl.sql => dolphinscheduler_ddl.sql} (100%)
rename sql/upgrade/1.0.2_schema/mysql/{escheduler_dml.sql => dolphinscheduler_dml.sql} (100%)
rename sql/upgrade/1.1.0_schema/mysql/{escheduler_ddl.sql => dolphinscheduler_ddl.sql} (100%)
rename sql/upgrade/1.1.0_schema/mysql/{escheduler_dml.sql => dolphinscheduler_dml.sql} (100%)
create mode 100644 sql/upgrade/1.2.0_schema/mysql/dolphinscheduler_ddl.sql
create mode 100644 sql/upgrade/1.2.0_schema/mysql/dolphinscheduler_dml.sql
create mode 100644 sql/upgrade/1.2.0_schema/postgresql/dolphinscheduler_ddl.sql
create mode 100644 sql/upgrade/1.2.0_schema/postgresql/dolphinscheduler_dml.sql
diff --git a/escheduler-common/src/main/java/cn/escheduler/common/utils/MysqlUtils.java b/escheduler-common/src/main/java/cn/escheduler/common/utils/ConnectionUtils.java
similarity index 87%
rename from escheduler-common/src/main/java/cn/escheduler/common/utils/MysqlUtils.java
rename to escheduler-common/src/main/java/cn/escheduler/common/utils/ConnectionUtils.java
index 3520527c1a..33e5d41b97 100644
--- a/escheduler-common/src/main/java/cn/escheduler/common/utils/MysqlUtils.java
+++ b/escheduler-common/src/main/java/cn/escheduler/common/utils/ConnectionUtils.java
@@ -21,16 +21,16 @@ import org.slf4j.LoggerFactory;
import java.sql.*;
-public class MysqlUtils {
+public class ConnectionUtils {
- public static final Logger logger = LoggerFactory.getLogger(MysqlUtils.class);
+ public static final Logger logger = LoggerFactory.getLogger(ConnectionUtils.class);
- private static MysqlUtils instance;
+ private static ConnectionUtils instance;
- MysqlUtils() {
+ ConnectionUtils() {
}
- public static MysqlUtils getInstance() {
+ public static ConnectionUtils getInstance() {
if (null == instance) {
syncInit();
}
@@ -39,7 +39,7 @@ public class MysqlUtils {
private static synchronized void syncInit() {
if (instance == null) {
- instance = new MysqlUtils();
+ instance = new ConnectionUtils();
}
}
@@ -76,7 +76,7 @@ public class MysqlUtils {
}
public static void releaseResource(ResultSet rs, PreparedStatement ps, Connection conn) {
- MysqlUtils.getInstance().release(rs,ps,conn);
+ ConnectionUtils.getInstance().release(rs,ps,conn);
if (null != rs) {
try {
rs.close();
diff --git a/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/EschedulerManager.java b/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/DolphinSchedulerManager.java
similarity index 67%
rename from escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/EschedulerManager.java
rename to escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/DolphinSchedulerManager.java
index 15448021c3..210b95ff6f 100644
--- a/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/EschedulerManager.java
+++ b/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/DolphinSchedulerManager.java
@@ -16,6 +16,7 @@
*/
package cn.escheduler.dao.upgrade;
+import cn.escheduler.common.enums.DbType;
import cn.escheduler.common.utils.SchemaUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -25,30 +26,51 @@ import java.util.List;
/**
* upgrade manager
*/
-public class EschedulerManager {
- private static final Logger logger = LoggerFactory.getLogger(EschedulerManager.class);
- UpgradeDao upgradeDao = UpgradeDao.getInstance();
+public class DolphinSchedulerManager {
+ private static final Logger logger = LoggerFactory.getLogger(DolphinSchedulerManager.class);
+ UpgradeDao upgradeDao;
+
+ private void initUpgradeDao() {
+ DbType dbType = UpgradeDao.getDbType();
+ if (dbType != null) {
+ switch (dbType) {
+ case MYSQL:
+ upgradeDao = MysqlUpgradeDao.getInstance();
+ break;
+ case POSTGRESQL:
+ upgradeDao = PostgresqlUpgradeDao.getInstance();
+ break;
+ default:
+ logger.error("not support sql type: {},can't upgrade", dbType);
+ throw new IllegalArgumentException("not support sql type,can't upgrade");
+ }
+ }
+ }
+
+ public DolphinSchedulerManager() {
+ initUpgradeDao();
+ }
- public void initEscheduler() {
+ public void initDolphinScheduler() {
// Determines whether the escheduler table structure has been init
if(upgradeDao.isExistsTable("t_escheduler_version") || upgradeDao.isExistsTable("t_escheduler_queue")) {
logger.info("The database has been initialized. Skip the initialization step");
return;
}
- this.initEschedulerSchema();
+ this.initDolphinSchedulerSchema();
}
- public void initEschedulerSchema() {
+ public void initDolphinSchedulerSchema() {
- logger.info("Start initializing the escheduler manager mysql table structure");
- upgradeDao.initEschedulerSchema();
+ logger.info("Start initializing the DolphinScheduler manager table structure");
+ upgradeDao.initSchema();
}
/**
- * upgrade escheduler
+ * upgrade DolphinScheduler
*/
- public void upgradeEscheduler() throws Exception{
+ public void upgradeDolphinScheduler() throws Exception{
// Gets a list of all upgrades
List schemaList = SchemaUtils.getAllSchemaList();
@@ -76,11 +98,11 @@ public class EschedulerManager {
schemaVersion = schemaDir.split("_")[0];
if(SchemaUtils.isAGreatVersion(schemaVersion , version)) {
- logger.info("upgrade escheduler metadata version from " + version + " to " + schemaVersion);
+ logger.info("upgrade DolphinScheduler metadata version from " + version + " to " + schemaVersion);
- logger.info("Begin upgrading escheduler's mysql table structure");
- upgradeDao.upgradeEscheduler(schemaDir);
+ logger.info("Begin upgrading DolphinScheduler's table structure");
+ upgradeDao.upgradeDolphinScheduler(schemaDir);
if(SchemaUtils.isAGreatVersion(version,"1.0.1")){
version = upgradeDao.getCurrentVersion();
}else {
diff --git a/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/MysqlUpgradeDao.java b/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/MysqlUpgradeDao.java
new file mode 100644
index 0000000000..17832896bc
--- /dev/null
+++ b/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/MysqlUpgradeDao.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package cn.escheduler.dao.upgrade;
+
+import cn.escheduler.common.utils.ConnectionUtils;
+import cn.escheduler.dao.datasource.ConnectionFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+
+public class MysqlUpgradeDao extends UpgradeDao {
+
+ public static final Logger logger = LoggerFactory.getLogger(UpgradeDao.class);
+ private static final String T_VERSION_NAME = "t_escheduler_version";
+ private static final String rootDir = System.getProperty("user.dir");
+
+ @Override
+ protected void init() {
+
+ }
+
+ private static class MysqlUpgradeDaoHolder {
+ private static final MysqlUpgradeDao INSTANCE = new MysqlUpgradeDao();
+ }
+
+ private MysqlUpgradeDao() {
+ }
+
+ public static final MysqlUpgradeDao getInstance() {
+ return MysqlUpgradeDaoHolder.INSTANCE;
+ }
+
+
+ /**
+ * Determines whether a table exists
+ * @param tableName
+ * @return
+ */
+ public boolean isExistsTable(String tableName) {
+ Connection conn = null;
+ try {
+ conn = ConnectionFactory.getDataSource().getConnection();
+ ResultSet rs = conn.getMetaData().getTables(null, null, tableName, null);
+ if (rs.next()) {
+ return true;
+ } else {
+ return false;
+ }
+
+ } catch (SQLException e) {
+ logger.error(e.getMessage(),e);
+ throw new RuntimeException(e.getMessage(),e);
+ } finally {
+ ConnectionUtils.releaseResource(null, null, conn);
+
+ }
+
+ }
+
+ /**
+ * Determines whether a field exists in the specified table
+ * @param tableName
+ * @param columnName
+ * @return
+ */
+ public boolean isExistsColumn(String tableName,String columnName) {
+ Connection conn = null;
+ try {
+ conn = ConnectionFactory.getDataSource().getConnection();
+ ResultSet rs = conn.getMetaData().getColumns(null,null,tableName,columnName);
+ if (rs.next()) {
+ return true;
+ } else {
+ return false;
+ }
+
+ } catch (SQLException e) {
+ logger.error(e.getMessage(),e);
+ throw new RuntimeException(e.getMessage(),e);
+ } finally {
+ ConnectionUtils.releaseResource(null, null, conn);
+
+ }
+
+ }
+
+}
diff --git a/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/PostgresqlUpgradeDao.java b/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/PostgresqlUpgradeDao.java
new file mode 100644
index 0000000000..03ec8c819a
--- /dev/null
+++ b/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/PostgresqlUpgradeDao.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package cn.escheduler.dao.upgrade;
+
+import cn.escheduler.common.utils.ConnectionUtils;
+import cn.escheduler.dao.datasource.ConnectionFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+
+public class PostgresqlUpgradeDao extends UpgradeDao {
+
+ public static final Logger logger = LoggerFactory.getLogger(UpgradeDao.class);
+ private static final String T_VERSION_NAME = "t_escheduler_version";
+ private static final String rootDir = System.getProperty("user.dir");
+ private static final String schema = getSchema();
+
+ @Override
+ protected void init() {
+
+ }
+
+ private static class PostgresqlUpgradeDaoHolder {
+ private static final PostgresqlUpgradeDao INSTANCE = new PostgresqlUpgradeDao();
+ }
+
+ private PostgresqlUpgradeDao() {
+ }
+
+ public static final PostgresqlUpgradeDao getInstance() {
+ return PostgresqlUpgradeDaoHolder.INSTANCE;
+ }
+
+
+ @Override
+ public void initSchema(String initSqlPath) {
+ super.initSchema(initSqlPath);
+ }
+
+ private static String getSchema(){
+ Connection conn = null;
+ PreparedStatement pstmt = null;
+ try {
+ conn = ConnectionFactory.getDataSource().getConnection();
+ pstmt = conn.prepareStatement("select current_schema()");
+ ResultSet resultSet = pstmt.executeQuery();
+ while (resultSet.next()){
+ if(resultSet.isFirst()){
+ return resultSet.getString(1);
+ }
+ }
+ } catch (SQLException e) {
+ logger.error(e.getMessage(),e);
+
+ } finally {
+ ConnectionUtils.releaseResource(null, null, conn);
+ }
+ return "";
+ }
+
+ /**
+ * Determines whether a table exists
+ * @param tableName
+ * @return
+ */
+ public boolean isExistsTable(String tableName) {
+ Connection conn = null;
+ try {
+ conn = ConnectionFactory.getDataSource().getConnection();
+
+ ResultSet rs = conn.getMetaData().getTables(null, schema, tableName, null);
+ if (rs.next()) {
+ return true;
+ } else {
+ return false;
+ }
+
+ } catch (SQLException e) {
+ logger.error(e.getMessage(),e);
+ throw new RuntimeException(e.getMessage(),e);
+ } finally {
+ ConnectionUtils.releaseResource(null, null, conn);
+ }
+
+ }
+
+ /**
+ * Determines whether a field exists in the specified table
+ * @param tableName
+ * @param columnName
+ * @return
+ */
+ public boolean isExistsColumn(String tableName,String columnName) {
+ Connection conn = null;
+ try {
+ conn = ConnectionFactory.getDataSource().getConnection();
+ ResultSet rs = conn.getMetaData().getColumns(null,schema,tableName,columnName);
+ if (rs.next()) {
+ return true;
+ } else {
+ return false;
+ }
+
+ } catch (SQLException e) {
+ logger.error(e.getMessage(),e);
+ throw new RuntimeException(e.getMessage(),e);
+ } finally {
+ ConnectionUtils.releaseResource(null, null, conn);
+
+ }
+
+ }
+
+}
diff --git a/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/UpgradeDao.java b/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/UpgradeDao.java
index 6fc8a61417..13c0deffb9 100644
--- a/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/UpgradeDao.java
+++ b/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/UpgradeDao.java
@@ -16,7 +16,8 @@
*/
package cn.escheduler.dao.upgrade;
-import cn.escheduler.common.utils.MysqlUtils;
+import cn.escheduler.common.enums.DbType;
+import cn.escheduler.common.utils.ConnectionUtils;
import cn.escheduler.common.utils.ScriptRunner;
import cn.escheduler.dao.AbstractBaseDao;
import cn.escheduler.dao.datasource.ConnectionFactory;
@@ -29,8 +30,9 @@ import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
+import java.text.MessageFormat;
-public class UpgradeDao extends AbstractBaseDao {
+public abstract class UpgradeDao extends AbstractBaseDao {
public static final Logger logger = LoggerFactory.getLogger(UpgradeDao.class);
private static final String T_VERSION_NAME = "t_escheduler_version";
@@ -41,35 +43,59 @@ public class UpgradeDao extends AbstractBaseDao {
}
- private static class UpgradeDaoHolder {
- private static final UpgradeDao INSTANCE = new UpgradeDao();
- }
-
- private UpgradeDao() {
+ /**
+ * get db type
+ * @return
+ */
+ public static DbType getDbType(){
+ try {
+ Connection conn = ConnectionFactory.getDataSource().getConnection();
+ String name = conn.getMetaData().getDatabaseProductName().toUpperCase();
+ return DbType.valueOf(name);
+ } catch (Exception e) {
+ logger.error(e.getMessage(),e);
+ return null;
+ }
}
- public static final UpgradeDao getInstance() {
- return UpgradeDaoHolder.INSTANCE;
+ public void initSchema(){
+ DbType dbType = getDbType();
+ String initSqlPath = "";
+ if (dbType != null) {
+ switch (dbType) {
+ case MYSQL:
+ initSqlPath = "/sql/create/release-1.0.0_schema/mysql/";
+ initSchema(initSqlPath);
+ break;
+ case POSTGRESQL:
+ initSqlPath = "/sql/create/release-1.2.0_schema/postgresql/";
+ initSchema(initSqlPath);
+ break;
+ default:
+ logger.error("not support sql type: {},can't upgrade", dbType);
+ throw new IllegalArgumentException("not support sql type,can't upgrade");
+ }
+ }
}
-
- public void initEschedulerSchema() {
+ public void initSchema(String initSqlPath) {
// Execute the escheduler DDL, it cannot be rolled back
- runInitEschedulerDDL();
+ runInitDDL(initSqlPath);
// Execute the escheduler DML, it can be rolled back
- runInitEschedulerDML();
+ runInitDML(initSqlPath);
}
- private void runInitEschedulerDML() {
+ private void runInitDML(String initSqlPath) {
Connection conn = null;
if (StringUtils.isEmpty(rootDir)) {
throw new RuntimeException("Environment variable user.dir not found");
}
- String mysqlSQLFilePath = rootDir + "/sql/create/release-1.0.0_schema/mysql/escheduler_dml.sql";
+ //String mysqlSQLFilePath = rootDir + "/sql/create/release-1.0.0_schema/mysql/escheduler_dml.sql";
+ String mysqlSQLFilePath = rootDir + initSqlPath + "dolphinscheduler_dml.sql";
try {
conn = ConnectionFactory.getDataSource().getConnection();
conn.setAutoCommit(false);
@@ -98,18 +124,19 @@ public class UpgradeDao extends AbstractBaseDao {
logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage(),e);
} finally {
- MysqlUtils.releaseResource(null, null, conn);
+ ConnectionUtils.releaseResource(null, null, conn);
}
}
- private void runInitEschedulerDDL() {
+ private void runInitDDL(String initSqlPath) {
Connection conn = null;
if (StringUtils.isEmpty(rootDir)) {
throw new RuntimeException("Environment variable user.dir not found");
}
- String mysqlSQLFilePath = rootDir + "/sql/create/release-1.0.0_schema/mysql/escheduler_ddl.sql";
+ //String mysqlSQLFilePath = rootDir + "/sql/create/release-1.0.0_schema/mysql/dolphinscheduler_ddl.sql";
+ String mysqlSQLFilePath = rootDir + initSqlPath + "dolphinscheduler_ddl.sql";
try {
conn = ConnectionFactory.getDataSource().getConnection();
// Execute the escheduler_ddl.sql script to create the table structure of escheduler
@@ -126,7 +153,7 @@ public class UpgradeDao extends AbstractBaseDao {
logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage(),e);
} finally {
- MysqlUtils.releaseResource(null, null, conn);
+ ConnectionUtils.releaseResource(null, null, conn);
}
@@ -137,26 +164,7 @@ public class UpgradeDao extends AbstractBaseDao {
* @param tableName
* @return
*/
- public boolean isExistsTable(String tableName) {
- Connection conn = null;
- try {
- conn = ConnectionFactory.getDataSource().getConnection();
- ResultSet rs = conn.getMetaData().getTables(null, null, tableName, null);
- if (rs.next()) {
- return true;
- } else {
- return false;
- }
-
- } catch (SQLException e) {
- logger.error(e.getMessage(),e);
- throw new RuntimeException(e.getMessage(),e);
- } finally {
- MysqlUtils.releaseResource(null, null, conn);
-
- }
-
- }
+ public abstract boolean isExistsTable(String tableName);
/**
* Determines whether a field exists in the specified table
@@ -164,26 +172,7 @@ public class UpgradeDao extends AbstractBaseDao {
* @param columnName
* @return
*/
- public boolean isExistsColumn(String tableName,String columnName) {
- Connection conn = null;
- try {
- conn = ConnectionFactory.getDataSource().getConnection();
- ResultSet rs = conn.getMetaData().getColumns(null,null,tableName,columnName);
- if (rs.next()) {
- return true;
- } else {
- return false;
- }
-
- } catch (SQLException e) {
- logger.error(e.getMessage(),e);
- throw new RuntimeException(e.getMessage(),e);
- } finally {
- MysqlUtils.releaseResource(null, null, conn);
-
- }
-
- }
+ public abstract boolean isExistsColumn(String tableName,String columnName);
public String getCurrentVersion() {
@@ -207,26 +196,26 @@ public class UpgradeDao extends AbstractBaseDao {
logger.error(e.getMessage(),e);
throw new RuntimeException("sql: " + sql, e);
} finally {
- MysqlUtils.releaseResource(rs, pstmt, conn);
-
+ ConnectionUtils.releaseResource(rs, pstmt, conn);
}
}
- public void upgradeEscheduler(String schemaDir) {
+ public void upgradeDolphinScheduler(String schemaDir) {
- upgradeEschedulerDDL(schemaDir);
+ upgradeDolphinSchedulerDDL(schemaDir);
- upgradeEschedulerDML(schemaDir);
+ upgradeDolphinSchedulerDML(schemaDir);
}
- private void upgradeEschedulerDML(String schemaDir) {
+ private void upgradeDolphinSchedulerDML(String schemaDir) {
String schemaVersion = schemaDir.split("_")[0];
if (StringUtils.isEmpty(rootDir)) {
throw new RuntimeException("Environment variable user.dir not found");
}
- String mysqlSQLFilePath = rootDir + "/sql/upgrade/" + schemaDir + "/mysql/escheduler_dml.sql";
+ String mysqlSQLFilePath = MessageFormat.format("{0}/sql/upgrade/{1}/{2}/dolphinscheduler_dml.sql",rootDir,schemaDir,getDbType().name().toLowerCase());
+        logger.info("mysqlSQLFilePath: {}", mysqlSQLFilePath);
Connection conn = null;
PreparedStatement pstmt = null;
try {
@@ -277,16 +266,16 @@ public class UpgradeDao extends AbstractBaseDao {
logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage(),e);
} finally {
- MysqlUtils.releaseResource(null, pstmt, conn);
+ ConnectionUtils.releaseResource(null, pstmt, conn);
}
}
- private void upgradeEschedulerDDL(String schemaDir) {
+ private void upgradeDolphinSchedulerDDL(String schemaDir) {
if (StringUtils.isEmpty(rootDir)) {
throw new RuntimeException("Environment variable user.dir not found");
}
- String mysqlSQLFilePath = rootDir + "/sql/upgrade/" + schemaDir + "/mysql/escheduler_ddl.sql";
+        String mysqlSQLFilePath = MessageFormat.format("{0}/sql/upgrade/{1}/{2}/dolphinscheduler_ddl.sql",rootDir,schemaDir,getDbType().name().toLowerCase());
Connection conn = null;
PreparedStatement pstmt = null;
try {
@@ -316,7 +305,7 @@ public class UpgradeDao extends AbstractBaseDao {
logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage(),e);
} finally {
- MysqlUtils.releaseResource(null, pstmt, conn);
+ ConnectionUtils.releaseResource(null, pstmt, conn);
}
}
@@ -338,7 +327,7 @@ public class UpgradeDao extends AbstractBaseDao {
logger.error(e.getMessage(),e);
throw new RuntimeException("sql: " + upgradeSQL, e);
} finally {
- MysqlUtils.releaseResource(null, pstmt, conn);
+ ConnectionUtils.releaseResource(null, pstmt, conn);
}
}
diff --git a/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/CreateEscheduler.java b/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/CreateDolphinScheduler.java
similarity index 66%
rename from escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/CreateEscheduler.java
rename to escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/CreateDolphinScheduler.java
index 2f1e070e7b..2c827dfea4 100644
--- a/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/CreateEscheduler.java
+++ b/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/CreateDolphinScheduler.java
@@ -16,29 +16,29 @@
*/
package cn.escheduler.dao.upgrade.shell;
-import cn.escheduler.dao.upgrade.EschedulerManager;
+import cn.escheduler.dao.upgrade.DolphinSchedulerManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
- * init escheduler
+ * init DolphinScheduler
*
*/
-public class CreateEscheduler {
+public class CreateDolphinScheduler {
- private static final Logger logger = LoggerFactory.getLogger(CreateEscheduler.class);
+ private static final Logger logger = LoggerFactory.getLogger(CreateDolphinScheduler.class);
public static void main(String[] args) {
- EschedulerManager eschedulerManager = new EschedulerManager();
+ DolphinSchedulerManager dolphinSchedulerManager = new DolphinSchedulerManager();
try {
- eschedulerManager.initEscheduler();
- logger.info("init escheduler finished");
- eschedulerManager.upgradeEscheduler();
- logger.info("upgrade escheduler finished");
- logger.info("create escheduler success");
+ dolphinSchedulerManager.initDolphinScheduler();
+ logger.info("init DolphinScheduler finished");
+ dolphinSchedulerManager.upgradeDolphinScheduler();
+ logger.info("upgrade DolphinScheduler finished");
+ logger.info("create DolphinScheduler success");
} catch (Exception e) {
- logger.error("create escheduler failed",e);
+ logger.error("create DolphinScheduler failed",e);
}
}
diff --git a/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/InitEscheduler.java b/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/InitDolphinScheduler.java
similarity index 72%
rename from escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/InitEscheduler.java
rename to escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/InitDolphinScheduler.java
index e88bb1e3f1..4c01f7413b 100644
--- a/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/InitEscheduler.java
+++ b/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/InitDolphinScheduler.java
@@ -16,23 +16,23 @@
*/
package cn.escheduler.dao.upgrade.shell;
-import cn.escheduler.dao.upgrade.EschedulerManager;
+import cn.escheduler.dao.upgrade.DolphinSchedulerManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
- * init escheduler
+ * init DolphinScheduler
*
*/
-public class InitEscheduler {
+public class InitDolphinScheduler {
- private static final Logger logger = LoggerFactory.getLogger(InitEscheduler.class);
+ private static final Logger logger = LoggerFactory.getLogger(InitDolphinScheduler.class);
public static void main(String[] args) {
- Thread.currentThread().setName("manager-InitEscheduler");
- EschedulerManager eschedulerManager = new EschedulerManager();
- eschedulerManager.initEscheduler();
- logger.info("init escheduler finished");
+ Thread.currentThread().setName("manager-InitDolphinScheduler");
+ DolphinSchedulerManager dolphinSchedulerManager = new DolphinSchedulerManager();
+ dolphinSchedulerManager.initDolphinScheduler();
+ logger.info("init DolphinScheduler finished");
}
}
diff --git a/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/UpgradeEscheduler.java b/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/UpgradeDolphinScheduler.java
similarity index 73%
rename from escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/UpgradeEscheduler.java
rename to escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/UpgradeDolphinScheduler.java
index 7608d8ce6f..56e706cd2b 100644
--- a/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/UpgradeEscheduler.java
+++ b/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/UpgradeDolphinScheduler.java
@@ -16,28 +16,26 @@
*/
package cn.escheduler.dao.upgrade.shell;
-import cn.escheduler.dao.upgrade.EschedulerManager;
+import cn.escheduler.dao.upgrade.DolphinSchedulerManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
- * upgrade escheduler database
+ * upgrade DolphinScheduler database
*/
-public class UpgradeEscheduler {
- private static final Logger logger = LoggerFactory.getLogger(UpgradeEscheduler.class);
+public class UpgradeDolphinScheduler {
+ private static final Logger logger = LoggerFactory.getLogger(UpgradeDolphinScheduler.class);
public static void main(String[] args) {
- EschedulerManager eschedulerManager = new EschedulerManager();
+ DolphinSchedulerManager dolphinSchedulerManager = new DolphinSchedulerManager();
try {
- eschedulerManager.upgradeEscheduler();
- logger.info("upgrade escheduler success");
+ dolphinSchedulerManager.upgradeDolphinScheduler();
+ logger.info("upgrade DolphinScheduler success");
} catch (Exception e) {
logger.error(e.getMessage(),e);
- logger.info("Upgrade escheduler failed");
+ logger.info("Upgrade DolphinScheduler failed");
throw new RuntimeException(e);
}
-
-
}
diff --git a/script/create_escheduler.sh b/script/create_escheduler.sh
index c88da7bb6d..ded20a29b3 100644
--- a/script/create_escheduler.sh
+++ b/script/create_escheduler.sh
@@ -13,7 +13,7 @@ export ESCHEDULER_LIB_JARS=$ESCHEDULER_HOME/lib/*
export ESCHEDULER_OPTS="-server -Xmx1g -Xms1g -Xss512k -XX:+DisableExplicitGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:LargePageSizeInBytes=128m -XX:+UseFastAccessorMethods -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70"
export STOP_TIMEOUT=5
-CLASS=cn.escheduler.dao.upgrade.shell.CreateEscheduler
+CLASS=cn.escheduler.dao.upgrade.shell.CreateDolphinScheduler
exec_command="$ESCHEDULER_OPTS -classpath $ESCHEDULER_CONF_DIR:$ESCHEDULER_LIB_JARS $CLASS"
diff --git a/script/upgrade_escheduler.sh b/script/upgrade_escheduler.sh
index 6bd6439a58..453bd611ac 100644
--- a/script/upgrade_escheduler.sh
+++ b/script/upgrade_escheduler.sh
@@ -13,7 +13,7 @@ export ESCHEDULER_LIB_JARS=$ESCHEDULER_HOME/lib/*
export ESCHEDULER_OPTS="-server -Xmx1g -Xms1g -Xss512k -XX:+DisableExplicitGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:LargePageSizeInBytes=128m -XX:+UseFastAccessorMethods -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70"
export STOP_TIMEOUT=5
-CLASS=cn.escheduler.dao.upgrade.shell.UpgradeEscheduler
+CLASS=cn.escheduler.dao.upgrade.shell.UpgradeDolphinScheduler
exec_command="$ESCHEDULER_OPTS -classpath $ESCHEDULER_CONF_DIR:$ESCHEDULER_LIB_JARS $CLASS"
diff --git a/sql/create/release-1.0.0_schema/mysql/escheduler_ddl.sql b/sql/create/release-1.0.0_schema/mysql/dolphinscheduler_ddl.sql
similarity index 100%
rename from sql/create/release-1.0.0_schema/mysql/escheduler_ddl.sql
rename to sql/create/release-1.0.0_schema/mysql/dolphinscheduler_ddl.sql
diff --git a/sql/create/release-1.0.0_schema/mysql/escheduler_dml.sql b/sql/create/release-1.0.0_schema/mysql/dolphinscheduler_dml.sql
similarity index 100%
rename from sql/create/release-1.0.0_schema/mysql/escheduler_dml.sql
rename to sql/create/release-1.0.0_schema/mysql/dolphinscheduler_dml.sql
diff --git a/sql/create/release-1.2.0_schema/postgresql/dolphinscheduler_ddl.sql b/sql/create/release-1.2.0_schema/postgresql/dolphinscheduler_ddl.sql
new file mode 100644
index 0000000000..3dc3a5b9a3
--- /dev/null
+++ b/sql/create/release-1.2.0_schema/postgresql/dolphinscheduler_ddl.sql
@@ -0,0 +1,804 @@
+DROP TABLE IF EXISTS QRTZ_BLOB_TRIGGERS;
+CREATE TABLE QRTZ_BLOB_TRIGGERS (
+ SCHED_NAME varchar(120) NOT NULL,
+ TRIGGER_NAME varchar(200) NOT NULL,
+ TRIGGER_GROUP varchar(200) NOT NULL,
+ BLOB_DATA bytea NULL,
+ PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+);
+
+--
+-- Table structure for table QRTZ_CALENDARS
+--
+
+DROP TABLE IF EXISTS QRTZ_CALENDARS;
+CREATE TABLE QRTZ_CALENDARS (
+ SCHED_NAME varchar(120) NOT NULL,
+ CALENDAR_NAME varchar(200) NOT NULL,
+ CALENDAR bytea NOT NULL,
+ PRIMARY KEY (SCHED_NAME,CALENDAR_NAME)
+);
+--
+-- Table structure for table QRTZ_CRON_TRIGGERS
+--
+
+DROP TABLE IF EXISTS QRTZ_CRON_TRIGGERS;
+CREATE TABLE QRTZ_CRON_TRIGGERS (
+ SCHED_NAME varchar(120) NOT NULL,
+ TRIGGER_NAME varchar(200) NOT NULL,
+ TRIGGER_GROUP varchar(200) NOT NULL,
+ CRON_EXPRESSION varchar(120) NOT NULL,
+ TIME_ZONE_ID varchar(80) DEFAULT NULL,
+ PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+);
+
+--
+-- Table structure for table QRTZ_FIRED_TRIGGERS
+--
+
+DROP TABLE IF EXISTS QRTZ_FIRED_TRIGGERS;
+CREATE TABLE QRTZ_FIRED_TRIGGERS (
+ SCHED_NAME varchar(120) NOT NULL,
+ ENTRY_ID varchar(95) NOT NULL,
+ TRIGGER_NAME varchar(200) NOT NULL,
+ TRIGGER_GROUP varchar(200) NOT NULL,
+ INSTANCE_NAME varchar(200) NOT NULL,
+ FIRED_TIME bigint NOT NULL,
+ SCHED_TIME bigint NOT NULL,
+ PRIORITY int NOT NULL,
+ STATE varchar(16) NOT NULL,
+ JOB_NAME varchar(200) DEFAULT NULL,
+ JOB_GROUP varchar(200) DEFAULT NULL,
+ IS_NONCONCURRENT varchar(1) DEFAULT NULL,
+ REQUESTS_RECOVERY varchar(1) DEFAULT NULL,
+ PRIMARY KEY (SCHED_NAME,ENTRY_ID)
+) ;
+ create index IDX_QRTZ_FT_TRIG_INST_NAME on QRTZ_FIRED_TRIGGERS (SCHED_NAME,INSTANCE_NAME);
+ create index IDX_QRTZ_FT_INST_JOB_REQ_RCVRY on QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME,REQUESTS_RECOVERY);
+ create index IDX_QRTZ_FT_J_G on QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP);
+ create index IDX_QRTZ_FT_JG on QRTZ_FIRED_TRIGGERS (SCHED_NAME,JOB_GROUP);
+ create index IDX_QRTZ_FT_T_G on QRTZ_FIRED_TRIGGERS (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
+ create index IDX_QRTZ_FT_TG on QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_GROUP);
+
+--
+-- Table structure for table QRTZ_LOCKS
+--
+
+DROP TABLE IF EXISTS QRTZ_LOCKS;
+CREATE TABLE QRTZ_LOCKS (
+ SCHED_NAME varchar(120) NOT NULL,
+ LOCK_NAME varchar(40) NOT NULL,
+ PRIMARY KEY (SCHED_NAME,LOCK_NAME)
+) ;
+
+--
+-- Table structure for table QRTZ_PAUSED_TRIGGER_GRPS
+--
+
+DROP TABLE IF EXISTS QRTZ_PAUSED_TRIGGER_GRPS;
+CREATE TABLE QRTZ_PAUSED_TRIGGER_GRPS (
+ SCHED_NAME varchar(120) NOT NULL,
+ TRIGGER_GROUP varchar(200) NOT NULL,
+ PRIMARY KEY (SCHED_NAME,TRIGGER_GROUP)
+) ;
+
+--
+-- Table structure for table QRTZ_SCHEDULER_STATE
+--
+
+DROP TABLE IF EXISTS QRTZ_SCHEDULER_STATE;
+CREATE TABLE QRTZ_SCHEDULER_STATE (
+ SCHED_NAME varchar(120) NOT NULL,
+ INSTANCE_NAME varchar(200) NOT NULL,
+ LAST_CHECKIN_TIME bigint NOT NULL,
+ CHECKIN_INTERVAL bigint NOT NULL,
+ PRIMARY KEY (SCHED_NAME,INSTANCE_NAME)
+) ;
+
+--
+-- Table structure for table QRTZ_SIMPLE_TRIGGERS
+--
+
+DROP TABLE IF EXISTS QRTZ_SIMPLE_TRIGGERS;
+CREATE TABLE QRTZ_SIMPLE_TRIGGERS (
+ SCHED_NAME varchar(120) NOT NULL,
+ TRIGGER_NAME varchar(200) NOT NULL,
+ TRIGGER_GROUP varchar(200) NOT NULL,
+ REPEAT_COUNT bigint NOT NULL,
+ REPEAT_INTERVAL bigint NOT NULL,
+ TIMES_TRIGGERED bigint NOT NULL,
+ PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+
+) ;
+
+--
+-- Table structure for table QRTZ_SIMPROP_TRIGGERS
+--
+
+DROP TABLE IF EXISTS QRTZ_SIMPROP_TRIGGERS;
+CREATE TABLE QRTZ_SIMPROP_TRIGGERS (
+ SCHED_NAME varchar(120) NOT NULL,
+ TRIGGER_NAME varchar(200) NOT NULL,
+ TRIGGER_GROUP varchar(200) NOT NULL,
+ STR_PROP_1 varchar(512) DEFAULT NULL,
+ STR_PROP_2 varchar(512) DEFAULT NULL,
+ STR_PROP_3 varchar(512) DEFAULT NULL,
+ INT_PROP_1 int DEFAULT NULL,
+ INT_PROP_2 int DEFAULT NULL,
+ LONG_PROP_1 bigint DEFAULT NULL,
+ LONG_PROP_2 bigint DEFAULT NULL,
+ DEC_PROP_1 decimal(13,4) DEFAULT NULL,
+ DEC_PROP_2 decimal(13,4) DEFAULT NULL,
+ BOOL_PROP_1 varchar(1) DEFAULT NULL,
+ BOOL_PROP_2 varchar(1) DEFAULT NULL,
+ PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+) ;
+
+--
+-- Table structure for table QRTZ_TRIGGERS
+--
+
+DROP TABLE IF EXISTS QRTZ_TRIGGERS;
+CREATE TABLE QRTZ_TRIGGERS (
+ SCHED_NAME varchar(120) NOT NULL,
+ TRIGGER_NAME varchar(200) NOT NULL,
+ TRIGGER_GROUP varchar(200) NOT NULL,
+ JOB_NAME varchar(200) NOT NULL,
+ JOB_GROUP varchar(200) NOT NULL,
+ DESCRIPTION varchar(250) DEFAULT NULL,
+ NEXT_FIRE_TIME bigint DEFAULT NULL,
+ PREV_FIRE_TIME bigint DEFAULT NULL,
+ PRIORITY int DEFAULT NULL,
+ TRIGGER_STATE varchar(16) NOT NULL,
+ TRIGGER_TYPE varchar(8) NOT NULL,
+ START_TIME bigint NOT NULL,
+ END_TIME bigint DEFAULT NULL,
+ CALENDAR_NAME varchar(200) DEFAULT NULL,
+ MISFIRE_INSTR smallint DEFAULT NULL,
+ JOB_DATA bytea,
+ PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+) ;
+
+ create index IDX_QRTZ_T_J on QRTZ_TRIGGERS (SCHED_NAME,JOB_NAME,JOB_GROUP);
+ create index IDX_QRTZ_T_JG on QRTZ_TRIGGERS (SCHED_NAME,JOB_GROUP);
+ create index IDX_QRTZ_T_C on QRTZ_TRIGGERS (SCHED_NAME,CALENDAR_NAME);
+ create index IDX_QRTZ_T_G on QRTZ_TRIGGERS (SCHED_NAME,TRIGGER_GROUP);
+ create index IDX_QRTZ_T_STATE on QRTZ_TRIGGERS (SCHED_NAME,TRIGGER_STATE);
+ create index IDX_QRTZ_T_N_STATE on QRTZ_TRIGGERS (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_STATE);
+ create index IDX_QRTZ_T_N_G_STATE on QRTZ_TRIGGERS (SCHED_NAME,TRIGGER_GROUP,TRIGGER_STATE);
+ create index IDX_QRTZ_T_NEXT_FIRE_TIME on QRTZ_TRIGGERS (SCHED_NAME,NEXT_FIRE_TIME);
+ create index IDX_QRTZ_T_NFT_ST on QRTZ_TRIGGERS (SCHED_NAME,TRIGGER_STATE,NEXT_FIRE_TIME);
+ create index IDX_QRTZ_T_NFT_MISFIRE on QRTZ_TRIGGERS (SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME);
+ create index IDX_QRTZ_T_NFT_ST_MISFIRE on QRTZ_TRIGGERS (SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE);
+ create index IDX_QRTZ_T_NFT_ST_MISFIRE_GRP on QRTZ_TRIGGERS (SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE);
+
+
+--
+-- Table structure for table QRTZ_JOB_DETAILS
+--
+
+DROP TABLE IF EXISTS QRTZ_JOB_DETAILS;
+CREATE TABLE QRTZ_JOB_DETAILS (
+ SCHED_NAME varchar(120) NOT NULL,
+ JOB_NAME varchar(200) NOT NULL,
+ JOB_GROUP varchar(200) NOT NULL,
+ DESCRIPTION varchar(250) DEFAULT NULL,
+ JOB_CLASS_NAME varchar(250) NOT NULL,
+ IS_DURABLE varchar(1) NOT NULL,
+ IS_NONCONCURRENT varchar(1) NOT NULL,
+ IS_UPDATE_DATA varchar(1) NOT NULL,
+ REQUESTS_RECOVERY varchar(1) NOT NULL,
+ JOB_DATA bytea,
+ PRIMARY KEY (SCHED_NAME,JOB_NAME,JOB_GROUP)
+) ;
+ create index IDX_QRTZ_J_REQ_RECOVERY on QRTZ_JOB_DETAILS (SCHED_NAME,REQUESTS_RECOVERY);
+ create index IDX_QRTZ_J_GRP on QRTZ_JOB_DETAILS (SCHED_NAME,JOB_GROUP);
+
+alter table QRTZ_BLOB_TRIGGERS drop CONSTRAINT if EXISTS QRTZ_BLOB_TRIGGERS_ibfk_1;
+alter table QRTZ_BLOB_TRIGGERS add CONSTRAINT QRTZ_BLOB_TRIGGERS_ibfk_1 FOREIGN KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP);
+
+alter table QRTZ_CRON_TRIGGERS drop CONSTRAINT if EXISTS QRTZ_CRON_TRIGGERS_ibfk_1;
+alter table QRTZ_CRON_TRIGGERS add CONSTRAINT QRTZ_CRON_TRIGGERS_ibfk_1 FOREIGN KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP);
+
+alter table QRTZ_SIMPLE_TRIGGERS drop CONSTRAINT if EXISTS QRTZ_SIMPLE_TRIGGERS_ibfk_1;
+alter table QRTZ_SIMPLE_TRIGGERS add CONSTRAINT QRTZ_SIMPLE_TRIGGERS_ibfk_1 FOREIGN KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP);
+
+alter table QRTZ_SIMPROP_TRIGGERS drop CONSTRAINT if EXISTS QRTZ_SIMPROP_TRIGGERS_ibfk_1;
+alter table QRTZ_SIMPROP_TRIGGERS add CONSTRAINT QRTZ_SIMPROP_TRIGGERS_ibfk_1 FOREIGN KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP);
+
+alter table QRTZ_TRIGGERS drop CONSTRAINT if EXISTS QRTZ_TRIGGERS_ibfk_1;
+alter table QRTZ_TRIGGERS add CONSTRAINT QRTZ_TRIGGERS_ibfk_1 FOREIGN KEY (SCHED_NAME, JOB_NAME, JOB_GROUP) REFERENCES QRTZ_JOB_DETAILS (SCHED_NAME, JOB_NAME, JOB_GROUP);
+
+
+
+--
+-- Table structure for table t_escheduler_access_token
+--
+
+DROP TABLE IF EXISTS t_escheduler_access_token;
+CREATE TABLE t_escheduler_access_token (
+ id int NOT NULL ,
+ user_id int DEFAULT NULL ,
+ token varchar(64) DEFAULT NULL ,
+ expire_time timestamp DEFAULT NULL ,
+ create_time timestamp DEFAULT NULL ,
+ update_time timestamp DEFAULT NULL ,
+ PRIMARY KEY (id)
+) ;
+
+--
+-- Table structure for table t_escheduler_alert
+--
+
+DROP TABLE IF EXISTS t_escheduler_alert;
+CREATE TABLE t_escheduler_alert (
+ id int NOT NULL ,
+ title varchar(64) DEFAULT NULL ,
+ show_type int DEFAULT NULL ,
+ content text ,
+ alert_type int DEFAULT NULL ,
+ alert_status int DEFAULT '0' ,
+ log text ,
+ alertgroup_id int DEFAULT NULL ,
+ receivers text ,
+ receivers_cc text ,
+ create_time timestamp DEFAULT NULL ,
+ update_time timestamp DEFAULT NULL ,
+ PRIMARY KEY (id)
+) ;
+--
+-- Table structure for table t_escheduler_alertgroup
+--
+
+DROP TABLE IF EXISTS t_escheduler_alertgroup;
+CREATE TABLE t_escheduler_alertgroup (
+ id int NOT NULL ,
+ group_name varchar(255) DEFAULT NULL ,
+ group_type int DEFAULT NULL ,
+ "desc" varchar(255) DEFAULT NULL ,
+ create_time timestamp DEFAULT NULL ,
+ update_time timestamp DEFAULT NULL ,
+ PRIMARY KEY (id)
+) ;
+
+--
+-- Table structure for table t_escheduler_command
+--
+
+DROP TABLE IF EXISTS t_escheduler_command;
+CREATE TABLE t_escheduler_command (
+ id int NOT NULL ,
+ command_type int DEFAULT NULL ,
+ process_definition_id int DEFAULT NULL ,
+ command_param text ,
+ task_depend_type int DEFAULT NULL ,
+ failure_strategy int DEFAULT '0' ,
+ warning_type int DEFAULT '0' ,
+ warning_group_id int DEFAULT NULL ,
+ schedule_time timestamp DEFAULT NULL ,
+ start_time timestamp DEFAULT NULL ,
+ executor_id int DEFAULT NULL ,
+ dependence varchar(255) DEFAULT NULL ,
+ update_time timestamp DEFAULT NULL ,
+ process_instance_priority int DEFAULT NULL ,
+ worker_group_id int DEFAULT '-1' ,
+ PRIMARY KEY (id)
+) ;
+
+--
+-- Table structure for table t_escheduler_datasource
+--
+
+DROP TABLE IF EXISTS t_escheduler_datasource;
+CREATE TABLE t_escheduler_datasource (
+ id int NOT NULL ,
+ name varchar(64) NOT NULL ,
+ note varchar(256) DEFAULT NULL ,
+ type int NOT NULL ,
+ user_id int NOT NULL ,
+ connection_params text NOT NULL ,
+ create_time timestamp NOT NULL ,
+ update_time timestamp DEFAULT NULL ,
+ PRIMARY KEY (id)
+) ;
+
+--
+-- Table structure for table t_escheduler_error_command
+--
+
+DROP TABLE IF EXISTS t_escheduler_error_command;
+CREATE TABLE t_escheduler_error_command (
+ id int NOT NULL ,
+ command_type int DEFAULT NULL ,
+ executor_id int DEFAULT NULL ,
+ process_definition_id int DEFAULT NULL ,
+ command_param text ,
+ task_depend_type int DEFAULT NULL ,
+ failure_strategy int DEFAULT '0' ,
+ warning_type int DEFAULT '0' ,
+ warning_group_id int DEFAULT NULL ,
+ schedule_time timestamp DEFAULT NULL ,
+ start_time timestamp DEFAULT NULL ,
+ update_time timestamp DEFAULT NULL ,
+ dependence text ,
+ process_instance_priority int DEFAULT NULL ,
+ worker_group_id int DEFAULT '-1' ,
+ message text ,
+ PRIMARY KEY (id)
+);
+--
+-- Table structure for table t_escheduler_master_server
+--
+
+DROP TABLE IF EXISTS t_escheduler_master_server;
+CREATE TABLE t_escheduler_master_server (
+ id int NOT NULL ,
+ host varchar(45) DEFAULT NULL ,
+ port int DEFAULT NULL ,
+ zk_directory varchar(64) DEFAULT NULL ,
+ res_info varchar(256) DEFAULT NULL ,
+ create_time timestamp DEFAULT NULL ,
+ last_heartbeat_time timestamp DEFAULT NULL ,
+ PRIMARY KEY (id)
+) ;
+
+--
+-- Table structure for table t_escheduler_process_definition
+--
+
+DROP TABLE IF EXISTS t_escheduler_process_definition;
+CREATE TABLE t_escheduler_process_definition (
+ id int NOT NULL ,
+ name varchar(255) DEFAULT NULL ,
+ version int DEFAULT NULL ,
+ release_state int DEFAULT NULL ,
+ project_id int DEFAULT NULL ,
+ user_id int DEFAULT NULL ,
+ process_definition_json text ,
+ "desc" text ,
+ global_params text ,
+ flag int DEFAULT NULL ,
+ locations text ,
+ connects text ,
+ receivers text ,
+ receivers_cc text ,
+ create_time timestamp DEFAULT NULL ,
+ timeout int DEFAULT '0' ,
+ tenant_id int NOT NULL DEFAULT '-1' ,
+ update_time timestamp DEFAULT NULL ,
+ PRIMARY KEY (id)
+) ;
+
+create index process_definition_index on t_escheduler_process_definition (project_id,id);
+
+--
+-- Table structure for table t_escheduler_process_instance
+--
+
+DROP TABLE IF EXISTS t_escheduler_process_instance;
+CREATE TABLE t_escheduler_process_instance (
+ id int NOT NULL ,
+ name varchar(255) DEFAULT NULL ,
+ process_definition_id int DEFAULT NULL ,
+ state int DEFAULT NULL ,
+ recovery int DEFAULT NULL ,
+ start_time timestamp DEFAULT NULL ,
+ end_time timestamp DEFAULT NULL ,
+ run_times int DEFAULT NULL ,
+ host varchar(45) DEFAULT NULL ,
+ command_type int DEFAULT NULL ,
+ command_param text ,
+ task_depend_type int DEFAULT NULL ,
+ max_try_times int DEFAULT '0' ,
+ failure_strategy int DEFAULT '0' ,
+ warning_type int DEFAULT '0' ,
+ warning_group_id int DEFAULT NULL ,
+ schedule_time timestamp DEFAULT NULL ,
+ command_start_time timestamp DEFAULT NULL ,
+ global_params text ,
+ process_instance_json text ,
+ flag int DEFAULT '1' ,
+ update_time timestamp NULL ,
+ is_sub_process int DEFAULT '0' ,
+ executor_id int NOT NULL ,
+ locations text ,
+ connects text ,
+ history_cmd text ,
+ dependence_schedule_times text ,
+ process_instance_priority int DEFAULT NULL ,
+ worker_group_id int DEFAULT '-1' ,
+ timeout int DEFAULT '0' ,
+ tenant_id int NOT NULL DEFAULT '-1' ,
+ PRIMARY KEY (id)
+) ;
+ create index process_instance_index on t_escheduler_process_instance (process_definition_id,id);
+ create index start_time_index on t_escheduler_process_instance (start_time);
+
+--
+-- Table structure for table t_escheduler_project
+--
+
+DROP TABLE IF EXISTS t_escheduler_project;
+CREATE TABLE t_escheduler_project (
+ id int NOT NULL ,
+ name varchar(100) DEFAULT NULL ,
+  "desc" varchar(200) DEFAULT NULL ,
+ user_id int DEFAULT NULL ,
+ flag int DEFAULT '1' ,
+ create_time timestamp DEFAULT CURRENT_TIMESTAMP ,
+ update_time timestamp DEFAULT CURRENT_TIMESTAMP ,
+ PRIMARY KEY (id)
+) ;
+ create index user_id_index on t_escheduler_project (user_id);
+
+--
+-- Table structure for table t_escheduler_queue
+--
+
+DROP TABLE IF EXISTS t_escheduler_queue;
+CREATE TABLE t_escheduler_queue (
+ id int NOT NULL ,
+ queue_name varchar(64) DEFAULT NULL ,
+ queue varchar(64) DEFAULT NULL ,
+ create_time timestamp DEFAULT NULL ,
+ update_time timestamp DEFAULT NULL ,
+ PRIMARY KEY (id)
+);
+
+
+--
+-- Table structure for table t_escheduler_relation_datasource_user
+--
+
+DROP TABLE IF EXISTS t_escheduler_relation_datasource_user;
+CREATE TABLE t_escheduler_relation_datasource_user (
+ id int NOT NULL ,
+ user_id int NOT NULL ,
+ datasource_id int DEFAULT NULL ,
+ perm int DEFAULT '1' ,
+ create_time timestamp DEFAULT NULL ,
+ update_time timestamp DEFAULT NULL ,
+ PRIMARY KEY (id)
+) ;
+;
+
+--
+-- Table structure for table t_escheduler_relation_process_instance
+--
+
+DROP TABLE IF EXISTS t_escheduler_relation_process_instance;
+CREATE TABLE t_escheduler_relation_process_instance (
+ id int NOT NULL ,
+ parent_process_instance_id int DEFAULT NULL ,
+ parent_task_instance_id int DEFAULT NULL ,
+ process_instance_id int DEFAULT NULL ,
+ PRIMARY KEY (id)
+) ;
+
+
+--
+-- Table structure for table t_escheduler_relation_project_user
+--
+
+DROP TABLE IF EXISTS t_escheduler_relation_project_user;
+CREATE TABLE t_escheduler_relation_project_user (
+ id int NOT NULL ,
+ user_id int NOT NULL ,
+ project_id int DEFAULT NULL ,
+ perm int DEFAULT '1' ,
+ create_time timestamp DEFAULT NULL ,
+ update_time timestamp DEFAULT NULL ,
+ PRIMARY KEY (id)
+) ;
+create index relation_project_user_id_index on t_escheduler_relation_project_user (user_id);
+
+--
+-- Table structure for table t_escheduler_relation_resources_user
+--
+
+DROP TABLE IF EXISTS t_escheduler_relation_resources_user;
+CREATE TABLE t_escheduler_relation_resources_user (
+ id int NOT NULL ,
+ user_id int NOT NULL ,
+ resources_id int DEFAULT NULL ,
+ perm int DEFAULT '1' ,
+ create_time timestamp DEFAULT NULL ,
+ update_time timestamp DEFAULT NULL ,
+ PRIMARY KEY (id)
+) ;
+
+--
+-- Table structure for table t_escheduler_relation_udfs_user
+--
+
+DROP TABLE IF EXISTS t_escheduler_relation_udfs_user;
+CREATE TABLE t_escheduler_relation_udfs_user (
+ id int NOT NULL ,
+ user_id int NOT NULL ,
+ udf_id int DEFAULT NULL ,
+ perm int DEFAULT '1' ,
+ create_time timestamp DEFAULT NULL ,
+ update_time timestamp DEFAULT NULL ,
+ PRIMARY KEY (id)
+) ;
+;
+
+--
+-- Table structure for table t_escheduler_relation_user_alertgroup
+--
+
+DROP TABLE IF EXISTS t_escheduler_relation_user_alertgroup;
+CREATE TABLE t_escheduler_relation_user_alertgroup (
+ id int NOT NULL,
+ alertgroup_id int DEFAULT NULL,
+ user_id int DEFAULT NULL,
+ create_time timestamp DEFAULT NULL,
+ update_time timestamp DEFAULT NULL,
+ PRIMARY KEY (id)
+);
+
+--
+-- Table structure for table t_escheduler_resources
+--
+
+DROP TABLE IF EXISTS t_escheduler_resources;
+CREATE TABLE t_escheduler_resources (
+ id int NOT NULL ,
+ alias varchar(64) DEFAULT NULL ,
+ file_name varchar(64) DEFAULT NULL ,
+ "desc" varchar(256) DEFAULT NULL ,
+ user_id int DEFAULT NULL ,
+ type int DEFAULT NULL ,
+ size bigint DEFAULT NULL ,
+ create_time timestamp DEFAULT NULL ,
+ update_time timestamp DEFAULT NULL ,
+ PRIMARY KEY (id)
+) ;
+;
+
+--
+-- Table structure for table t_escheduler_schedules
+--
+
+DROP TABLE IF EXISTS t_escheduler_schedules;
+CREATE TABLE t_escheduler_schedules (
+ id int NOT NULL ,
+ process_definition_id int NOT NULL ,
+ start_time timestamp NOT NULL ,
+ end_time timestamp NOT NULL ,
+ crontab varchar(256) NOT NULL ,
+ failure_strategy int NOT NULL ,
+ user_id int NOT NULL ,
+ release_state int NOT NULL ,
+ warning_type int NOT NULL ,
+ warning_group_id int DEFAULT NULL ,
+ process_instance_priority int DEFAULT NULL ,
+ worker_group_id int DEFAULT '-1' ,
+ create_time timestamp NOT NULL ,
+ update_time timestamp NOT NULL ,
+ PRIMARY KEY (id)
+);
+
+--
+-- Table structure for table t_escheduler_session
+--
+
+DROP TABLE IF EXISTS t_escheduler_session;
+CREATE TABLE t_escheduler_session (
+ id varchar(64) NOT NULL ,
+ user_id int DEFAULT NULL ,
+ ip varchar(45) DEFAULT NULL ,
+ last_login_time timestamp DEFAULT NULL ,
+ PRIMARY KEY (id)
+);
+
+--
+-- Table structure for table t_escheduler_task_instance
+--
+
+DROP TABLE IF EXISTS t_escheduler_task_instance;
+CREATE TABLE t_escheduler_task_instance (
+ id int NOT NULL ,
+ name varchar(255) DEFAULT NULL ,
+ task_type varchar(64) DEFAULT NULL ,
+ process_definition_id int DEFAULT NULL ,
+ process_instance_id int DEFAULT NULL ,
+ task_json text ,
+ state int DEFAULT NULL ,
+ submit_time timestamp DEFAULT NULL ,
+ start_time timestamp DEFAULT NULL ,
+ end_time timestamp DEFAULT NULL ,
+ host varchar(45) DEFAULT NULL ,
+ execute_path varchar(200) DEFAULT NULL ,
+ log_path varchar(200) DEFAULT NULL ,
+ alert_flag int DEFAULT NULL ,
+ retry_times int DEFAULT '0' ,
+ pid int DEFAULT NULL ,
+ app_link varchar(255) DEFAULT NULL ,
+ flag int DEFAULT '1' ,
+ retry_interval int DEFAULT NULL ,
+ max_retry_times int DEFAULT NULL ,
+ task_instance_priority int DEFAULT NULL ,
+ worker_group_id int DEFAULT '-1' ,
+ PRIMARY KEY (id)
+) ;
+
+--
+-- Table structure for table t_escheduler_tenant
+--
+
+DROP TABLE IF EXISTS t_escheduler_tenant;
+CREATE TABLE t_escheduler_tenant (
+ id int NOT NULL ,
+ tenant_code varchar(64) DEFAULT NULL ,
+ tenant_name varchar(64) DEFAULT NULL ,
+ "desc" varchar(256) DEFAULT NULL ,
+ queue_id int DEFAULT NULL ,
+ create_time timestamp DEFAULT NULL ,
+ update_time timestamp DEFAULT NULL ,
+ PRIMARY KEY (id)
+) ;
+
+--
+-- Table structure for table t_escheduler_udfs
+--
+
+DROP TABLE IF EXISTS t_escheduler_udfs;
+CREATE TABLE t_escheduler_udfs (
+ id int NOT NULL ,
+ user_id int NOT NULL ,
+ func_name varchar(100) NOT NULL ,
+ class_name varchar(255) NOT NULL ,
+ type int NOT NULL ,
+ arg_types varchar(255) DEFAULT NULL ,
+ database varchar(255) DEFAULT NULL ,
+ "desc" varchar(255) DEFAULT NULL ,
+ resource_id int NOT NULL ,
+ resource_name varchar(255) NOT NULL ,
+ create_time timestamp NOT NULL ,
+ update_time timestamp NOT NULL ,
+ PRIMARY KEY (id)
+) ;
+
+--
+-- Table structure for table t_escheduler_user
+--
+
+DROP TABLE IF EXISTS t_escheduler_user;
+CREATE TABLE t_escheduler_user (
+ id int NOT NULL ,
+ user_name varchar(64) DEFAULT NULL ,
+ user_password varchar(64) DEFAULT NULL ,
+ user_type int DEFAULT NULL ,
+ email varchar(64) DEFAULT NULL ,
+ phone varchar(11) DEFAULT NULL ,
+ tenant_id int DEFAULT NULL ,
+ create_time timestamp DEFAULT NULL ,
+ update_time timestamp DEFAULT NULL ,
+ queue varchar(64) DEFAULT NULL ,
+ PRIMARY KEY (id)
+);
+
+--
+-- Table structure for table t_escheduler_version
+--
+
+DROP TABLE IF EXISTS t_escheduler_version;
+CREATE TABLE t_escheduler_version (
+ id int NOT NULL ,
+ version varchar(200) NOT NULL,
+ PRIMARY KEY (id)
+) ;
+create index version_index on t_escheduler_version(version);
+
+--
+-- Table structure for table t_escheduler_worker_group
+--
+
+DROP TABLE IF EXISTS t_escheduler_worker_group;
+CREATE TABLE t_escheduler_worker_group (
+ id bigint NOT NULL ,
+ name varchar(256) DEFAULT NULL ,
+ ip_list varchar(256) DEFAULT NULL ,
+ create_time timestamp DEFAULT NULL ,
+ update_time timestamp DEFAULT NULL ,
+ PRIMARY KEY (id)
+) ;
+
+--
+-- Table structure for table t_escheduler_worker_server
+--
+
+DROP TABLE IF EXISTS t_escheduler_worker_server;
+CREATE TABLE t_escheduler_worker_server (
+ id int NOT NULL ,
+ host varchar(45) DEFAULT NULL ,
+ port int DEFAULT NULL ,
+ zk_directory varchar(64) DEFAULT NULL ,
+ res_info varchar(255) DEFAULT NULL ,
+ create_time timestamp DEFAULT NULL ,
+ last_heartbeat_time timestamp DEFAULT NULL ,
+ PRIMARY KEY (id)
+) ;
+
+
+DROP SEQUENCE IF EXISTS t_escheduler_access_token_id_sequence;
+CREATE SEQUENCE t_escheduler_access_token_id_sequence;
+ALTER TABLE t_escheduler_access_token ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_access_token_id_sequence');
+DROP SEQUENCE IF EXISTS t_escheduler_alert_id_sequence;
+CREATE SEQUENCE t_escheduler_alert_id_sequence;
+ALTER TABLE t_escheduler_alert ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_alert_id_sequence');
+DROP SEQUENCE IF EXISTS t_escheduler_alertgroup_id_sequence;
+CREATE SEQUENCE t_escheduler_alertgroup_id_sequence;
+ALTER TABLE t_escheduler_alertgroup ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_alertgroup_id_sequence');
+
+DROP SEQUENCE IF EXISTS t_escheduler_command_id_sequence;
+CREATE SEQUENCE t_escheduler_command_id_sequence;
+ALTER TABLE t_escheduler_command ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_command_id_sequence');
+DROP SEQUENCE IF EXISTS t_escheduler_datasource_id_sequence;
+CREATE SEQUENCE t_escheduler_datasource_id_sequence;
+ALTER TABLE t_escheduler_datasource ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_datasource_id_sequence');
+DROP SEQUENCE IF EXISTS t_escheduler_master_server_id_sequence;
+CREATE SEQUENCE t_escheduler_master_server_id_sequence;
+ALTER TABLE t_escheduler_master_server ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_master_server_id_sequence');
+DROP SEQUENCE IF EXISTS t_escheduler_process_definition_id_sequence;
+CREATE SEQUENCE t_escheduler_process_definition_id_sequence;
+ALTER TABLE t_escheduler_process_definition ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_process_definition_id_sequence');
+DROP SEQUENCE IF EXISTS t_escheduler_process_instance_id_sequence;
+CREATE SEQUENCE t_escheduler_process_instance_id_sequence;
+ALTER TABLE t_escheduler_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_process_instance_id_sequence');
+DROP SEQUENCE IF EXISTS t_escheduler_project_id_sequence;
+CREATE SEQUENCE t_escheduler_project_id_sequence;
+ALTER TABLE t_escheduler_project ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_project_id_sequence');
+DROP SEQUENCE IF EXISTS t_escheduler_queue_id_sequence;
+CREATE SEQUENCE t_escheduler_queue_id_sequence;
+ALTER TABLE t_escheduler_queue ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_queue_id_sequence');
+
+DROP SEQUENCE IF EXISTS t_escheduler_relation_datasource_user_id_sequence;
+CREATE SEQUENCE t_escheduler_relation_datasource_user_id_sequence;
+ALTER TABLE t_escheduler_relation_datasource_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_relation_datasource_user_id_sequence');
+DROP SEQUENCE IF EXISTS t_escheduler_relation_process_instance_id_sequence;
+CREATE SEQUENCE t_escheduler_relation_process_instance_id_sequence;
+ALTER TABLE t_escheduler_relation_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_relation_process_instance_id_sequence');
+DROP SEQUENCE IF EXISTS t_escheduler_relation_project_user_id_sequence;
+CREATE SEQUENCE t_escheduler_relation_project_user_id_sequence;
+ALTER TABLE t_escheduler_relation_project_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_relation_project_user_id_sequence');
+DROP SEQUENCE IF EXISTS t_escheduler_relation_resources_user_id_sequence;
+CREATE SEQUENCE t_escheduler_relation_resources_user_id_sequence;
+ALTER TABLE t_escheduler_relation_resources_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_relation_resources_user_id_sequence');
+DROP SEQUENCE IF EXISTS t_escheduler_relation_udfs_user_id_sequence;
+CREATE SEQUENCE t_escheduler_relation_udfs_user_id_sequence;
+ALTER TABLE t_escheduler_relation_udfs_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_relation_udfs_user_id_sequence');
+DROP SEQUENCE IF EXISTS t_escheduler_relation_user_alertgroup_id_sequence;
+CREATE SEQUENCE t_escheduler_relation_user_alertgroup_id_sequence;
+ALTER TABLE t_escheduler_relation_user_alertgroup ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_relation_user_alertgroup_id_sequence');
+
+DROP SEQUENCE IF EXISTS t_escheduler_resources_id_sequence;
+CREATE SEQUENCE t_escheduler_resources_id_sequence;
+ALTER TABLE t_escheduler_resources ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_resources_id_sequence');
+DROP SEQUENCE IF EXISTS t_escheduler_schedules_id_sequence;
+CREATE SEQUENCE t_escheduler_schedules_id_sequence;
+ALTER TABLE t_escheduler_schedules ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_schedules_id_sequence');
+DROP SEQUENCE IF EXISTS t_escheduler_task_instance_id_sequence;
+CREATE SEQUENCE t_escheduler_task_instance_id_sequence;
+ALTER TABLE t_escheduler_task_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_task_instance_id_sequence');
+DROP SEQUENCE IF EXISTS t_escheduler_tenant_id_sequence;
+CREATE SEQUENCE t_escheduler_tenant_id_sequence;
+ALTER TABLE t_escheduler_tenant ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_tenant_id_sequence');
+DROP SEQUENCE IF EXISTS t_escheduler_udfs_id_sequence;
+CREATE SEQUENCE t_escheduler_udfs_id_sequence;
+ALTER TABLE t_escheduler_udfs ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_udfs_id_sequence');
+DROP SEQUENCE IF EXISTS t_escheduler_user_id_sequence;
+CREATE SEQUENCE t_escheduler_user_id_sequence;
+ALTER TABLE t_escheduler_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_user_id_sequence');
+
+DROP SEQUENCE IF EXISTS t_escheduler_version_id_sequence;
+CREATE SEQUENCE t_escheduler_version_id_sequence;
+ALTER TABLE t_escheduler_version ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_version_id_sequence');
+
+DROP SEQUENCE IF EXISTS t_escheduler_worker_group_id_sequence;
+CREATE SEQUENCE t_escheduler_worker_group_id_sequence;
+ALTER TABLE t_escheduler_worker_group ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_worker_group_id_sequence');
+DROP SEQUENCE IF EXISTS t_escheduler_worker_server_id_sequence;
+CREATE SEQUENCE t_escheduler_worker_server_id_sequence;
+ALTER TABLE t_escheduler_worker_server ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_worker_server_id_sequence');
\ No newline at end of file
diff --git a/sql/create/release-1.2.0_schema/postgresql/dolphinscheduler_dml.sql b/sql/create/release-1.2.0_schema/postgresql/dolphinscheduler_dml.sql
new file mode 100644
index 0000000000..72e60ace3b
--- /dev/null
+++ b/sql/create/release-1.2.0_schema/postgresql/dolphinscheduler_dml.sql
@@ -0,0 +1,8 @@
+-- Records of t_escheduler_user; user: admin, password: escheduler123
+INSERT INTO "t_escheduler_user" VALUES ('1', 'admin', '055a97b5fcd6d120372ad1976518f371', '0', 'xxx@qq.com', 'xx', '0', '2018-03-27 15:48:50', '2018-10-24 17:40:22');
+INSERT INTO "t_escheduler_alertgroup" VALUES (1, 'escheduler管理员告警组', '0', 'escheduler管理员告警组','2018-11-29 10:20:39', '2018-11-29 10:20:39');
+INSERT INTO "t_escheduler_relation_user_alertgroup" VALUES ('1', '1', '1', '2018-11-29 10:22:33', '2018-11-29 10:22:33');
+
+-- Records of t_escheduler_queue; default queue name: default
+INSERT INTO "t_escheduler_queue" VALUES ('1', 'default', 'default');
+INSERT INTO "t_escheduler_version" VALUES ('1', '1.2.0');
\ No newline at end of file
diff --git a/sql/soft_version b/sql/soft_version
index 1cc5f657e0..867e52437a 100644
--- a/sql/soft_version
+++ b/sql/soft_version
@@ -1 +1 @@
-1.1.0
\ No newline at end of file
+1.2.0
\ No newline at end of file
diff --git a/sql/upgrade/1.0.1_schema/mysql/escheduler_ddl.sql b/sql/upgrade/1.0.1_schema/mysql/dolphinscheduler_ddl.sql
similarity index 100%
rename from sql/upgrade/1.0.1_schema/mysql/escheduler_ddl.sql
rename to sql/upgrade/1.0.1_schema/mysql/dolphinscheduler_ddl.sql
diff --git a/sql/upgrade/1.0.1_schema/mysql/escheduler_dml.sql b/sql/upgrade/1.0.1_schema/mysql/dolphinscheduler_dml.sql
similarity index 100%
rename from sql/upgrade/1.0.1_schema/mysql/escheduler_dml.sql
rename to sql/upgrade/1.0.1_schema/mysql/dolphinscheduler_dml.sql
diff --git a/sql/upgrade/1.0.2_schema/mysql/escheduler_ddl.sql b/sql/upgrade/1.0.2_schema/mysql/dolphinscheduler_ddl.sql
similarity index 100%
rename from sql/upgrade/1.0.2_schema/mysql/escheduler_ddl.sql
rename to sql/upgrade/1.0.2_schema/mysql/dolphinscheduler_ddl.sql
diff --git a/sql/upgrade/1.0.2_schema/mysql/escheduler_dml.sql b/sql/upgrade/1.0.2_schema/mysql/dolphinscheduler_dml.sql
similarity index 100%
rename from sql/upgrade/1.0.2_schema/mysql/escheduler_dml.sql
rename to sql/upgrade/1.0.2_schema/mysql/dolphinscheduler_dml.sql
diff --git a/sql/upgrade/1.1.0_schema/mysql/escheduler_ddl.sql b/sql/upgrade/1.1.0_schema/mysql/dolphinscheduler_ddl.sql
similarity index 100%
rename from sql/upgrade/1.1.0_schema/mysql/escheduler_ddl.sql
rename to sql/upgrade/1.1.0_schema/mysql/dolphinscheduler_ddl.sql
diff --git a/sql/upgrade/1.1.0_schema/mysql/escheduler_dml.sql b/sql/upgrade/1.1.0_schema/mysql/dolphinscheduler_dml.sql
similarity index 100%
rename from sql/upgrade/1.1.0_schema/mysql/escheduler_dml.sql
rename to sql/upgrade/1.1.0_schema/mysql/dolphinscheduler_dml.sql
diff --git a/sql/upgrade/1.2.0_schema/mysql/dolphinscheduler_ddl.sql b/sql/upgrade/1.2.0_schema/mysql/dolphinscheduler_ddl.sql
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/sql/upgrade/1.2.0_schema/mysql/dolphinscheduler_dml.sql b/sql/upgrade/1.2.0_schema/mysql/dolphinscheduler_dml.sql
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/sql/upgrade/1.2.0_schema/postgresql/dolphinscheduler_ddl.sql b/sql/upgrade/1.2.0_schema/postgresql/dolphinscheduler_ddl.sql
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/sql/upgrade/1.2.0_schema/postgresql/dolphinscheduler_dml.sql b/sql/upgrade/1.2.0_schema/postgresql/dolphinscheduler_dml.sql
new file mode 100644
index 0000000000..e69de29bb2
From 055b071348e3e506508567ad3ce153706955386b Mon Sep 17 00:00:00 2001
From: qrfxiaoge
Date: Wed, 18 Sep 2019 11:47:41 +0800
Subject: [PATCH 07/23] bugfix-user-specified queue takes precedence over
tenant queue (#769)
Solves the problem that the user-specified queue setting does not take effect.
---
.../src/main/java/cn/escheduler/dao/ProcessDao.java | 2 +-
.../cn/escheduler/server/worker/runner/FetchTaskThread.java | 5 +++--
2 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/escheduler-dao/src/main/java/cn/escheduler/dao/ProcessDao.java b/escheduler-dao/src/main/java/cn/escheduler/dao/ProcessDao.java
index ee0dd48575..73ae868c80 100644
--- a/escheduler-dao/src/main/java/cn/escheduler/dao/ProcessDao.java
+++ b/escheduler-dao/src/main/java/cn/escheduler/dao/ProcessDao.java
@@ -1739,7 +1739,7 @@ public class ProcessDao extends AbstractBaseDao {
* @param processInstanceId
* @return
*/
- public String queryQueueByProcessInstanceId(int processInstanceId){
+ public String queryUserQueueByProcessInstanceId(int processInstanceId){
return userMapper.queryQueueByProcessInstanceId(processInstanceId);
}
diff --git a/escheduler-server/src/main/java/cn/escheduler/server/worker/runner/FetchTaskThread.java b/escheduler-server/src/main/java/cn/escheduler/server/worker/runner/FetchTaskThread.java
index 31c4def1da..07ca740d24 100644
--- a/escheduler-server/src/main/java/cn/escheduler/server/worker/runner/FetchTaskThread.java
+++ b/escheduler-server/src/main/java/cn/escheduler/server/worker/runner/FetchTaskThread.java
@@ -187,8 +187,9 @@ public class FetchTaskThread implements Runnable{
continue;
}
- // set queue for process instance
- taskInstance.getProcessInstance().setQueue(tenant.getQueue());
+ // set queue for process instance, user-specified queue takes precedence over tenant queue
+ String userQueue = processDao.queryUserQueueByProcessInstanceId(taskInstance.getProcessInstanceId());
+ taskInstance.getProcessInstance().setQueue(StringUtils.isEmpty(userQueue) ? tenant.getQueue() : userQueue);
logger.info("worker fetch taskId : {} from queue ", taskInstId);
From 642f3093a42476739b54d32eb363459efee7d7bd Mon Sep 17 00:00:00 2001
From: fancyChuan <1247375074@qq.com>
Date: Wed, 18 Sep 2019 14:59:30 +0800
Subject: [PATCH 08/23] update markdown docs which can not display images
normally (#806)
---
docs/en_US/architecture-design.md | 24 ++++++++++++------------
docs/zh_CN/系统使用手册.md | 2 +-
docs/zh_CN/系统架构设计.md | 26 +++++++++++++-------------
3 files changed, 26 insertions(+), 26 deletions(-)
diff --git a/docs/en_US/architecture-design.md b/docs/en_US/architecture-design.md
index f901fde3dc..e1c8f01f34 100644
--- a/docs/en_US/architecture-design.md
+++ b/docs/en_US/architecture-design.md
@@ -6,7 +6,7 @@ Before explaining the architecture of the schedule system, let us first understa
**DAG:** Full name Directed Acyclic Graph,referred to as DAG。Tasks in the workflow are assembled in the form of directed acyclic graphs, which are topologically traversed from nodes with zero indegrees of ingress until there are no successor nodes. For example, the following picture:
-
+
dag example
@@ -111,7 +111,7 @@ Before explaining the architecture of the schedule system, let us first understa
The centralized design concept is relatively simple. The nodes in the distributed cluster are divided into two roles according to their roles:
-
+
- The role of Master is mainly responsible for task distribution and supervising the health status of Slave. It can dynamically balance the task to Slave, so that the Slave node will not be "busy" or "free".
@@ -125,7 +125,7 @@ Problems in the design of centralized :
###### Decentralization
+
- In the decentralized design, there is usually no Master/Slave concept, all roles are the same, the status is equal, the global Internet is a typical decentralized distributed system, networked arbitrary node equipment down machine , all will only affect a small range of features.
@@ -141,13 +141,13 @@ EasyScheduler uses ZooKeeper distributed locks to implement only one Master to e
1. The core process algorithm for obtaining distributed locks is as follows
-
+
2. Scheduler thread distributed lock implementation flow chart in EasyScheduler:
-
+
##### Third, the thread is insufficient loop waiting problem
@@ -156,7 +156,7 @@ EasyScheduler uses ZooKeeper distributed locks to implement only one Master to e
- If a large number of sub-processes are nested in a large DAG, the following figure will result in a "dead" state:
-
+
In the above figure, MainFlowThread waits for SubFlowThread1 to end, SubFlowThread1 waits for SubFlowThread2 to end, SubFlowThread2 waits for SubFlowThread3 to end, and SubFlowThread3 waits for a new thread in the thread pool, then the entire DAG process cannot end, and thus the thread cannot be released. This forms the state of the child parent process loop waiting. At this point, the scheduling cluster will no longer be available unless a new Master is started to add threads to break such a "stuck."
@@ -180,7 +180,7 @@ Fault tolerance is divided into service fault tolerance and task retry. Service
Service fault tolerance design relies on ZooKeeper's Watcher mechanism. The implementation principle is as follows:
-
+
The Master monitors the directories of other Masters and Workers. If the remove event is detected, the process instance is fault-tolerant or the task instance is fault-tolerant according to the specific business logic.
@@ -190,7 +190,7 @@ The Master monitors the directories of other Masters and Workers. If the remove
- Master fault tolerance flow chart:
-
+
After the ZooKeeper Master is fault-tolerant, it is rescheduled by the Scheduler thread in EasyScheduler. It traverses the DAG to find the "Running" and "Submit Successful" tasks, and monitors the status of its task instance for the "Running" task. You need to determine whether the Task Queue already exists. If it exists, monitor the status of the task instance. If it does not exist, resubmit the task instance.
@@ -200,7 +200,7 @@ After the ZooKeeper Master is fault-tolerant, it is rescheduled by the Scheduler
- Worker fault tolerance flow chart:
-
+
Once the Master Scheduler thread finds the task instance as "need to be fault tolerant", it takes over the task and resubmits.
@@ -239,13 +239,13 @@ In the early scheduling design, if there is no priority design and fair scheduli
- The priority of the process definition is that some processes need to be processed before other processes. This can be configured at the start of the process or at the time of scheduled start. There are 5 levels, followed by HIGHEST, HIGH, MEDIUM, LOW, and LOWEST. As shown below
-
+
- The priority of the task is also divided into 5 levels, followed by HIGHEST, HIGH, MEDIUM, LOW, and LOWEST. As shown below
-
+
##### VI. Logback and gRPC implement log access
@@ -256,7 +256,7 @@ In the early scheduling design, if there is no priority design and fair scheduli
- Considering the lightweightness of EasyScheduler as much as possible, gRPC was chosen to implement remote access log information.
-
+
- We use a custom Logback FileAppender and Filter function to generate a log file for each task instance.
diff --git a/docs/zh_CN/系统使用手册.md b/docs/zh_CN/系统使用手册.md
index 2e5ee635b3..4d0cc21195 100644
--- a/docs/zh_CN/系统使用手册.md
+++ b/docs/zh_CN/系统使用手册.md
@@ -110,7 +110,7 @@
> 点击任务实例节点,点击**查看历史**,可以查看该工作流实例运行的该任务实例列表