diff --git a/README.md b/README.md index 18e2df8da3..51e0cbb8bc 100644 --- a/README.md +++ b/README.md @@ -64,7 +64,7 @@ Overload processing: Task queue mechanism, the number of schedulable tasks on a - Online Demo -More documentation please refer to [EasyScheduler online documentation] +More documentation please refer to [EasyScheduler online documentation] ### Recent R&D plan Work plan of Easy Scheduler: [R&D plan](https://github.com/analysys/EasyScheduler/projects/1), where `In Develop` card is the features of 1.1.0 version , TODO card is to be done (including feature ideas) @@ -80,7 +80,9 @@ Easy Scheduler uses a lot of excellent open source projects, such as google guav It is because of the shoulders of these open source projects that the birth of the Easy Scheduler is possible. We are very grateful for all the open source software used! We also hope that we will not only be the beneficiaries of open source, but also be open source contributors, so we decided to contribute to easy scheduling and promised long-term updates. We also hope that partners who have the same passion and conviction for open source will join in and contribute to open source! ### Get Help -The fastest way to get response from our developers is to submit issues, or add our wechat : 510570367 +1. Submit an issue +1. Mail list: dev@dolphinscheduler.apache.org. Mail to dev-subscribe@dolphinscheduler.apache.org, follow the reply to subscribe the mail list. +1. Contact WeChat group manager, ID 510570367. This is for Mandarin(CN) discussion. ### License Please refer to [LICENSE](https://github.com/analysys/EasyScheduler/blob/dev/LICENSE) file. diff --git a/dockerfile/conf/escheduler/conf/i18n/messages.properties b/dockerfile/conf/escheduler/conf/i18n/messages.properties index a663c71013..88509558a9 100644 --- a/dockerfile/conf/escheduler/conf/i18n/messages.properties +++ b/dockerfile/conf/escheduler/conf/i18n/messages.properties @@ -111,6 +111,7 @@ UPDATE_PROJECT_NOTES=update project PROJECT_ID=project id QUERY_PROJECT_BY_ID_NOTES=query project info by project id QUERY_PROJECT_LIST_PAGING_NOTES=QUERY PROJECT LIST PAGING +QUERY_ALL_PROJECT_LIST_NOTES=query all project list DELETE_PROJECT_BY_ID_NOTES=delete project by id QUERY_UNAUTHORIZED_PROJECT_NOTES=query unauthorized project QUERY_AUTHORIZED_PROJECT_NOTES=query authorized project @@ -152,10 +153,12 @@ VERIFY_PROCCESS_DEFINITION_NAME_NOTES=verify proccess definition name LOGIN_NOTES=user login UPDATE_PROCCESS_DEFINITION_NOTES=update proccess definition PROCESS_DEFINITION_ID=process definition id +PROCESS_DEFINITION_IDS=process definition ids RELEASE_PROCCESS_DEFINITION_NOTES=release proccess definition QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=query proccess definition by id QUERY_PROCCESS_DEFINITION_LIST_NOTES=query proccess definition list QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=query proccess definition list paging +QUERY_ALL_DEFINITION_LIST_NOTES=query all definition list PAGE_NO=page no PROCESS_INSTANCE_ID=process instance id PROCESS_INSTANCE_JSON=process instance info(json format) @@ -170,6 +173,9 @@ LIMIT=limit VIEW_TREE_NOTES=view tree GET_NODE_LIST_BY_DEFINITION_ID_NOTES=get task node list by process definition id PROCESS_DEFINITION_ID_LIST=process definition id list +QUERY_PROCCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=query proccess definition all by project id +DELETE_PROCESS_DEFINITION_BY_ID_NOTES=delete process definition by process definition id +BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=batch delete process definition by process definition ids 
QUERY_PROCESS_INSTANCE_BY_ID_NOTES=query process instance by process instance id DELETE_PROCESS_INSTANCE_BY_ID_NOTES=delete process instance by process instance id TASK_ID=task instance id diff --git a/dockerfile/conf/escheduler/conf/i18n/messages_en_US.properties b/dockerfile/conf/escheduler/conf/i18n/messages_en_US.properties index a663c71013..d06b83fed5 100644 --- a/dockerfile/conf/escheduler/conf/i18n/messages_en_US.properties +++ b/dockerfile/conf/escheduler/conf/i18n/messages_en_US.properties @@ -111,6 +111,7 @@ UPDATE_PROJECT_NOTES=update project PROJECT_ID=project id QUERY_PROJECT_BY_ID_NOTES=query project info by project id QUERY_PROJECT_LIST_PAGING_NOTES=QUERY PROJECT LIST PAGING +QUERY_ALL_PROJECT_LIST_NOTES=query all project list DELETE_PROJECT_BY_ID_NOTES=delete project by id QUERY_UNAUTHORIZED_PROJECT_NOTES=query unauthorized project QUERY_AUTHORIZED_PROJECT_NOTES=query authorized project @@ -152,10 +153,12 @@ VERIFY_PROCCESS_DEFINITION_NAME_NOTES=verify proccess definition name LOGIN_NOTES=user login UPDATE_PROCCESS_DEFINITION_NOTES=update proccess definition PROCESS_DEFINITION_ID=process definition id +PROCESS_DEFINITION_IDS=process definition ids RELEASE_PROCCESS_DEFINITION_NOTES=release proccess definition QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=query proccess definition by id QUERY_PROCCESS_DEFINITION_LIST_NOTES=query proccess definition list QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=query proccess definition list paging +QUERY_ALL_DEFINITION_LIST_NOTES=query all definition list PAGE_NO=page no PROCESS_INSTANCE_ID=process instance id PROCESS_INSTANCE_JSON=process instance info(json format) @@ -170,6 +173,9 @@ LIMIT=limit VIEW_TREE_NOTES=view tree GET_NODE_LIST_BY_DEFINITION_ID_NOTES=get task node list by process definition id PROCESS_DEFINITION_ID_LIST=process definition id list +QUERY_PROCCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=query proccess definition all by project id +DELETE_PROCESS_DEFINITION_BY_ID_NOTES=delete process definition by process definition id +BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=batch delete process definition by process definition ids QUERY_PROCESS_INSTANCE_BY_ID_NOTES=query process instance by process instance id DELETE_PROCESS_INSTANCE_BY_ID_NOTES=delete process instance by process instance id TASK_ID=task instance id diff --git a/dockerfile/conf/escheduler/conf/i18n/messages_zh_CN.properties b/dockerfile/conf/escheduler/conf/i18n/messages_zh_CN.properties index b0d6694d2b..f23e6b7e36 100644 --- a/dockerfile/conf/escheduler/conf/i18n/messages_zh_CN.properties +++ b/dockerfile/conf/escheduler/conf/i18n/messages_zh_CN.properties @@ -110,6 +110,7 @@ UPDATE_PROJECT_NOTES=更新项目 PROJECT_ID=项目ID QUERY_PROJECT_BY_ID_NOTES=通过项目ID查询项目信息 QUERY_PROJECT_LIST_PAGING_NOTES=分页查询项目列表 +QUERY_ALL_PROJECT_LIST_NOTES=查询所有项目 DELETE_PROJECT_BY_ID_NOTES=删除项目通过ID QUERY_UNAUTHORIZED_PROJECT_NOTES=查询未授权的项目 QUERY_AUTHORIZED_PROJECT_NOTES=查询授权项目 @@ -155,8 +156,10 @@ RELEASE_PROCCESS_DEFINITION_NOTES=发布流程定义 QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=查询流程定义通过流程定义ID QUERY_PROCCESS_DEFINITION_LIST_NOTES=查询流程定义列表 QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=分页查询流程定义列表 +QUERY_ALL_DEFINITION_LIST_NOTES=查询所有流程定义 PAGE_NO=页码号 PROCESS_INSTANCE_ID=流程实例ID +PROCESS_INSTANCE_IDS=流程实例ID集合 PROCESS_INSTANCE_JSON=流程实例信息(json格式) SCHEDULE_TIME=定时时间 SYNC_DEFINE=更新流程实例的信息是否同步到流程定义 @@ -168,6 +171,9 @@ LIMIT=显示多少条 VIEW_TREE_NOTES=树状图 GET_NODE_LIST_BY_DEFINITION_ID_NOTES=获得任务节点列表通过流程定义ID PROCESS_DEFINITION_ID_LIST=流程定义id列表 +QUERY_PROCCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=查询流程定义通过项目ID 
+DELETE_PROCESS_DEFINITION_BY_ID_NOTES=删除流程定义通过流程定义ID +BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=批量删除流程定义通过流程定义ID集合 QUERY_PROCESS_INSTANCE_BY_ID_NOTES=查询流程实例通过流程实例ID DELETE_PROCESS_INSTANCE_BY_ID_NOTES=删除流程实例通过流程实例ID TASK_ID=任务实例ID diff --git a/docs/en_US/EasyScheduler Proposal.md b/docs/en_US/EasyScheduler Proposal.md index 965605f63b..6bcea73540 100644 --- a/docs/en_US/EasyScheduler Proposal.md +++ b/docs/en_US/EasyScheduler Proposal.md @@ -6,7 +6,7 @@ EasyScheduler is a distributed ETL scheduling engine with powerful DAG visualiza ## Proposal -EasyScheduler provides many easy-to-use features to accelerate the engineer enficiency on data ETL workflow job. We propose a new concept of 'instance of process' and 'instance of task' to let developers to tuning their jobs on the running state of workflow instead of changing the task's template. Its main objectives are as follows: +EasyScheduler provides many easy-to-use features to accelerate the engineer efficiency on data ETL workflow job. We propose a new concept of 'instance of process' and 'instance of task' to let developers to tuning their jobs on the running state of workflow instead of changing the task's template. Its main objectives are as follows: - Define the complex tasks' dependencies & triggers in a DAG graph by dragging and dropping. - Support cluster HA. @@ -30,7 +30,7 @@ The codes are already under Apache License Version 2.0. We want to find a data processing tool with the following features: -- Easy to use,developers can build a ETL process with a very simple drag and drop operation. not only for ETL developers,people who can't write code also can use this tool for ETL operation such as system adminitrator. +- Easy to use,developers can build a ETL process with a very simple drag and drop operation. not only for ETL developers,people who can't write code also can use this tool for ETL operation such as system administrator. - Solving the problem of "complex task dependencies" , and it can monitor the ETL running status. - Support multi-tenant. - Support many task types: Shell, MR, Spark, SQL (mysql, postgresql, hive, sparksql), Python, Sub_Process, Procedure, etc. @@ -73,7 +73,7 @@ Thus, it is very unlikely that EasyScheduler becomes orphaned. EasyScheduler's core developers have been running it as a community-oriented open source project for some time, several of them already have experience working with open source communities, they are also active in presto, alluxio and other projects. At the same time, we will learn more open source experiences by following the Apache way in our incubator journey. -### Homogenous Developers +### Homogeneous Developers The current developers work across a variety of organizations including Analysys, guandata and hydee; some individual developers are accepted as developers of EasyScheduler as well. @@ -110,7 +110,7 @@ The project consists of three distinct codebases: core and document. The address ## Source and Intellectual Property Submission Plan -As soon as EasyScheduler is approved to join Apache Incubator, Analysys will provide the Software Grant Agreement(SGA) and intial committers will submit ICLA(s). The code is already licensed under the Apache Software License, version 2.0. +As soon as EasyScheduler is approved to join Apache Incubator, Analysys will provide the Software Grant Agreement(SGA) and initial committers will submit ICLA(s). The code is already licensed under the Apache Software License, version 2.0. 
## External Dependencies diff --git a/docs/en_US/architecture-design.md index f901fde3dc..e1c8f01f34 100644 --- a/docs/en_US/architecture-design.md +++ b/docs/en_US/architecture-design.md @@ -6,7 +6,7 @@ Before explaining the architecture of the schedule system, let us first understa **DAG:** Full name Directed Acyclic Graph, abbreviated as DAG. Tasks in a workflow are assembled as a directed acyclic graph, which is topologically traversed from the zero in-degree nodes until no successor nodes remain. For example, the following picture:

- dag示例 + dag示例

dag example

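The traversal just described (start from the zero in-degree nodes, remove them, repeat until no successors remain) is Kahn's topological sort. A minimal, self-contained Java sketch follows; the `Map<String, List<String>>` adjacency shape and class name are illustrative assumptions, not EasyScheduler's internal DAG types:

```java
import java.util.*;

public class DagTraversalSketch {

    /**
     * Kahn's algorithm: repeatedly pop a node whose in-degree is zero,
     * then decrement the in-degree of each of its successors.
     */
    public static List<String> topologicalOrder(Map<String, List<String>> dag) {
        // compute in-degrees; nodes that appear only as successors are added by merge()
        Map<String, Integer> indegree = new HashMap<>();
        dag.keySet().forEach(n -> indegree.putIfAbsent(n, 0));
        for (List<String> successors : dag.values()) {
            for (String s : successors) {
                indegree.merge(s, 1, Integer::sum);
            }
        }

        Deque<String> ready = new ArrayDeque<>();
        indegree.forEach((n, d) -> { if (d == 0) ready.add(n); });

        List<String> order = new ArrayList<>();
        while (!ready.isEmpty()) {
            String node = ready.poll();
            order.add(node);
            for (String s : dag.getOrDefault(node, Collections.emptyList())) {
                if (indegree.merge(s, -1, Integer::sum) == 0) {
                    ready.add(s);   // all predecessors finished, successor is runnable
                }
            }
        }
        if (order.size() != indegree.size()) {
            throw new IllegalStateException("cycle detected: not a DAG");
        }
        return order;
    }
}
```

A size mismatch at the end means some nodes never reached in-degree zero, i.e. the graph contained a cycle and was not a valid DAG.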
@@ -111,7 +111,7 @@ Before explaining the architecture of the schedule system, let us first understa The centralized design concept is relatively simple. The nodes in a distributed cluster are divided into two roles according to their responsibilities:

- master-slave role + master-slave role

- The Master role is mainly responsible for distributing tasks and supervising the health of the Slaves. It can dynamically balance tasks across Slaves, so that no Slave node is either overloaded or left idle. @@ -125,7 +125,7 @@ Problems in the design of centralized : ###### Decentralization

+ decentralized

- In the decentralized design, there is usually no Master/Slave concept: all roles are the same and of equal status. The global Internet is a typical decentralized distributed system, where any networked node going down affects only a small range of features. @@ -141,13 +141,13 @@ EasyScheduler uses ZooKeeper distributed locks to implement only one Master to e 1. The core process algorithm for obtaining distributed locks is as follows

- Get Distributed Lock Process + Get Distributed Lock Process

2. Scheduler thread distributed lock implementation flow chart in EasyScheduler:

- Get Distributed Lock Process + Get Distributed Lock Process

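As a concrete illustration of the two flow charts above, here is a minimal sketch of the same mutual exclusion using Apache Curator's `InterProcessMutex` recipe; the connect string and lock path are placeholders, and EasyScheduler's actual implementation wraps the ZooKeeper client in its own utility classes:

```java
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.recipes.locks.InterProcessMutex;
import org.apache.curator.retry.ExponentialBackoffRetry;

public class MasterLockSketch {

    public static void main(String[] args) throws Exception {
        CuratorFramework client = CuratorFrameworkFactory.newClient(
                "127.0.0.1:2181", new ExponentialBackoffRetry(1000, 3));
        client.start();

        // every Master contends on the same znode path; only the holder schedules
        InterProcessMutex lock = new InterProcessMutex(client, "/escheduler/lock/masters");
        lock.acquire();
        try {
            System.out.println("lock held: this Master runs the Scheduler now");
        } finally {
            lock.release();   // removing the ephemeral node wakes the next waiter
            client.close();
        }
    }
}
```

Because the recipe is built on ephemeral sequential znodes, a Master that crashes while holding the lock releases it automatically when its ZooKeeper session expires.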
##### Third, the insufficient-thread loop-waiting problem @@ -156,7 +156,7 @@ EasyScheduler uses ZooKeeper distributed locks to implement only one Master to e - If a large number of sub-processes are nested in a large DAG, the result is the "dead wait" state shown in the following figure:

- Thread is not enough to wait for loop + Thread is not enough to wait for loop

In the above figure, MainFlowThread waits for SubFlowThread1 to end, SubFlowThread1 waits for SubFlowThread2 to end, SubFlowThread2 waits for SubFlowThread3 to end, and SubFlowThread3 waits for a new thread in the thread pool; the entire DAG process can therefore never end, and none of its threads can be released. The child and parent processes end up waiting on each other in a loop, and the scheduling cluster is no longer usable unless a new Master is started to add threads and break the deadlock. @@ -180,7 +180,7 @@ Fault tolerance is divided into service fault tolerance and task retry. Service Service fault tolerance design relies on ZooKeeper's Watcher mechanism. The implementation principle is as follows:

- EasyScheduler Fault Tolerant Design + EasyScheduler Fault Tolerant Design

The Master monitors the directories of the other Masters and of the Workers. If a remove event is detected, fault tolerance is performed for the process instance or for the task instance, according to the specific business logic. @@ -190,7 +190,7 @@ The Master monitors the directories of other Masters and Workers. If the remove - Master fault tolerance flow chart:

- Master Fault Tolerance Flowchart + Master Fault Tolerance Flowchart

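The remove-event monitoring behind this flow can be sketched with the plain ZooKeeper client as below. The `/escheduler/masters` path and the diff-the-children bookkeeping are illustrative assumptions; note that ZooKeeper watches are one-shot and only report that the children changed, so the watcher must re-register and compare snapshots itself:

```java
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class FailoverWatchSketch {

    private final ZooKeeper zk;
    private Set<String> lastSeen = new HashSet<>();

    public FailoverWatchSketch(ZooKeeper zk) {
        this.zk = zk;
    }

    /** Watch the masters directory and diff the children to spot removed nodes. */
    public void watchMasters() throws Exception {
        List<String> children = zk.getChildren("/escheduler/masters", event -> {
            if (event.getType() == Watcher.Event.EventType.NodeChildrenChanged) {
                try {
                    watchMasters();   // one-shot watch: re-register, then diff again
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        });
        Set<String> current = new HashSet<>(children);
        for (String node : lastSeen) {
            if (!current.contains(node)) {
                System.out.println("znode removed, fault tolerance needed for: " + node);
            }
        }
        lastSeen = current;
    }
}
```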
After ZooKeeper Master fault tolerance completes, the process is rescheduled by the Scheduler thread in EasyScheduler. It traverses the DAG to find the "Running" and "Submit Successful" tasks: for a "Running" task it monitors the status of its task instance; for a "Submit Successful" task it checks whether the task already exists in the Task Queue, likewise monitoring the task instance status if it does, and resubmitting the task instance if it does not. @@ -200,7 +200,7 @@ After the ZooKeeper Master is fault-tolerant, it is rescheduled by the Scheduler - Worker fault tolerance flow chart:

- Worker Fault Tolerance Flowchart + Worker Fault Tolerance Flowchart

Once the Master Scheduler thread finds a task instance in the "need fault tolerance" state, it takes over the task and resubmits it. @@ -239,13 +239,13 @@ In the early scheduling design, if there is no priority design and fair scheduli - Process definition priority exists because some processes need to be handled before others. It can be configured when the process is started, or when a scheduled start is set up. There are 5 levels, in order: HIGHEST, HIGH, MEDIUM, LOW, and LOWEST. As shown below

- Process Priority Configuration + Process Priority Configuration

- Task priority is also divided into 5 levels, in order: HIGHEST, HIGH, MEDIUM, LOW, and LOWEST. As shown below

- task priority configuration + task priority configuration

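A sketch of how such levels can take effect in the queue: if each level is an enum constant, its ordinal can be encoded into the queue entry, so that a plain lexicographic ordering of entries pops HIGHEST first. The `ProcessInstanceService` change in this patch rebuilds task-queue node values with this same `priority_processInstanceId_taskPriority_taskId` prefix; the enum and sort below are an illustrative reduction, not the scheduler's actual queue code:

```java
import java.util.Arrays;

public class PriorityEncodingSketch {

    enum Priority { HIGHEST, HIGH, MEDIUM, LOW, LOWEST }

    /** Encode the ordinal so "0_..." (HIGHEST) sorts before "4_..." (LOWEST). */
    static String nodeValue(Priority processPriority, int processInstanceId,
                            Priority taskPriority, int taskId) {
        return processPriority.ordinal() + "_" + processInstanceId
                + "_" + taskPriority.ordinal() + "_" + taskId;
    }

    public static void main(String[] args) {
        String[] queue = {
                nodeValue(Priority.LOW, 10, Priority.MEDIUM, 101),
                nodeValue(Priority.HIGHEST, 11, Priority.HIGH, 102),
        };
        Arrays.sort(queue);   // "0_11_1_102" now precedes "3_10_2_101"
        System.out.println(Arrays.toString(queue));
    }
}
```

Lexicographic order diverges from numeric order once ids have different digit counts; the sketch ignores that for brevity.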
##### VI. Logback and gRPC implement log access @@ -256,7 +256,7 @@ In the early scheduling design, if there is no priority design and fair scheduli - To keep EasyScheduler as lightweight as possible, gRPC was chosen for remote access to log information.

- grpc remote access + grpc remote access

- We use a custom Logback FileAppender and Filter function to generate a log file for each task instance. diff --git a/docs/en_US/backend-deployment.md b/docs/en_US/backend-deployment.md index f35c8d7a62..934a005f6b 100644 --- a/docs/en_US/backend-deployment.md +++ b/docs/en_US/backend-deployment.md @@ -69,7 +69,7 @@ Configure SSH secret-free login on deployment machines and other installation ma Execute scripts for creating tables and importing basic data ``` - sh ./script/create_escheduler.sh + sh ./script/create-escheduler.sh ``` #### Preparations 5: Modify the deployment directory permissions and operation parameters @@ -158,11 +158,11 @@ After normal compilation, ./target/escheduler-{version}/ is generated in the cur * stop all services in the cluster - ` sh ./bin/stop_all.sh` + ` sh ./bin/stop-all.sh` * start all services in the cluster - ` sh ./bin/start_all.sh` + ` sh ./bin/start-all.sh` * start and stop one master server @@ -201,7 +201,7 @@ sh ./bin/escheduler-daemon.sh stop alert-server Database upgrade is a function added in version 1.0.2. The database can be upgraded automatically by executing the following command: ```upgrade -sh ./script/upgrade_escheduler.sh +sh ./script/upgrade-escheduler.sh ``` diff --git a/docs/en_US/frontend-deployment.md b/docs/en_US/frontend-deployment.md index 46372c2d88..919caf1485 100644 --- a/docs/en_US/frontend-deployment.md +++ b/docs/en_US/frontend-deployment.md @@ -64,7 +64,7 @@ server { index index.html index.html; } location /escheduler { - proxy_pass http://192.168.xx.xx:12345; # nterface address (self-modifying) + proxy_pass http://192.168.xx.xx:12345; # interface address (self-modifying) proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header x_real_ipP $remote_addr; diff --git a/docs/en_US/frontend-development.md b/docs/en_US/frontend-development.md index fc27f40613..286c598dbc 100644 --- a/docs/en_US/frontend-development.md +++ b/docs/en_US/frontend-development.md @@ -63,7 +63,7 @@ Copy it to the corresponding directory of the server (front-end service static p Visit address` http://localhost:8888/#/` -#### Start with node and daemon under Liunx +#### Start with node and daemon under Linux Install pm2 `npm install -g pm2` @@ -238,7 +238,7 @@ The internal common component of the `src/js/module/components` project writes t ├── conditions ├── conditions.vue └── _source - └── serach.vue + └── search.vue └── util.js ``` diff --git a/docs/en_US/system-manual.md b/docs/en_US/system-manual.md index d5a63af80d..d571e1d66f 100644 --- a/docs/en_US/system-manual.md +++ b/docs/en_US/system-manual.md @@ -340,7 +340,7 @@ conf/common/hadoop.properties - Queues are used to execute spark, mapreduce and other programs, which require the use of "queue" parameters. -- "Security" - > "Queue Manage" - > "Creat Queue" +- "Security" - > "Queue Manage" - > "Create Queue"

@@ -403,7 +403,7 @@ conf/common/hadoop.properties try { // execute response = httpclient.execute(httpPost); - // eponse status code 200 + // response status code 200 if (response.getStatusLine().getStatusCode() == 200) { String content = EntityUtils.toString(response.getEntity(), "UTF-8"); System.out.println(content); @@ -533,7 +533,7 @@ conf/common/hadoop.properties

-- Datasource: The data source type of stored procedure supports MySQL and POSTGRRESQL, and chooses the corresponding data source. +- Datasource: The data source type of stored procedure supports MySQL and POSTGRESQL, and chooses the corresponding data source. - Method: The method name of the stored procedure - Custom parameters: Custom parameter types of stored procedures support IN and OUT, and data types support nine data types: VARCHAR, INTEGER, LONG, FLOAT, DOUBLE, DATE, TIME, TIMESTAMP and BOOLEAN. diff --git a/docs/en_US/upgrade.md b/docs/en_US/upgrade.md index 47bfc19f18..b5c743fd84 100644 --- a/docs/en_US/upgrade.md +++ b/docs/en_US/upgrade.md @@ -5,7 +5,7 @@ ## 2. Stop all services of escheduler - `sh ./script/stop_all.sh` + `sh ./script/stop-all.sh` ## 3. Download the new version of the installation package @@ -23,7 +23,7 @@ - Execute database upgrade script -`sh ./script/upgrade_escheduler.sh` +`sh ./script/upgrade-escheduler.sh` ## 5. Backend service upgrade diff --git a/docs/zh_CN/前端开发文档.md b/docs/zh_CN/前端开发文档.md index b3e8aa82d9..f805f5ed8c 100644 --- a/docs/zh_CN/前端开发文档.md +++ b/docs/zh_CN/前端开发文档.md @@ -64,7 +64,7 @@ npm install node-sass --unsafe-perm //单独安装node-sass依赖 访问地址 `http://localhost:8888/#/` -#### Liunx下使用node启动并且守护进程 +#### Linux下使用node启动并且守护进程 安装pm2 `npm install -g pm2` @@ -237,7 +237,7 @@ export default { ├── conditions ├── conditions.vue └── _source - └── serach.vue + └── search.vue └── util.js ``` diff --git a/docs/zh_CN/升级文档.md b/docs/zh_CN/升级文档.md index f7bafff6c1..83166971fc 100644 --- a/docs/zh_CN/升级文档.md +++ b/docs/zh_CN/升级文档.md @@ -5,7 +5,7 @@ ## 2. 停止escheduler所有服务 - `sh ./script/stop_all.sh` + `sh ./script/stop-all.sh` ## 3. 下载新版本的安装包 @@ -23,7 +23,7 @@ - 执行数据库升级脚本 -`sh ./script/upgrade_escheduler.sh` +`sh ./script/upgrade-escheduler.sh` ## 5. 后端服务升级 diff --git a/docs/zh_CN/后端部署文档.md b/docs/zh_CN/后端部署文档.md index 8ab315a355..bf217880a5 100644 --- a/docs/zh_CN/后端部署文档.md +++ b/docs/zh_CN/后端部署文档.md @@ -63,7 +63,7 @@ escheduler ALL=(ALL) NOPASSWD: NOPASSWD: ALL ``` 执行创建表和导入基础数据脚本 ``` - sh ./script/create_escheduler.sh + sh ./script/create-escheduler.sh ``` #### 准备五: 修改部署目录权限及运行参数 @@ -164,11 +164,11 @@ install.sh : 一键部署脚本 * 一键停止集群所有服务 - ` sh ./bin/stop_all.sh` + ` sh ./bin/stop-all.sh` * 一键开启集群所有服务 - ` sh ./bin/start_all.sh` + ` sh ./bin/start-all.sh` * 启停Master @@ -206,5 +206,5 @@ sh ./bin/escheduler-daemon.sh stop alert-server ## 3、数据库升级 数据库升级是在1.0.2版本增加的功能,执行以下命令即可自动升级数据库 ``` -sh ./script/upgrade_escheduler.sh +sh ./script/upgrade-escheduler.sh ``` diff --git a/docs/zh_CN/系统使用手册.md b/docs/zh_CN/系统使用手册.md index 2e5ee635b3..348cc2b36a 100644 --- a/docs/zh_CN/系统使用手册.md +++ b/docs/zh_CN/系统使用手册.md @@ -110,7 +110,7 @@ > 点击任务实例节点,点击**查看历史**,可以查看该工作流实例运行的该任务实例列表

- +

@@ -391,7 +391,7 @@ conf/common/hadoop.properties try { // execute response = httpclient.execute(httpPost); - // eponse status code 200 + // response status code 200 if (response.getStatusLine().getStatusCode() == 200) { String content = EntityUtils.toString(response.getEntity(), "UTF-8"); System.out.println(content); diff --git a/docs/zh_CN/系统架构设计.md b/docs/zh_CN/系统架构设计.md index 134684155d..8c3ef4e053 100644 --- a/docs/zh_CN/系统架构设计.md +++ b/docs/zh_CN/系统架构设计.md @@ -5,7 +5,7 @@ **DAG:** 全称Directed Acyclic Graph,简称DAG。工作流中的Task任务以有向无环图的形式组装起来,从入度为零的节点进行拓扑遍历,直到无后继节点为止。举例如下图:

- dag示例 + dag示例

dag示例

@@ -37,7 +37,7 @@ #### 2.1 系统架构图

- 系统架构图 + 系统架构图

系统架构图

@@ -98,7 +98,7 @@ 中心化的设计理念比较简单,分布式集群中的节点按照角色分工,大体上分为两种角色:

- master-slave角色 + master-slave角色

- Master的角色主要负责任务分发并监督Slave的健康状态,可以动态的将任务均衡到Slave上,以致Slave节点不至于“忙死”或”闲死”的状态。 @@ -115,7 +115,7 @@ ###### 去中心化

+ 去中心化

- 在去中心化设计里,通常没有Master/Slave的概念,所有的角色都是一样的,地位是平等的,全球互联网就是一个典型的去中心化的分布式系统,联网的任意节点设备down机,都只会影响很小范围的功能。 @@ -131,12 +131,12 @@ EasyScheduler使用ZooKeeper分布式锁来实现同一时刻只有一台Master执行Scheduler,或者只有一台Worker执行任务的提交。 1. 获取分布式锁的核心流程算法如下

- 获取分布式锁流程 + 获取分布式锁流程

2. EasyScheduler中Scheduler线程分布式锁实现流程图:

- 获取分布式锁流程 + 获取分布式锁流程

@@ -146,7 +146,7 @@ EasyScheduler使用ZooKeeper分布式锁来实现同一时刻只有一台Master - 如果一个大的DAG中嵌套了很多子流程,如下图则会产生“死等”状态:

- 线程不足循环等待问题 + 线程不足循环等待问题

上图中MainFlowThread等待SubFlowThread1结束,SubFlowThread1等待SubFlowThread2结束, SubFlowThread2等待SubFlowThread3结束,而SubFlowThread3等待线程池有新线程,则整个DAG流程不能结束,从而其中的线程也不能释放。这样就形成的子父流程循环等待的状态。此时除非启动新的Master来增加线程来打破这样的”僵局”,否则调度集群将不能再使用。 @@ -169,7 +169,7 @@ EasyScheduler使用ZooKeeper分布式锁来实现同一时刻只有一台Master 服务容错设计依赖于ZooKeeper的Watcher机制,实现原理如图:

- EasyScheduler容错设计 + EasyScheduler容错设计

其中Master监控其他Master和Worker的目录,如果监听到remove事件,则会根据具体的业务逻辑进行流程实例容错或者任务实例容错。 @@ -178,7 +178,7 @@ EasyScheduler使用ZooKeeper分布式锁来实现同一时刻只有一台Master - Master容错流程图:

- Master容错流程图 + Master容错流程图

ZooKeeper Master容错完成之后则重新由EasyScheduler中Scheduler线程调度,遍历 DAG 找到”正在运行”和“提交成功”的任务,对”正在运行”的任务监控其任务实例的状态,对”提交成功”的任务需要判断Task Queue中是否已经存在,如果存在则同样监控任务实例的状态,如果不存在则重新提交任务实例。 @@ -187,7 +187,7 @@ ZooKeeper Master容错完成之后则重新由EasyScheduler中Scheduler线程调 - Worker容错流程图:

- Worker容错流程图 + Worker容错流程图

Master Scheduler线程一旦发现任务实例为” 需要容错”状态,则接管任务并进行重新提交。 @@ -224,12 +224,12 @@ Master Scheduler线程一旦发现任务实例为” 需要容错”状态,则 - 其中流程定义的优先级是考虑到有些流程需要先于其他流程进行处理,这个可以在流程启动或者定时启动时配置,共有5级,依次为HIGHEST、HIGH、MEDIUM、LOW、LOWEST。如下图

- 流程优先级配置 + 流程优先级配置

- 任务的优先级也分为5级,依次为HIGHEST、HIGH、MEDIUM、LOW、LOWEST。如下图

- 任务优先级配置 + 任务优先级配置

@@ -242,7 +242,7 @@ Master Scheduler线程一旦发现任务实例为” 需要容错”状态,则 - 介于考虑到尽可能的EasyScheduler的轻量级性,所以选择了gRPC实现远程访问日志信息。

- grpc远程访问 + grpc远程访问

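Both language versions of the architecture document mention a custom Logback `FileAppender` and `Filter` used to route each task instance to its own log file. A minimal filter sketch follows; the `TaskLogInfo-` thread-name prefix is an assumption for illustration, and the real appender and filter classes live in the server module:

```java
import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.core.filter.Filter;
import ch.qos.logback.core.spi.FilterReply;

/**
 * Accept only events emitted by task-execution threads so that each task
 * instance can be written to its own log file; everything else is denied
 * here and handled by the normal appenders.
 */
public class TaskLogFilterSketch extends Filter<ILoggingEvent> {

    private static final String TASK_THREAD_PREFIX = "TaskLogInfo-";

    @Override
    public FilterReply decide(ILoggingEvent event) {
        if (event.getThreadName().startsWith(TASK_THREAD_PREFIX)) {
            return FilterReply.ACCEPT;
        }
        return FilterReply.DENY;
    }
}
```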
diff --git a/escheduler-alert/src/main/java/cn/escheduler/alert/AlertServer.java b/escheduler-alert/src/main/java/cn/escheduler/alert/AlertServer.java index 8de3a65d6c..27c5f0ce56 100644 --- a/escheduler-alert/src/main/java/cn/escheduler/alert/AlertServer.java +++ b/escheduler-alert/src/main/java/cn/escheduler/alert/AlertServer.java @@ -39,7 +39,7 @@ public class AlertServer { private AlertSender alertSender; - private static AlertServer instance; + private static volatile AlertServer instance; private AlertServer() { diff --git a/escheduler-alert/src/main/java/cn/escheduler/alert/utils/Constants.java b/escheduler-alert/src/main/java/cn/escheduler/alert/utils/Constants.java index d077dcf65c..037f6e775d 100644 --- a/escheduler-alert/src/main/java/cn/escheduler/alert/utils/Constants.java +++ b/escheduler-alert/src/main/java/cn/escheduler/alert/utils/Constants.java @@ -47,11 +47,15 @@ public class Constants { public static final String MAIL_SENDER = "mail.sender"; + public static final String MAIL_USER = "mail.user"; + public static final String MAIL_PASSWD = "mail.passwd"; public static final String XLS_FILE_PATH = "xls.file.path"; - public static final String MAIL_HOST = "mail.host"; + public static final String MAIL_HOST = "mail.smtp.host"; + + public static final String MAIL_PORT = "mail.smtp.port"; public static final String MAIL_SMTP_AUTH = "mail.smtp.auth"; @@ -61,6 +65,8 @@ public class Constants { public static final String MAIL_SMTP_SSL_ENABLE = "mail.smtp.ssl.enable"; + public static final String MAIL_SMTP_SSL_TRUST="mail.smtp.ssl.trust"; + public static final String TEXT_HTML_CHARSET_UTF_8 = "text/html;charset=utf-8"; public static final String STRING_TRUE = "true"; diff --git a/escheduler-alert/src/main/java/cn/escheduler/alert/utils/MailUtils.java b/escheduler-alert/src/main/java/cn/escheduler/alert/utils/MailUtils.java index 50d161f019..82add04daf 100644 --- a/escheduler-alert/src/main/java/cn/escheduler/alert/utils/MailUtils.java +++ b/escheduler-alert/src/main/java/cn/escheduler/alert/utils/MailUtils.java @@ -56,6 +56,8 @@ public class MailUtils { public static final String mailSender = getString(Constants.MAIL_SENDER); + public static final String mailUser = getString(Constants.MAIL_USER); + public static final String mailPasswd = getString(Constants.MAIL_PASSWD); public static final Boolean mailUseStartTLS = getBoolean(Constants.MAIL_SMTP_STARTTLS_ENABLE); @@ -68,6 +70,8 @@ public class MailUtils { public static final String sslEnable = getString(Constants.MAIL_SMTP_SSL_ENABLE); + public static final String sslTrust = getString(Constants.MAIL_SMTP_SSL_TRUST); + private static Template MAIL_TEMPLATE; static { @@ -126,16 +130,10 @@ public class MailUtils { HtmlEmail email = new HtmlEmail(); try { - // set the SMTP sending server, 163 as follows: "smtp.163.com" - email.setHostName(mailServerHost); - email.setSmtpPort(mailServerPort); - //set charset + Session session = getSession(); + email.setMailSession(session); + email.setFrom(mailSender); email.setCharset(Constants.UTF_8); - // TLS verification - email.setTLS(Boolean.valueOf(starttlsEnable)); - - // SSL verification - email.setSSL(Boolean.valueOf(sslEnable)); if (CollectionUtils.isNotEmpty(receivers)){ // receivers mail for (String receiver : receivers) { @@ -286,23 +284,11 @@ public class MailUtils { // Security.addProvider(new com.sun.net.ssl.internal.ssl.Provider()); // final String SSL_FACTORY = "javax.net.ssl.SSLSocketFactory"; - Properties props = new Properties(); - props.setProperty(Constants.MAIL_HOST, 
mailServerHost); - props.setProperty(Constants.MAIL_SMTP_AUTH, Constants.STRING_TRUE); - props.setProperty(Constants.MAIL_TRANSPORT_PROTOCOL, mailProtocol); - props.setProperty(Constants.MAIL_SMTP_STARTTLS_ENABLE, starttlsEnable); - props.setProperty("mail.smtp.ssl.enable", sslEnable); - Authenticator auth = new Authenticator() { - @Override - protected PasswordAuthentication getPasswordAuthentication() { - // mail username and password - return new PasswordAuthentication(mailSender, mailPasswd); - } - }; // 1. The first step in creating mail: creating session - Session session = Session.getInstance(props, auth); + Session session = getSession(); // Setting debug mode, can be turned off session.setDebug(false); + // 2. creating mail: Creating a MimeMessage MimeMessage msg = new MimeMessage(session); // 3. set sender @@ -314,6 +300,32 @@ public class MailUtils { return msg; } + /** + * get session + * @return + */ + private static Session getSession() { + Properties props = new Properties(); + props.setProperty(Constants.MAIL_HOST, mailServerHost); + props.setProperty(Constants.MAIL_PORT, String.valueOf(mailServerPort)); + props.setProperty(Constants.MAIL_SMTP_AUTH, Constants.STRING_TRUE); + props.setProperty(Constants.MAIL_TRANSPORT_PROTOCOL, mailProtocol); + props.setProperty(Constants.MAIL_SMTP_STARTTLS_ENABLE, starttlsEnable); + props.setProperty(Constants.MAIL_SMTP_SSL_ENABLE, sslEnable); + props.setProperty(Constants.MAIL_SMTP_SSL_TRUST, sslTrust); + + Authenticator auth = new Authenticator() { + @Override + protected PasswordAuthentication getPasswordAuthentication() { + // mail username and password + return new PasswordAuthentication(mailUser, mailPasswd); + } + }; + + Session session = Session.getInstance(props, auth); + return session; + } + /** * * @param receiversCc @@ -370,13 +382,6 @@ public class MailUtils { * @throws EmailException */ private static Map getStringObjectMap(String title, String content, ShowType showType, Map retMap, HtmlEmail email) throws EmailException { - // sender's mailbox - email.setFrom(mailSender, mailSender); - /** - * if you need authentication information, set authentication: username-password. 
- * The registered name and password of the sender on the mail server respectively - */ - email.setAuthentication(mailSender, mailPasswd); /** * the subject of the message to be sent diff --git a/escheduler-alert/src/main/resources/alert.properties b/escheduler-alert/src/main/resources/alert.properties index 87ccae6377..096c60b4d8 100644 --- a/escheduler-alert/src/main/resources/alert.properties +++ b/escheduler-alert/src/main/resources/alert.properties @@ -3,15 +3,17 @@ alert.type=EMAIL # mail server configuration mail.protocol=SMTP -mail.server.host=smtp.exmail.qq.com +mail.server.host=xxx.xxx.com mail.server.port=25 -mail.sender=xxxxxxx -mail.passwd=xxxxxxx +mail.sender=xxx@xxx.com +mail.user=xxx@xxx.com +mail.passwd=111111 # TLS -mail.smtp.starttls.enable=false +mail.smtp.starttls.enable=true # SSL -mail.smtp.ssl.enable=true +mail.smtp.ssl.enable=false +mail.smtp.ssl.trust=xxx.xxx.com #xls file path,need create if not exist xls.file.path=/tmp/xls diff --git a/escheduler-api/src/main/java/cn/escheduler/api/controller/ProcessDefinitionController.java b/escheduler-api/src/main/java/cn/escheduler/api/controller/ProcessDefinitionController.java index aabd44b832..fcaee5fffa 100644 --- a/escheduler-api/src/main/java/cn/escheduler/api/controller/ProcessDefinitionController.java +++ b/escheduler-api/src/main/java/cn/escheduler/api/controller/ProcessDefinitionController.java @@ -390,10 +390,14 @@ public class ProcessDefinitionController extends BaseController{ * @param processDefinitionId * @return */ + @ApiOperation(value = "deleteProcessDefinitionById", notes= "DELETE_PROCESS_DEFINITION_BY_ID_NOTES") + @ApiImplicitParams({ + @ApiImplicitParam(name = "processDefinitionId", value = "PROCESS_DEFINITION_ID", dataType = "Int", example = "100") + }) @GetMapping(value="/delete") @ResponseStatus(HttpStatus.OK) - public Result deleteProcessDefinitionById(@RequestAttribute(value = Constants.SESSION_USER) User loginUser, - @PathVariable String projectName, + public Result deleteProcessDefinitionById(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, + @ApiParam(name = "projectName", value = "PROJECT_NAME", required = true) @PathVariable String projectName, @RequestParam("processDefinitionId") Integer processDefinitionId ){ try{ @@ -415,10 +419,14 @@ public class ProcessDefinitionController extends BaseController{ * @param processDefinitionIds * @return */ + @ApiOperation(value = "batchDeleteProcessDefinitionByIds", notes= "BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES") + @ApiImplicitParams({ + @ApiImplicitParam(name = "processDefinitionIds", value = "PROCESS_DEFINITION_IDS", type = "String") + }) @GetMapping(value="/batch-delete") @ResponseStatus(HttpStatus.OK) - public Result batchDeleteProcessDefinitionByIds(@RequestAttribute(value = Constants.SESSION_USER) User loginUser, - @PathVariable String projectName, + public Result batchDeleteProcessDefinitionByIds(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, + @ApiParam(name = "projectName", value = "PROJECT_NAME", required = true) @PathVariable String projectName, @RequestParam("processDefinitionIds") String processDefinitionIds ){ try{ @@ -459,4 +467,28 @@ public class ProcessDefinitionController extends BaseController{ } } + + + /** + * query proccess definition all by project id + * + * @param loginUser + * @return + */ + @ApiOperation(value = "queryProccessDefinitionAllByProjectId", notes= "QUERY_PROCCESS_DEFINITION_All_BY_PROJECT_ID_NOTES") + 
@GetMapping(value="/queryProccessDefinitionAllByProjectId") + @ResponseStatus(HttpStatus.OK) + public Result queryProccessDefinitionAllByProjectId(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, + @RequestParam("projectId") Integer projectId){ + try{ + logger.info("query proccess definition list, login user:{}, project id:{}", + loginUser.getUserName(),projectId); + Map result = processDefinitionService.queryProccessDefinitionAllByProjectId(projectId); + return returnDataList(result); + }catch (Exception e){ + logger.error(QUERY_PROCCESS_DEFINITION_LIST.getMsg(),e); + return error(QUERY_PROCCESS_DEFINITION_LIST.getCode(), QUERY_PROCCESS_DEFINITION_LIST.getMsg()); + } + } + } diff --git a/escheduler-api/src/main/java/cn/escheduler/api/controller/ProjectController.java b/escheduler-api/src/main/java/cn/escheduler/api/controller/ProjectController.java index c0bc085d0f..e098b1a2be 100644 --- a/escheduler-api/src/main/java/cn/escheduler/api/controller/ProjectController.java +++ b/escheduler-api/src/main/java/cn/escheduler/api/controller/ProjectController.java @@ -279,5 +279,26 @@ public class ProjectController extends BaseController { } } + /** + * query all project list + * @param loginUser + * @return + */ + @ApiOperation(value = "queryAllProjectList", notes= "QUERY_ALL_PROJECT_LIST_NOTES") + @GetMapping(value = "/queryAllProjectList") + @ResponseStatus(HttpStatus.OK) + public Result queryAllProjectList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser) { + + try { + logger.info("login user {}, query all project list", loginUser.getUserName()); + Map result = projectService.queryAllProjectList(); + return returnDataList(result); + } catch (Exception e) { + logger.error(LOGIN_USER_QUERY_PROJECT_LIST_PAGING_ERROR.getMsg(), e); + return error(Status.LOGIN_USER_QUERY_PROJECT_LIST_PAGING_ERROR.getCode(), Status.LOGIN_USER_QUERY_PROJECT_LIST_PAGING_ERROR.getMsg()); + } + } + + } diff --git a/escheduler-api/src/main/java/cn/escheduler/api/service/ProcessDefinitionService.java b/escheduler-api/src/main/java/cn/escheduler/api/service/ProcessDefinitionService.java index 897cab46a3..45d0b46e79 100644 --- a/escheduler-api/src/main/java/cn/escheduler/api/service/ProcessDefinitionService.java +++ b/escheduler-api/src/main/java/cn/escheduler/api/service/ProcessDefinitionService.java @@ -862,6 +862,24 @@ public class ProcessDefinitionService extends BaseDAGService { } + + /** + * query proccess definition all by project id + * + * @param projectId + * @return + */ + public Map queryProccessDefinitionAllByProjectId(Integer projectId) { + + HashMap result = new HashMap<>(5); + + List resourceList = processDefineMapper.queryAllDefinitionList(projectId); + result.put(Constants.DATA_LIST, resourceList); + putMsg(result, Status.SUCCESS); + + return result; + } + /** * Encapsulates the TreeView structure * diff --git a/escheduler-api/src/main/java/cn/escheduler/api/service/ProcessInstanceService.java b/escheduler-api/src/main/java/cn/escheduler/api/service/ProcessInstanceService.java index 3171f26dcd..0e05d944ae 100644 --- a/escheduler-api/src/main/java/cn/escheduler/api/service/ProcessInstanceService.java +++ b/escheduler-api/src/main/java/cn/escheduler/api/service/ProcessInstanceService.java @@ -32,10 +32,7 @@ import cn.escheduler.common.model.TaskNodeRelation; import cn.escheduler.common.process.Property; import cn.escheduler.common.queue.ITaskQueue; import cn.escheduler.common.queue.TaskQueueFactory; -import cn.escheduler.common.utils.CollectionUtils; 
-import cn.escheduler.common.utils.DateUtils; -import cn.escheduler.common.utils.JSONUtils; -import cn.escheduler.common.utils.ParameterUtils; +import cn.escheduler.common.utils.*; import cn.escheduler.common.utils.placeholder.BusinessTimeUtils; import cn.escheduler.dao.ProcessDao; import cn.escheduler.dao.mapper.*; @@ -493,29 +490,64 @@ public class ProcessInstanceService extends BaseDAGService { return result; } - int delete = processDao.deleteWorkProcessInstanceById(processInstanceId); - processDao.deleteAllSubWorkProcessByParentId(processInstanceId); - processDao.deleteWorkProcessMapByParentId(processInstanceId); + // delete zk queue + if (CollectionUtils.isNotEmpty(taskInstanceList)){ + for (TaskInstance taskInstance : taskInstanceList){ + // task instance priority + int taskInstancePriority = taskInstance.getTaskInstancePriority().ordinal(); + + StringBuilder nodeValueSb = new StringBuilder(100); + nodeValueSb.append(processInstancePriority) + .append(UNDERLINE) + .append(processInstanceId) + .append(UNDERLINE) + .append(taskInstancePriority) + .append(UNDERLINE) + .append(taskInstance.getId()) + .append(UNDERLINE); + + int taskWorkerGroupId = processDao.getTaskWorkerGroupId(taskInstance); + WorkerGroup workerGroup = workerGroupMapper.queryById(taskWorkerGroupId); + + if(workerGroup == null){ + nodeValueSb.append(DEFAULT_WORKER_ID); + }else { + + String ips = workerGroup.getIpList(); + StringBuilder ipSb = new StringBuilder(100); + String[] ipArray = ips.split(COMMA); + + for (String ip : ipArray) { + long ipLong = IpUtils.ipToLong(ip); + ipSb.append(ipLong).append(COMMA); + } - if (delete > 0) { - if (CollectionUtils.isNotEmpty(taskInstanceList)){ - for (TaskInstance taskInstance : taskInstanceList){ - // task instance priority - int taskInstancePriority = taskInstance.getTaskInstancePriority().ordinal(); - String nodeValue=processInstancePriority + "_" + processInstanceId + "_" +taskInstancePriority + "_" + taskInstance.getId(); - try { - logger.info("delete task queue node : {}",nodeValue); - tasksQueue.removeNode(cn.escheduler.common.Constants.SCHEDULER_TASKS_QUEUE, nodeValue); - }catch (Exception e){ - logger.error("delete task queue node : {}", nodeValue); + if(ipSb.length() > 0) { + ipSb.deleteCharAt(ipSb.length() - 1); } + nodeValueSb.append(ipSb); + } + + try { + logger.info("delete task queue node : {}",nodeValueSb.toString()); + tasksQueue.removeNode(cn.escheduler.common.Constants.SCHEDULER_TASKS_QUEUE, nodeValueSb.toString()); + }catch (Exception e){ + logger.error("delete task queue node : {}", nodeValueSb.toString()); } } + } + // delete database cascade + int delete = processDao.deleteWorkProcessInstanceById(processInstanceId); + processDao.deleteAllSubWorkProcessByParentId(processInstanceId); + processDao.deleteWorkProcessMapByParentId(processInstanceId); + + if (delete > 0) { putMsg(result, Status.SUCCESS); } else { putMsg(result, Status.DELETE_PROCESS_INSTANCE_BY_ID_ERROR); } + return result; } diff --git a/escheduler-api/src/main/java/cn/escheduler/api/service/ProjectService.java b/escheduler-api/src/main/java/cn/escheduler/api/service/ProjectService.java index ca369de462..c5728c35b3 100644 --- a/escheduler-api/src/main/java/cn/escheduler/api/service/ProjectService.java +++ b/escheduler-api/src/main/java/cn/escheduler/api/service/ProjectService.java @@ -367,4 +367,30 @@ public class ProjectService extends BaseService{ } + /** + * query all project list + * @return + */ + public Map queryAllProjectList() { + Map result = new HashMap<>(); + List projects = 
projectMapper.queryAllProjectList(); + List processDefinitions = processDefinitionMapper.queryAll(); + if(projects != null){ + Set set = new HashSet<>(); + for (ProcessDefinition processDefinition : processDefinitions){ + set.add(processDefinition.getProjectId()); + } + List tempDeletelist = new ArrayList(); + for (Project project : projects) { + if(!set.contains(project.getId())){ + tempDeletelist.add(project); + } + } + projects.removeAll(tempDeletelist); + } + result.put(Constants.DATA_LIST, projects); + putMsg(result,Status.SUCCESS); + return result; + } + } diff --git a/escheduler-api/src/main/resources/i18n/messages.properties b/escheduler-api/src/main/resources/i18n/messages.properties index a663c71013..44787fd78f 100644 --- a/escheduler-api/src/main/resources/i18n/messages.properties +++ b/escheduler-api/src/main/resources/i18n/messages.properties @@ -113,6 +113,7 @@ QUERY_PROJECT_BY_ID_NOTES=query project info by project id QUERY_PROJECT_LIST_PAGING_NOTES=QUERY PROJECT LIST PAGING DELETE_PROJECT_BY_ID_NOTES=delete project by id QUERY_UNAUTHORIZED_PROJECT_NOTES=query unauthorized project +QUERY_ALL_PROJECT_LIST_NOTES=query all project list QUERY_AUTHORIZED_PROJECT_NOTES=query authorized project TASK_RECORD_TAG=task record related operation QUERY_TASK_RECORD_LIST_PAGING_NOTES=query task record list paging @@ -152,10 +153,12 @@ VERIFY_PROCCESS_DEFINITION_NAME_NOTES=verify proccess definition name LOGIN_NOTES=user login UPDATE_PROCCESS_DEFINITION_NOTES=update proccess definition PROCESS_DEFINITION_ID=process definition id +PROCESS_DEFINITION_IDS=process definition ids RELEASE_PROCCESS_DEFINITION_NOTES=release proccess definition QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=query proccess definition by id QUERY_PROCCESS_DEFINITION_LIST_NOTES=query proccess definition list QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=query proccess definition list paging +QUERY_ALL_DEFINITION_LIST_NOTES=query all definition list PAGE_NO=page no PROCESS_INSTANCE_ID=process instance id PROCESS_INSTANCE_JSON=process instance info(json format) @@ -170,6 +173,9 @@ LIMIT=limit VIEW_TREE_NOTES=view tree GET_NODE_LIST_BY_DEFINITION_ID_NOTES=get task node list by process definition id PROCESS_DEFINITION_ID_LIST=process definition id list +QUERY_PROCCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=query proccess definition all by project id +DELETE_PROCESS_DEFINITION_BY_ID_NOTES=delete process definition by process definition id +BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=batch delete process definition by process definition ids QUERY_PROCESS_INSTANCE_BY_ID_NOTES=query process instance by process instance id DELETE_PROCESS_INSTANCE_BY_ID_NOTES=delete process instance by process instance id TASK_ID=task instance id diff --git a/escheduler-api/src/main/resources/i18n/messages_en_US.properties b/escheduler-api/src/main/resources/i18n/messages_en_US.properties index a663c71013..d06b83fed5 100644 --- a/escheduler-api/src/main/resources/i18n/messages_en_US.properties +++ b/escheduler-api/src/main/resources/i18n/messages_en_US.properties @@ -111,6 +111,7 @@ UPDATE_PROJECT_NOTES=update project PROJECT_ID=project id QUERY_PROJECT_BY_ID_NOTES=query project info by project id QUERY_PROJECT_LIST_PAGING_NOTES=QUERY PROJECT LIST PAGING +QUERY_ALL_PROJECT_LIST_NOTES=query all project list DELETE_PROJECT_BY_ID_NOTES=delete project by id QUERY_UNAUTHORIZED_PROJECT_NOTES=query unauthorized project QUERY_AUTHORIZED_PROJECT_NOTES=query authorized project @@ -152,10 +153,12 @@ VERIFY_PROCCESS_DEFINITION_NAME_NOTES=verify proccess definition name 
LOGIN_NOTES=user login UPDATE_PROCCESS_DEFINITION_NOTES=update proccess definition PROCESS_DEFINITION_ID=process definition id +PROCESS_DEFINITION_IDS=process definition ids RELEASE_PROCCESS_DEFINITION_NOTES=release proccess definition QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=query proccess definition by id QUERY_PROCCESS_DEFINITION_LIST_NOTES=query proccess definition list QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=query proccess definition list paging +QUERY_ALL_DEFINITION_LIST_NOTES=query all definition list PAGE_NO=page no PROCESS_INSTANCE_ID=process instance id PROCESS_INSTANCE_JSON=process instance info(json format) @@ -170,6 +173,9 @@ LIMIT=limit VIEW_TREE_NOTES=view tree GET_NODE_LIST_BY_DEFINITION_ID_NOTES=get task node list by process definition id PROCESS_DEFINITION_ID_LIST=process definition id list +QUERY_PROCCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=query proccess definition all by project id +DELETE_PROCESS_DEFINITION_BY_ID_NOTES=delete process definition by process definition id +BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=batch delete process definition by process definition ids QUERY_PROCESS_INSTANCE_BY_ID_NOTES=query process instance by process instance id DELETE_PROCESS_INSTANCE_BY_ID_NOTES=delete process instance by process instance id TASK_ID=task instance id diff --git a/escheduler-api/src/main/resources/i18n/messages_zh_CN.properties b/escheduler-api/src/main/resources/i18n/messages_zh_CN.properties index b0d6694d2b..46b0270747 100644 --- a/escheduler-api/src/main/resources/i18n/messages_zh_CN.properties +++ b/escheduler-api/src/main/resources/i18n/messages_zh_CN.properties @@ -110,6 +110,7 @@ UPDATE_PROJECT_NOTES=更新项目 PROJECT_ID=项目ID QUERY_PROJECT_BY_ID_NOTES=通过项目ID查询项目信息 QUERY_PROJECT_LIST_PAGING_NOTES=分页查询项目列表 +QUERY_ALL_PROJECT_LIST_NOTES=查询所有项目 DELETE_PROJECT_BY_ID_NOTES=删除项目通过ID QUERY_UNAUTHORIZED_PROJECT_NOTES=查询未授权的项目 QUERY_AUTHORIZED_PROJECT_NOTES=查询授权项目 @@ -155,8 +156,10 @@ RELEASE_PROCCESS_DEFINITION_NOTES=发布流程定义 QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=查询流程定义通过流程定义ID QUERY_PROCCESS_DEFINITION_LIST_NOTES=查询流程定义列表 QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=分页查询流程定义列表 +QUERY_ALL_DEFINITION_LIST_NOTES=查询所有流程定义 PAGE_NO=页码号 PROCESS_INSTANCE_ID=流程实例ID +PROCESS_INSTANCE_IDS=流程实例ID集合 PROCESS_INSTANCE_JSON=流程实例信息(json格式) SCHEDULE_TIME=定时时间 SYNC_DEFINE=更新流程实例的信息是否同步到流程定义 @@ -168,6 +171,9 @@ LIMIT=显示多少条 VIEW_TREE_NOTES=树状图 GET_NODE_LIST_BY_DEFINITION_ID_NOTES=获得任务节点列表通过流程定义ID PROCESS_DEFINITION_ID_LIST=流程定义id列表 +QUERY_PROCCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=查询流程定义通过项目ID +BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=批量删除流程定义通过流程定义ID集合 +DELETE_PROCESS_DEFINITION_BY_ID_NOTES=删除流程定义通过流程定义ID QUERY_PROCESS_INSTANCE_BY_ID_NOTES=查询流程实例通过流程实例ID DELETE_PROCESS_INSTANCE_BY_ID_NOTES=删除流程实例通过流程实例ID TASK_ID=任务实例ID diff --git a/escheduler-api/src/test/java/cn/escheduler/api/HttpClientTest.java b/escheduler-api/src/test/java/cn/escheduler/api/HttpClientTest.java index b2495a2f9d..1d527b563c 100644 --- a/escheduler-api/src/test/java/cn/escheduler/api/HttpClientTest.java +++ b/escheduler-api/src/test/java/cn/escheduler/api/HttpClientTest.java @@ -59,7 +59,7 @@ public class HttpClientTest { try { // execute response = httpclient.execute(httpPost); - // eponse status code 200 + // response status code 200 if (response.getStatusLine().getStatusCode() == 200) { String content = EntityUtils.toString(response.getEntity(), "UTF-8"); logger.info(content); @@ -96,7 +96,7 @@ public class HttpClientTest { try { // execute http get request response = httpclient.execute(httpGet); - // reponse 
status code 200 + // response status code 200 if (response.getStatusLine().getStatusCode() == 200) { String content = EntityUtils.toString(response.getEntity(), "UTF-8"); logger.info("start--------------->"); @@ -139,7 +139,7 @@ public class HttpClientTest { try { // execute http get request response = httpclient.execute(httpGet); - // reponse status code 200 + // response status code 200 if (response.getStatusLine().getStatusCode() == 200) { String content = EntityUtils.toString(response.getEntity(), "UTF-8"); logger.info("start--------------->"); diff --git a/escheduler-common/src/main/java/cn/escheduler/common/Constants.java b/escheduler-common/src/main/java/cn/escheduler/common/Constants.java index 10a4460678..7eaa6b7926 100644 --- a/escheduler-common/src/main/java/cn/escheduler/common/Constants.java +++ b/escheduler-common/src/main/java/cn/escheduler/common/Constants.java @@ -906,4 +906,18 @@ public final class Constants { * hive conf */ public static final String HIVE_CONF = "hiveconf:"; + + //flink 任务 + public static final String FLINK_YARN_CLUSTER = "yarn-cluster"; + public static final String FLINK_RUN_MODE = "-m"; + public static final String FLINK_YARN_SLOT = "-ys"; + public static final String FLINK_APP_NAME = "-ynm"; + public static final String FLINK_TASK_MANAGE = "-yn"; + + public static final String FLINK_JOB_MANAGE_MEM = "-yjm"; + public static final String FLINK_TASK_MANAGE_MEM = "-ytm"; + public static final String FLINK_detach = "-d"; + public static final String FLINK_MAIN_CLASS = "-c"; + + } diff --git a/escheduler-common/src/main/java/cn/escheduler/common/enums/TaskType.java b/escheduler-common/src/main/java/cn/escheduler/common/enums/TaskType.java index 1d589167e3..7e4fde6a34 100644 --- a/escheduler-common/src/main/java/cn/escheduler/common/enums/TaskType.java +++ b/escheduler-common/src/main/java/cn/escheduler/common/enums/TaskType.java @@ -29,8 +29,9 @@ public enum TaskType { * 5 SPARK * 6 PYTHON * 7 DEPENDENT + * 8 FLINK */ - SHELL,SQL, SUB_PROCESS,PROCEDURE,MR,SPARK,PYTHON,DEPENDENT; + SHELL,SQL, SUB_PROCESS,PROCEDURE,MR,SPARK,PYTHON,DEPENDENT,FLINK; public static boolean typeIsNormalTask(String typeName) { TaskType taskType = TaskType.valueOf(typeName); diff --git a/escheduler-common/src/main/java/cn/escheduler/common/shell/AbstractShell.java b/escheduler-common/src/main/java/cn/escheduler/common/shell/AbstractShell.java index 1250732632..0880c4d5bb 100644 --- a/escheduler-common/src/main/java/cn/escheduler/common/shell/AbstractShell.java +++ b/escheduler-common/src/main/java/cn/escheduler/common/shell/AbstractShell.java @@ -157,7 +157,7 @@ public abstract class AbstractShell { BufferedReader inReader = new BufferedReader(new InputStreamReader(process .getInputStream())); - final StringBuffer errMsg = new StringBuffer(); + final StringBuilder errMsg = new StringBuilder(); // read error and input streams as this would free up the buffers // free the error stream buffer diff --git a/escheduler-common/src/main/java/cn/escheduler/common/task/flink/FlinkParameters.java b/escheduler-common/src/main/java/cn/escheduler/common/task/flink/FlinkParameters.java new file mode 100644 index 0000000000..54dfcb7103 --- /dev/null +++ b/escheduler-common/src/main/java/cn/escheduler/common/task/flink/FlinkParameters.java @@ -0,0 +1,219 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package cn.escheduler.common.task.flink; + +import cn.escheduler.common.enums.ProgramType; +import cn.escheduler.common.process.ResourceInfo; +import cn.escheduler.common.task.AbstractParameters; + +import java.util.List; +import java.util.stream.Collectors; + +/** + * flink parameters + */ +public class FlinkParameters extends AbstractParameters { + + /** + * major jar + */ + private ResourceInfo mainJar; + + /** + * major class + */ + private String mainClass; + + /** + * deploy mode yarn-cluster yarn-client yarn-local + */ + private String deployMode; + + /** + * arguments + */ + private String mainArgs; + + /** + * number of slots + */ + private int slot; + + /** + * name of the Yarn application + */ + + private String appName; + + /** + * number of taskManagers + */ + private int taskManager; + + /** + * jobManager memory size + */ + private String jobManagerMemory; + + /** + * taskManager memory size + */ + private String taskManagerMemory; + + /** + * resource list + */ + private List resourceList; + + /** + * The YARN queue to submit to + */ + private String queue; + + /** + * other arguments + */ + private String others; + + /** + * program type + * 0 JAVA,1 SCALA,2 PYTHON + */ + private ProgramType programType; + + public ResourceInfo getMainJar() { + return mainJar; + } + + public void setMainJar(ResourceInfo mainJar) { + this.mainJar = mainJar; + } + + public String getMainClass() { + return mainClass; + } + + public void setMainClass(String mainClass) { + this.mainClass = mainClass; + } + + public String getDeployMode() { + return deployMode; + } + + public void setDeployMode(String deployMode) { + this.deployMode = deployMode; + } + + public String getMainArgs() { + return mainArgs; + } + + public void setMainArgs(String mainArgs) { + this.mainArgs = mainArgs; + } + + public int getSlot() { + return slot; + } + + public void setSlot(int slot) { + this.slot = slot; + } + + public String getAppName() { + return appName; + } + + public void setAppName(String appName) { + this.appName = appName; + } + + public int getTaskManager() { + return taskManager; + } + + public void setTaskManager(int taskManager) { + this.taskManager = taskManager; + } + + public String getJobManagerMemory() { + return jobManagerMemory; + } + + public void setJobManagerMemory(String jobManagerMemory) { + this.jobManagerMemory = jobManagerMemory; + } + + public String getTaskManagerMemory() { + return taskManagerMemory; + } + + public void setTaskManagerMemory(String taskManagerMemory) { + this.taskManagerMemory = taskManagerMemory; + } + + public String getQueue() { + return queue; + } + + public void setQueue(String queue) { + this.queue = queue; + } + + public List getResourceList() { + return resourceList; + } + + public void setResourceList(List resourceList) { + this.resourceList = resourceList; + } + + public String getOthers() { + return others; + } + + public void setOthers(String others) { + this.others = others; + } + + public ProgramType
getProgramType() { + return programType; + } + + public void setProgramType(ProgramType programType) { + this.programType = programType; + } + + @Override + public boolean checkParameters() { + return mainJar != null && programType != null; + } + + + @Override + public List getResourceFilesList() { + if(resourceList !=null ) { + this.resourceList.add(mainJar); + return resourceList.stream() + .map(p -> p.getRes()).collect(Collectors.toList()); + } + return null; + } + + +} diff --git a/escheduler-common/src/main/java/cn/escheduler/common/thread/ThreadPoolExecutors.java b/escheduler-common/src/main/java/cn/escheduler/common/thread/ThreadPoolExecutors.java index 8329e8c18a..ea03026d3a 100644 --- a/escheduler-common/src/main/java/cn/escheduler/common/thread/ThreadPoolExecutors.java +++ b/escheduler-common/src/main/java/cn/escheduler/common/thread/ThreadPoolExecutors.java @@ -40,7 +40,7 @@ public class ThreadPoolExecutors { private static final Logger logger = LoggerFactory.getLogger(ThreadPoolExecutors.class); private static Executor executor; - private static ThreadPoolExecutors threadPoolExecutors; + private static volatile ThreadPoolExecutors threadPoolExecutors; private ThreadPoolExecutors(){} diff --git a/escheduler-common/src/main/java/cn/escheduler/common/utils/MysqlUtils.java b/escheduler-common/src/main/java/cn/escheduler/common/utils/ConnectionUtils.java similarity index 87% rename from escheduler-common/src/main/java/cn/escheduler/common/utils/MysqlUtils.java rename to escheduler-common/src/main/java/cn/escheduler/common/utils/ConnectionUtils.java index 3520527c1a..33e5d41b97 100644 --- a/escheduler-common/src/main/java/cn/escheduler/common/utils/MysqlUtils.java +++ b/escheduler-common/src/main/java/cn/escheduler/common/utils/ConnectionUtils.java @@ -21,16 +21,16 @@ import org.slf4j.LoggerFactory; import java.sql.*; -public class MysqlUtils { +public class ConnectionUtils { - public static final Logger logger = LoggerFactory.getLogger(MysqlUtils.class); + public static final Logger logger = LoggerFactory.getLogger(ConnectionUtils.class); - private static MysqlUtils instance; + private static ConnectionUtils instance; - MysqlUtils() { + ConnectionUtils() { } - public static MysqlUtils getInstance() { + public static ConnectionUtils getInstance() { if (null == instance) { syncInit(); } @@ -39,7 +39,7 @@ public class MysqlUtils { private static synchronized void syncInit() { if (instance == null) { - instance = new MysqlUtils(); + instance = new ConnectionUtils(); } } @@ -76,7 +76,7 @@ public class MysqlUtils { } public static void releaseResource(ResultSet rs, PreparedStatement ps, Connection conn) { - MysqlUtils.getInstance().release(rs,ps,conn); + ConnectionUtils.getInstance().release(rs,ps,conn); if (null != rs) { try { rs.close(); diff --git a/escheduler-common/src/main/java/cn/escheduler/common/utils/TaskParametersUtils.java b/escheduler-common/src/main/java/cn/escheduler/common/utils/TaskParametersUtils.java index feff4141da..c8ceeb44a2 100644 --- a/escheduler-common/src/main/java/cn/escheduler/common/utils/TaskParametersUtils.java +++ b/escheduler-common/src/main/java/cn/escheduler/common/utils/TaskParametersUtils.java @@ -19,6 +19,7 @@ package cn.escheduler.common.utils; import cn.escheduler.common.enums.TaskType; import cn.escheduler.common.task.AbstractParameters; import cn.escheduler.common.task.dependent.DependentParameters; +import cn.escheduler.common.task.flink.FlinkParameters; import cn.escheduler.common.task.mr.MapreduceParameters; import 
 import cn.escheduler.common.task.procedure.ProcedureParameters;
 import cn.escheduler.common.task.python.PythonParameters;
@@ -63,6 +64,8 @@ public class TaskParametersUtils {
             return JSONUtils.parseObject(parameter, PythonParameters.class);
         case DEPENDENT:
             return JSONUtils.parseObject(parameter, DependentParameters.class);
+        case FLINK:
+            return JSONUtils.parseObject(parameter, FlinkParameters.class);
         default:
             return null;
     }
diff --git a/escheduler-dao/src/main/java/cn/escheduler/dao/DaoFactory.java b/escheduler-dao/src/main/java/cn/escheduler/dao/DaoFactory.java
index 97e65115d0..8237a85805 100644
--- a/escheduler-dao/src/main/java/cn/escheduler/dao/DaoFactory.java
+++ b/escheduler-dao/src/main/java/cn/escheduler/dao/DaoFactory.java
@@ -48,10 +48,10 @@ public class DaoFactory {
             synchronized (daoMap) {
                 if (!daoMap.containsKey(className)) {
                     try {
-                        T t = BeanContext.getBean(clazz);
-//                        T t = clazz.getConstructor().newInstance();
-//                        // 实例初始化
-//                        t.init();
+//                        T t = BeanContext.getBean(clazz);
+                        T t = clazz.getConstructor().newInstance();
+                        // instance initialization
+                        t.init();
                         daoMap.put(className, t);
                     } catch (Exception e) {
                         logger.error(e.getMessage(), e);
diff --git a/escheduler-dao/src/main/java/cn/escheduler/dao/ProcessDao.java b/escheduler-dao/src/main/java/cn/escheduler/dao/ProcessDao.java
index ee0dd48575..73ae868c80 100644
--- a/escheduler-dao/src/main/java/cn/escheduler/dao/ProcessDao.java
+++ b/escheduler-dao/src/main/java/cn/escheduler/dao/ProcessDao.java
@@ -1739,7 +1739,7 @@ public class ProcessDao extends AbstractBaseDao {
      * @param processInstanceId
      * @return
      */
-    public String queryQueueByProcessInstanceId(int processInstanceId){
+    public String queryUserQueueByProcessInstanceId(int processInstanceId){
         return userMapper.queryQueueByProcessInstanceId(processInstanceId);
     }
diff --git a/escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessDefinitionMapper.java b/escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessDefinitionMapper.java
index 0c88fa7161..8fb6d7bd3a 100644
--- a/escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessDefinitionMapper.java
+++ b/escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessDefinitionMapper.java
@@ -255,4 +255,26 @@ public interface ProcessDefinitionMapper {
     int updateReceiversAndCcById(@Param("receivers") String receivers,
                                  @Param("receiversCc") String receiversCc,
                                  @Param("processDefinitionId") int processDefinitionId);
+
+    /**
+     * query all
+     * @return
+     */
+    @Results(value = {@Result(property = "id", column = "id", id = true, javaType = Integer.class, jdbcType = JdbcType.INTEGER),
+            @Result(property = "name", column = "name", javaType = String.class, jdbcType = JdbcType.VARCHAR),
+            @Result(property = "version", column = "version", javaType = Integer.class, jdbcType = JdbcType.TINYINT),
+            @Result(property = "releaseState", column = "release_state", typeHandler = EnumOrdinalTypeHandler.class, javaType = ReleaseState.class, jdbcType = JdbcType.TINYINT),
+            @Result(property = "projectId", column = "project_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
+            @Result(property = "userId", column = "user_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
+            @Result(property = "desc", column = "desc", javaType = String.class, jdbcType = JdbcType.VARCHAR),
+            @Result(property = "createTime", column = "create_time", javaType = Timestamp.class, jdbcType = JdbcType.DATE),
+            @Result(property = "updateTime", column = "update_time", javaType = Timestamp.class, jdbcType = JdbcType.DATE),
+            @Result(property = "flag", column = "flag", 
typeHandler = EnumOrdinalTypeHandler.class, javaType = Flag.class, jdbcType = JdbcType.TINYINT), + @Result(property = "userName", column = "user_name", javaType = String.class, jdbcType = JdbcType.VARCHAR), + @Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER), + @Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER), + @Result(property = "projectName", column = "project_name", javaType = String.class, jdbcType = JdbcType.VARCHAR) + }) + @SelectProvider(type = ProcessDefinitionMapperProvider.class, method = "queryAll") + List queryAll(); } diff --git a/escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessDefinitionMapperProvider.java b/escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessDefinitionMapperProvider.java index a3a8e0e00f..620a0ec8d2 100644 --- a/escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessDefinitionMapperProvider.java +++ b/escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessDefinitionMapperProvider.java @@ -297,4 +297,19 @@ public class ProcessDefinitionMapperProvider { } }.toString(); } + + + /** + * query all + * @return + */ + public String queryAll() { + return new SQL() {{ + SELECT("id,name,version,release_state,project_id,user_id,`desc`,create_time,update_time,flag,global_params,receivers,receivers_cc"); + + FROM(TABLE_NAME ); + + ORDER_BY("create_time desc "); + }}.toString(); + } } diff --git a/escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessInstanceMapperProvider.java b/escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessInstanceMapperProvider.java index 78165e3f9a..9bbd3e336e 100644 --- a/escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessInstanceMapperProvider.java +++ b/escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessInstanceMapperProvider.java @@ -354,7 +354,7 @@ public class ProcessInstanceMapperProvider { * @return */ public String listByStatus(Map parameter) { - StringBuffer strStates = new StringBuffer(); + StringBuilder strStates = new StringBuilder(); int[] stateArray = (int[]) parameter.get("states"); for(int i=0;i parameter) { - StringBuffer strStates = new StringBuffer(); + StringBuilder strStates = new StringBuilder(); int[] stateArray = (int[]) parameter.get("states"); for(int i=0;i parameter) { - StringBuffer strStates = new StringBuffer(); + StringBuilder strStates = new StringBuilder(); int[] stateArray = (int[]) parameter.get("states"); for(int i=0;i parameter) { - StringBuffer strStates = new StringBuffer(); + StringBuilder strStates = new StringBuilder(); int[] stateArray = (int[]) parameter.get("states"); for(int i=0;i queryProjectExceptUserId(@Param("userId") Integer userId); + /** + * query all project list + * @return + */ + @Results(value = {@Result(property = "id", column = "id", id = true, javaType = Integer.class, jdbcType = JdbcType.INTEGER), + @Result(property = "userId", column = "user_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER), + @Result(property = "name", column = "name", javaType = String.class, jdbcType = JdbcType.VARCHAR), + @Result(property = "userName", column = "user_name", javaType = String.class, jdbcType = JdbcType.VARCHAR), + @Result(property = "desc", column = "desc", javaType = String.class, jdbcType = JdbcType.VARCHAR), + @Result(property = "perm", column = "perm", javaType = Integer.class, jdbcType = JdbcType.INTEGER), + @Result(property = "createTime", column = "create_time", javaType = Timestamp.class, jdbcType 
= JdbcType.DATE), + @Result(property = "updateTime", column = "update_time", javaType = Timestamp.class, jdbcType = JdbcType.DATE), + }) + @SelectProvider(type = ProjectMapperProvider.class, method = "queryAllProjectList") + List queryAllProjectList(); + } diff --git a/escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProjectMapperProvider.java b/escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProjectMapperProvider.java index d6a2e4adcf..dae8c62eec 100644 --- a/escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProjectMapperProvider.java +++ b/escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProjectMapperProvider.java @@ -240,4 +240,17 @@ public class ProjectMapperProvider { }}.toString(); } + /** + * query all project list + * @return + */ + public String queryAllProjectList() { + return new SQL() {{ + SELECT("*"); + FROM(TABLE_NAME); + WHERE("flag = 1"); + ORDER_BY("create_time desc"); + }}.toString(); + } + } diff --git a/escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ScheduleMapperProvider.java b/escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ScheduleMapperProvider.java index 887c3c9117..85718b8cc1 100644 --- a/escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ScheduleMapperProvider.java +++ b/escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ScheduleMapperProvider.java @@ -163,7 +163,7 @@ public class ScheduleMapperProvider { */ public String selectAllByProcessDefineArray(Map parameter) { - StringBuffer strIds = new StringBuffer(); + StringBuilder strIds = new StringBuilder(); int[] idsArray = (int[]) parameter.get("processDefineIds"); for(int i=0;i parameter) { - StringBuffer strStates = new StringBuffer(); + StringBuilder strStates = new StringBuilder(); int[] stateArray = (int[]) parameter.get("states"); for(int i=0;i parameter) { - StringBuffer strStates = new StringBuffer(); + StringBuilder strStates = new StringBuilder(); int[] stateArray = (int[]) parameter.get("states"); for(int i=0;i parameter) { - StringBuffer strStates = new StringBuffer(); + StringBuilder strStates = new StringBuilder(); int[] stateArray = (int[]) parameter.get("states"); int state = ExecutionStatus.NEED_FAULT_TOLERANCE.ordinal(); for(int i=0;i parameter){ - StringBuffer taskIdsStr = new StringBuffer(); + StringBuilder taskIdsStr = new StringBuilder(); int[] stateArray = (int[]) parameter.get("taskIds"); for(int i=0;i schemaList = SchemaUtils.getAllSchemaList(); @@ -76,11 +98,11 @@ public class EschedulerManager { schemaVersion = schemaDir.split("_")[0]; if(SchemaUtils.isAGreatVersion(schemaVersion , version)) { - logger.info("upgrade escheduler metadata version from " + version + " to " + schemaVersion); + logger.info("upgrade DolphinScheduler metadata version from " + version + " to " + schemaVersion); - logger.info("Begin upgrading escheduler's mysql table structure"); - upgradeDao.upgradeEscheduler(schemaDir); + logger.info("Begin upgrading DolphinScheduler's table structure"); + upgradeDao.upgradeDolphinScheduler(schemaDir); if(SchemaUtils.isAGreatVersion(version,"1.0.1")){ version = upgradeDao.getCurrentVersion(); }else { diff --git a/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/MysqlUpgradeDao.java b/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/MysqlUpgradeDao.java new file mode 100644 index 0000000000..17832896bc --- /dev/null +++ b/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/MysqlUpgradeDao.java @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package cn.escheduler.dao.upgrade; + +import cn.escheduler.common.utils.ConnectionUtils; +import cn.escheduler.dao.datasource.ConnectionFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; + +public class MysqlUpgradeDao extends UpgradeDao { + + public static final Logger logger = LoggerFactory.getLogger(UpgradeDao.class); + private static final String T_VERSION_NAME = "t_escheduler_version"; + private static final String rootDir = System.getProperty("user.dir"); + + @Override + protected void init() { + + } + + private static class MysqlUpgradeDaoHolder { + private static final MysqlUpgradeDao INSTANCE = new MysqlUpgradeDao(); + } + + private MysqlUpgradeDao() { + } + + public static final MysqlUpgradeDao getInstance() { + return MysqlUpgradeDaoHolder.INSTANCE; + } + + + /** + * Determines whether a table exists + * @param tableName + * @return + */ + public boolean isExistsTable(String tableName) { + Connection conn = null; + try { + conn = ConnectionFactory.getDataSource().getConnection(); + ResultSet rs = conn.getMetaData().getTables(null, null, tableName, null); + if (rs.next()) { + return true; + } else { + return false; + } + + } catch (SQLException e) { + logger.error(e.getMessage(),e); + throw new RuntimeException(e.getMessage(),e); + } finally { + ConnectionUtils.releaseResource(null, null, conn); + + } + + } + + /** + * Determines whether a field exists in the specified table + * @param tableName + * @param columnName + * @return + */ + public boolean isExistsColumn(String tableName,String columnName) { + Connection conn = null; + try { + conn = ConnectionFactory.getDataSource().getConnection(); + ResultSet rs = conn.getMetaData().getColumns(null,null,tableName,columnName); + if (rs.next()) { + return true; + } else { + return false; + } + + } catch (SQLException e) { + logger.error(e.getMessage(),e); + throw new RuntimeException(e.getMessage(),e); + } finally { + ConnectionUtils.releaseResource(null, null, conn); + + } + + } + +} diff --git a/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/PostgresqlUpgradeDao.java b/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/PostgresqlUpgradeDao.java new file mode 100644 index 0000000000..03ec8c819a --- /dev/null +++ b/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/PostgresqlUpgradeDao.java @@ -0,0 +1,132 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package cn.escheduler.dao.upgrade; + +import cn.escheduler.common.utils.ConnectionUtils; +import cn.escheduler.dao.datasource.ConnectionFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; + +public class PostgresqlUpgradeDao extends UpgradeDao { + + public static final Logger logger = LoggerFactory.getLogger(UpgradeDao.class); + private static final String T_VERSION_NAME = "t_escheduler_version"; + private static final String rootDir = System.getProperty("user.dir"); + private static final String schema = getSchema(); + + @Override + protected void init() { + + } + + private static class PostgresqlUpgradeDaoHolder { + private static final PostgresqlUpgradeDao INSTANCE = new PostgresqlUpgradeDao(); + } + + private PostgresqlUpgradeDao() { + } + + public static final PostgresqlUpgradeDao getInstance() { + return PostgresqlUpgradeDaoHolder.INSTANCE; + } + + + @Override + public void initSchema(String initSqlPath) { + super.initSchema(initSqlPath); + } + + private static String getSchema(){ + Connection conn = null; + PreparedStatement pstmt = null; + try { + conn = ConnectionFactory.getDataSource().getConnection(); + pstmt = conn.prepareStatement("select current_schema()"); + ResultSet resultSet = pstmt.executeQuery(); + while (resultSet.next()){ + if(resultSet.isFirst()){ + return resultSet.getString(1); + } + } + } catch (SQLException e) { + logger.error(e.getMessage(),e); + + } finally { + ConnectionUtils.releaseResource(null, null, conn); + } + return ""; + } + + /** + * Determines whether a table exists + * @param tableName + * @return + */ + public boolean isExistsTable(String tableName) { + Connection conn = null; + try { + conn = ConnectionFactory.getDataSource().getConnection(); + + ResultSet rs = conn.getMetaData().getTables(null, schema, tableName, null); + if (rs.next()) { + return true; + } else { + return false; + } + + } catch (SQLException e) { + logger.error(e.getMessage(),e); + throw new RuntimeException(e.getMessage(),e); + } finally { + ConnectionUtils.releaseResource(null, null, conn); + } + + } + + /** + * Determines whether a field exists in the specified table + * @param tableName + * @param columnName + * @return + */ + public boolean isExistsColumn(String tableName,String columnName) { + Connection conn = null; + try { + conn = ConnectionFactory.getDataSource().getConnection(); + ResultSet rs = conn.getMetaData().getColumns(null,schema,tableName,columnName); + if (rs.next()) { + return true; + } else { + return false; + } + + } catch (SQLException e) { + logger.error(e.getMessage(),e); + throw new RuntimeException(e.getMessage(),e); + } finally { + ConnectionUtils.releaseResource(null, null, conn); + + } + + } + +} diff --git a/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/UpgradeDao.java b/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/UpgradeDao.java index 6fc8a61417..13c0deffb9 100644 --- a/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/UpgradeDao.java +++ 
b/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/UpgradeDao.java @@ -16,7 +16,8 @@ */ package cn.escheduler.dao.upgrade; -import cn.escheduler.common.utils.MysqlUtils; +import cn.escheduler.common.enums.DbType; +import cn.escheduler.common.utils.ConnectionUtils; import cn.escheduler.common.utils.ScriptRunner; import cn.escheduler.dao.AbstractBaseDao; import cn.escheduler.dao.datasource.ConnectionFactory; @@ -29,8 +30,9 @@ import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; +import java.text.MessageFormat; -public class UpgradeDao extends AbstractBaseDao { +public abstract class UpgradeDao extends AbstractBaseDao { public static final Logger logger = LoggerFactory.getLogger(UpgradeDao.class); private static final String T_VERSION_NAME = "t_escheduler_version"; @@ -41,35 +43,59 @@ public class UpgradeDao extends AbstractBaseDao { } - private static class UpgradeDaoHolder { - private static final UpgradeDao INSTANCE = new UpgradeDao(); - } - - private UpgradeDao() { + /** + * get db type + * @return + */ + public static DbType getDbType(){ + try { + Connection conn = ConnectionFactory.getDataSource().getConnection(); + String name = conn.getMetaData().getDatabaseProductName().toUpperCase(); + return DbType.valueOf(name); + } catch (Exception e) { + logger.error(e.getMessage(),e); + return null; + } } - public static final UpgradeDao getInstance() { - return UpgradeDaoHolder.INSTANCE; + public void initSchema(){ + DbType dbType = getDbType(); + String initSqlPath = ""; + if (dbType != null) { + switch (dbType) { + case MYSQL: + initSqlPath = "/sql/create/release-1.0.0_schema/mysql/"; + initSchema(initSqlPath); + break; + case POSTGRESQL: + initSqlPath = "/sql/create/release-1.2.0_schema/postgresql/"; + initSchema(initSqlPath); + break; + default: + logger.error("not support sql type: {},can't upgrade", dbType); + throw new IllegalArgumentException("not support sql type,can't upgrade"); + } + } } - - public void initEschedulerSchema() { + public void initSchema(String initSqlPath) { // Execute the escheduler DDL, it cannot be rolled back - runInitEschedulerDDL(); + runInitDDL(initSqlPath); // Execute the escheduler DML, it can be rolled back - runInitEschedulerDML(); + runInitDML(initSqlPath); } - private void runInitEschedulerDML() { + private void runInitDML(String initSqlPath) { Connection conn = null; if (StringUtils.isEmpty(rootDir)) { throw new RuntimeException("Environment variable user.dir not found"); } - String mysqlSQLFilePath = rootDir + "/sql/create/release-1.0.0_schema/mysql/escheduler_dml.sql"; + //String mysqlSQLFilePath = rootDir + "/sql/create/release-1.0.0_schema/mysql/escheduler_dml.sql"; + String mysqlSQLFilePath = rootDir + initSqlPath + "dolphinscheduler_dml.sql"; try { conn = ConnectionFactory.getDataSource().getConnection(); conn.setAutoCommit(false); @@ -98,18 +124,19 @@ public class UpgradeDao extends AbstractBaseDao { logger.error(e.getMessage(),e); throw new RuntimeException(e.getMessage(),e); } finally { - MysqlUtils.releaseResource(null, null, conn); + ConnectionUtils.releaseResource(null, null, conn); } } - private void runInitEschedulerDDL() { + private void runInitDDL(String initSqlPath) { Connection conn = null; if (StringUtils.isEmpty(rootDir)) { throw new RuntimeException("Environment variable user.dir not found"); } - String mysqlSQLFilePath = rootDir + "/sql/create/release-1.0.0_schema/mysql/escheduler_ddl.sql"; + //String mysqlSQLFilePath = rootDir + 
"/sql/create/release-1.0.0_schema/mysql/dolphinscheduler_ddl.sql"; + String mysqlSQLFilePath = rootDir + initSqlPath + "dolphinscheduler_ddl.sql"; try { conn = ConnectionFactory.getDataSource().getConnection(); // Execute the escheduler_ddl.sql script to create the table structure of escheduler @@ -126,7 +153,7 @@ public class UpgradeDao extends AbstractBaseDao { logger.error(e.getMessage(),e); throw new RuntimeException(e.getMessage(),e); } finally { - MysqlUtils.releaseResource(null, null, conn); + ConnectionUtils.releaseResource(null, null, conn); } @@ -137,26 +164,7 @@ public class UpgradeDao extends AbstractBaseDao { * @param tableName * @return */ - public boolean isExistsTable(String tableName) { - Connection conn = null; - try { - conn = ConnectionFactory.getDataSource().getConnection(); - ResultSet rs = conn.getMetaData().getTables(null, null, tableName, null); - if (rs.next()) { - return true; - } else { - return false; - } - - } catch (SQLException e) { - logger.error(e.getMessage(),e); - throw new RuntimeException(e.getMessage(),e); - } finally { - MysqlUtils.releaseResource(null, null, conn); - - } - - } + public abstract boolean isExistsTable(String tableName); /** * Determines whether a field exists in the specified table @@ -164,26 +172,7 @@ public class UpgradeDao extends AbstractBaseDao { * @param columnName * @return */ - public boolean isExistsColumn(String tableName,String columnName) { - Connection conn = null; - try { - conn = ConnectionFactory.getDataSource().getConnection(); - ResultSet rs = conn.getMetaData().getColumns(null,null,tableName,columnName); - if (rs.next()) { - return true; - } else { - return false; - } - - } catch (SQLException e) { - logger.error(e.getMessage(),e); - throw new RuntimeException(e.getMessage(),e); - } finally { - MysqlUtils.releaseResource(null, null, conn); - - } - - } + public abstract boolean isExistsColumn(String tableName,String columnName); public String getCurrentVersion() { @@ -207,26 +196,26 @@ public class UpgradeDao extends AbstractBaseDao { logger.error(e.getMessage(),e); throw new RuntimeException("sql: " + sql, e); } finally { - MysqlUtils.releaseResource(rs, pstmt, conn); - + ConnectionUtils.releaseResource(rs, pstmt, conn); } } - public void upgradeEscheduler(String schemaDir) { + public void upgradeDolphinScheduler(String schemaDir) { - upgradeEschedulerDDL(schemaDir); + upgradeDolphinSchedulerDDL(schemaDir); - upgradeEschedulerDML(schemaDir); + upgradeDolphinSchedulerDML(schemaDir); } - private void upgradeEschedulerDML(String schemaDir) { + private void upgradeDolphinSchedulerDML(String schemaDir) { String schemaVersion = schemaDir.split("_")[0]; if (StringUtils.isEmpty(rootDir)) { throw new RuntimeException("Environment variable user.dir not found"); } - String mysqlSQLFilePath = rootDir + "/sql/upgrade/" + schemaDir + "/mysql/escheduler_dml.sql"; + String mysqlSQLFilePath = MessageFormat.format("{0}/sql/upgrade/{1}/{2}/dolphinscheduler_dml.sql",rootDir,schemaDir,getDbType().name().toLowerCase()); + logger.info("mysqlSQLFilePath"+mysqlSQLFilePath); Connection conn = null; PreparedStatement pstmt = null; try { @@ -277,16 +266,16 @@ public class UpgradeDao extends AbstractBaseDao { logger.error(e.getMessage(),e); throw new RuntimeException(e.getMessage(),e); } finally { - MysqlUtils.releaseResource(null, pstmt, conn); + ConnectionUtils.releaseResource(null, pstmt, conn); } } - private void upgradeEschedulerDDL(String schemaDir) { + private void upgradeDolphinSchedulerDDL(String schemaDir) { if 
(StringUtils.isEmpty(rootDir)) {
             throw new RuntimeException("Environment variable user.dir not found");
         }
-        String mysqlSQLFilePath = rootDir + "/sql/upgrade/" + schemaDir + "/mysql/escheduler_ddl.sql";
+        String mysqlSQLFilePath = MessageFormat.format("{0}/sql/upgrade/{1}/{2}/dolphinscheduler_ddl.sql",rootDir,schemaDir,getDbType().name().toLowerCase());
         Connection conn = null;
         PreparedStatement pstmt = null;
         try {
@@ -316,7 +305,7 @@
             logger.error(e.getMessage(),e);
             throw new RuntimeException(e.getMessage(),e);
         } finally {
-            MysqlUtils.releaseResource(null, pstmt, conn);
+            ConnectionUtils.releaseResource(null, pstmt, conn);
         }
     }
@@ -338,7 +327,7 @@
             logger.error(e.getMessage(),e);
             throw new RuntimeException("sql: " + upgradeSQL, e);
         } finally {
-            MysqlUtils.releaseResource(null, pstmt, conn);
+            ConnectionUtils.releaseResource(null, pstmt, conn);
         }
     }
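Reviewer note: the DDL path above originally pointed at `dolphinscheduler_dml.sql`, which would have made `upgradeDolphinSchedulerDDL` re-run the DML script; it must load `dolphinscheduler_ddl.sql`, as fixed here. A quick sketch of what the new `MessageFormat`-based path resolves to (the `rootDir` and `schemaDir` values are illustrative):

```java
import java.text.MessageFormat;

public class UpgradePathSketch {
    public static void main(String[] args) {
        String rootDir = "/opt/dolphinscheduler";   // System.getProperty("user.dir") in UpgradeDao
        String schemaDir = "1.2.0_schema";          // example schema directory name
        String dbType = "mysql";                    // getDbType().name().toLowerCase()
        String path = MessageFormat.format(
                "{0}/sql/upgrade/{1}/{2}/dolphinscheduler_ddl.sql", rootDir, schemaDir, dbType);
        // -> /opt/dolphinscheduler/sql/upgrade/1.2.0_schema/mysql/dolphinscheduler_ddl.sql
        System.out.println(path);
    }
}
```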
diff --git a/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/CreateEscheduler.java b/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/CreateDolphinScheduler.java
similarity index 66%
rename from escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/CreateEscheduler.java
rename to escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/CreateDolphinScheduler.java
index 2f1e070e7b..2c827dfea4 100644
--- a/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/CreateEscheduler.java
+++ b/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/CreateDolphinScheduler.java
@@ -16,29 +16,29 @@
  */
 package cn.escheduler.dao.upgrade.shell;
-import cn.escheduler.dao.upgrade.EschedulerManager;
+import cn.escheduler.dao.upgrade.DolphinSchedulerManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 /**
- * init escheduler
+ * init DolphinScheduler
  *
  */
-public class CreateEscheduler {
-    private static final Logger logger = LoggerFactory.getLogger(CreateEscheduler.class);
+public class CreateDolphinScheduler {
+    private static final Logger logger = LoggerFactory.getLogger(CreateDolphinScheduler.class);
     public static void main(String[] args) {
-        EschedulerManager eschedulerManager = new EschedulerManager();
+        DolphinSchedulerManager dolphinSchedulerManager = new DolphinSchedulerManager();
         try {
-            eschedulerManager.initEscheduler();
-            logger.info("init escheduler finished");
-            eschedulerManager.upgradeEscheduler();
-            logger.info("upgrade escheduler finished");
-            logger.info("create escheduler success");
+            dolphinSchedulerManager.initDolphinScheduler();
+            logger.info("init DolphinScheduler finished");
+            dolphinSchedulerManager.upgradeDolphinScheduler();
+            logger.info("upgrade DolphinScheduler finished");
+            logger.info("create DolphinScheduler success");
         } catch (Exception e) {
-            logger.error("create escheduler failed",e);
+            logger.error("create DolphinScheduler failed",e);
         }
     }
diff --git a/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/InitEscheduler.java b/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/InitDolphinScheduler.java
similarity index 72%
rename from escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/InitEscheduler.java
rename to escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/InitDolphinScheduler.java
index e88bb1e3f1..4c01f7413b 100644
--- a/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/InitEscheduler.java
+++ b/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/InitDolphinScheduler.java
@@ -16,23 +16,23 @@
  */
 package cn.escheduler.dao.upgrade.shell;
-import cn.escheduler.dao.upgrade.EschedulerManager;
+import cn.escheduler.dao.upgrade.DolphinSchedulerManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 /**
- * init escheduler
+ * init DolphinScheduler
  *
  */
-public class InitEscheduler {
-    private static final Logger logger = LoggerFactory.getLogger(InitEscheduler.class);
+public class InitDolphinScheduler {
+    private static final Logger logger = LoggerFactory.getLogger(InitDolphinScheduler.class);
     public static void main(String[] args) {
-        Thread.currentThread().setName("manager-InitEscheduler");
-        EschedulerManager eschedulerManager = new EschedulerManager();
-        eschedulerManager.initEscheduler();
-        logger.info("init escheduler finished");
+        Thread.currentThread().setName("manager-InitDolphinScheduler");
+        DolphinSchedulerManager dolphinSchedulerManager = new DolphinSchedulerManager();
+        dolphinSchedulerManager.initDolphinScheduler();
+        logger.info("init DolphinScheduler finished");
     }
 }
diff --git a/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/UpgradeEscheduler.java b/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/UpgradeDolphinScheduler.java
similarity index 73%
rename from escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/UpgradeEscheduler.java
rename to escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/UpgradeDolphinScheduler.java
index 7608d8ce6f..56e706cd2b 100644
--- a/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/UpgradeEscheduler.java
+++ b/escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/UpgradeDolphinScheduler.java
@@ -16,28 +16,26 @@
  */
 package cn.escheduler.dao.upgrade.shell;
-import cn.escheduler.dao.upgrade.EschedulerManager;
+import cn.escheduler.dao.upgrade.DolphinSchedulerManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 /**
- * upgrade escheduler database
+ * upgrade DolphinScheduler database
 */
-public class UpgradeEscheduler {
-    private static final Logger logger = LoggerFactory.getLogger(UpgradeEscheduler.class);
+public class UpgradeDolphinScheduler {
+    private static final Logger logger = LoggerFactory.getLogger(UpgradeDolphinScheduler.class);
     public static void main(String[] args) {
-        EschedulerManager eschedulerManager = new EschedulerManager();
+        DolphinSchedulerManager dolphinSchedulerManager = new DolphinSchedulerManager();
         try {
-            eschedulerManager.upgradeEscheduler();
-            logger.info("upgrade escheduler success");
+            dolphinSchedulerManager.upgradeDolphinScheduler();
+            logger.info("upgrade DolphinScheduler success");
         } catch (Exception e) {
             logger.error(e.getMessage(),e);
-            logger.info("Upgrade escheduler failed");
+            logger.error("upgrade DolphinScheduler failed");
             throw new RuntimeException(e);
         }
-
-
 }
diff --git a/escheduler-server/src/main/java/cn/escheduler/server/master/MasterServer.java b/escheduler-server/src/main/java/cn/escheduler/server/master/MasterServer.java
index 231273e2a1..d68b181660 100644
--- a/escheduler-server/src/main/java/cn/escheduler/server/master/MasterServer.java
+++ b/escheduler-server/src/main/java/cn/escheduler/server/master/MasterServer.java
@@ -33,6 +33,7 @@ import org.quartz.SchedulerException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.SpringApplication;
 import org.springframework.boot.WebApplicationType;
 import org.springframework.boot.builder.SpringApplicationBuilder;
 import org.springframework.context.annotation.ComponentScan;
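Reviewer note: the hunk below drops `WebApplicationType.NONE`, so the master (and, later in this patch, the worker) now boots as a web application; that is why this patch also pins `server.port` in `application_master.properties` (5566) and `application_worker.properties` (7788) to avoid port clashes. The javadoc context line "master server not use web service" is now stale. An equivalent minimal bootstrap, as a sketch (the programmatic default shown is an optional extra, not part of this patch):

```java
package cn.escheduler.server.master;

import java.util.Properties;

import org.springframework.boot.SpringApplication;

public class MasterBootstrapSketch {
    public static void main(String[] args) {
        // equivalent to the new MasterServer#main: no WebApplicationType.NONE,
        // so the embedded web server starts and application_master.properties applies
        SpringApplication app = new SpringApplication(MasterServer.class);
        // a programmatic fallback for server.port; the server.port=5566 that this
        // patch adds to application_master.properties still overrides it
        Properties defaults = new Properties();
        defaults.setProperty("server.port", "5566");
        app.setDefaultProperties(defaults);
        app.run(args);
    }
}
```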
@@ -90,10 +91,9 @@ public class MasterServer extends AbstractServer {
      * master server not use web service
      */
     public static void main(String[] args) {
-        SpringApplicationBuilder app = new SpringApplicationBuilder(MasterServer.class);
+        SpringApplication app = new SpringApplication(MasterServer.class);
-        app.web(WebApplicationType.NONE)
-           .run(args);
+        app.run(args);
     }
diff --git a/escheduler-server/src/main/java/cn/escheduler/server/utils/FlinkArgsUtils.java b/escheduler-server/src/main/java/cn/escheduler/server/utils/FlinkArgsUtils.java
new file mode 100644
index 0000000000..308103073d
--- /dev/null
+++ b/escheduler-server/src/main/java/cn/escheduler/server/utils/FlinkArgsUtils.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package cn.escheduler.server.utils;
+
+
+import cn.escheduler.common.Constants;
+import cn.escheduler.common.enums.ProgramType;
+import cn.escheduler.common.task.flink.FlinkParameters;
+import org.apache.commons.lang.StringUtils;
+
+import java.util.ArrayList;
+import java.util.List;
+
+
+/**
+ * flink args utils
+ */
+public class FlinkArgsUtils {
+
+    /**
+     * build args
+     * @param param
+     * @return
+     */
+    public static List<String> buildArgs(FlinkParameters param) {
+        List<String> args = new ArrayList<>();
+
+        args.add(Constants.FLINK_RUN_MODE);      //-m
+        args.add(Constants.FLINK_YARN_CLUSTER);  //yarn-cluster
+
+        if (param.getSlot() != 0) {
+            args.add(Constants.FLINK_YARN_SLOT);
+            args.add(String.format("%d", param.getSlot()));  //-ys
+        }
+
+        if (StringUtils.isNotEmpty(param.getAppName())) {  //-ynm
+            args.add(Constants.FLINK_APP_NAME);
+            args.add(param.getAppName());
+        }
+
+        if (param.getTaskManager() != 0) {  //-yn
+            args.add(Constants.FLINK_TASK_MANAGE);
+            args.add(String.format("%d", param.getTaskManager()));
+        }
+
+        if (StringUtils.isNotEmpty(param.getJobManagerMemory())) {
+            args.add(Constants.FLINK_JOB_MANAGE_MEM);
+            args.add(param.getJobManagerMemory());  //-yjm
+        }
+
+        if (StringUtils.isNotEmpty(param.getTaskManagerMemory())) {  //-ytm
+            args.add(Constants.FLINK_TASK_MANAGE_MEM);
+            args.add(param.getTaskManagerMemory());
+        }
+        args.add(Constants.FLINK_detach);  //-d
+
+        if (param.getProgramType() != null) {
+            if (param.getProgramType() != ProgramType.PYTHON) {
+                if (StringUtils.isNotEmpty(param.getMainClass())) {
+                    args.add(Constants.FLINK_MAIN_CLASS);  //-c
+                    args.add(param.getMainClass());        //main class
+                }
+            }
+        }
+
+        if (param.getMainJar() != null) {
+            args.add(param.getMainJar().getRes());
+        }
+
+        // --files --conf --libjar ...
+        if (StringUtils.isNotEmpty(param.getOthers())) {
+            String others = param.getOthers();
+            if (!others.contains("--queue")) {
+                if (StringUtils.isNotEmpty(param.getQueue())) {
+                    args.add(Constants.SPARK_QUEUE);
+                    args.add(param.getQueue());
+                }
+            }
+            args.add(others);
+        } else if (StringUtils.isNotEmpty(param.getQueue())) {
+            args.add(Constants.SPARK_QUEUE);
+            args.add(param.getQueue());
+        }
+
+        if (StringUtils.isNotEmpty(param.getMainArgs())) {
+            args.add(param.getMainArgs());
+        }
+
+        return args;
+    }
+
+}
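Reviewer note: `FlinkArgsUtils` reuses `Constants.SPARK_QUEUE` for the `--queue` flag rather than introducing a Flink-specific constant. A usage sketch of `buildArgs`; the concrete flag strings live in `Constants` and are not shown in this patch, so the expected output below is inferred from the inline comments (-m, -ys, -ynm, -yn, -yjm, -ytm, -d, -c) and should be treated as an assumption (as is the `ResourceInfo.setRes` setter):

```java
import java.util.List;

import cn.escheduler.common.enums.ProgramType;
import cn.escheduler.common.process.ResourceInfo;
import cn.escheduler.common.task.flink.FlinkParameters;
import cn.escheduler.server.utils.FlinkArgsUtils;

public class FlinkArgsSketch {
    public static void main(String[] args) {
        FlinkParameters params = new FlinkParameters();
        ResourceInfo jar = new ResourceInfo();
        jar.setRes("WordCount.jar");   // assumes ResourceInfo exposes a setter for "res"
        params.setMainJar(jar);
        params.setMainClass("com.example.WordCount");
        params.setSlot(4);
        params.setTaskManager(2);
        params.setJobManagerMemory("1G");
        params.setTaskManagerMemory("2G");
        params.setAppName("wordcount");
        params.setProgramType(ProgramType.JAVA);

        List<String> built = FlinkArgsUtils.buildArgs(params);
        // expected, given the flag comments in this patch:
        // -m yarn-cluster -ys 4 -ynm wordcount -yn 2 -yjm 1G -ytm 2G -d -c com.example.WordCount WordCount.jar
        System.out.println(String.join(" ", built));
    }
}
```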
diff --git a/escheduler-server/src/main/java/cn/escheduler/server/worker/WorkerServer.java b/escheduler-server/src/main/java/cn/escheduler/server/worker/WorkerServer.java
index af9a8ee997..61ca0b309d 100644
--- a/escheduler-server/src/main/java/cn/escheduler/server/worker/WorkerServer.java
+++ b/escheduler-server/src/main/java/cn/escheduler/server/worker/WorkerServer.java
@@ -39,6 +39,7 @@ import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.SpringApplication;
 import org.springframework.boot.WebApplicationType;
 import org.springframework.boot.builder.SpringApplicationBuilder;
 import org.springframework.context.annotation.ComponentScan;
@@ -121,10 +122,9 @@ public class WorkerServer extends AbstractServer {
      */
     public static void main(String[] args) {
-        SpringApplicationBuilder app = new SpringApplicationBuilder(WorkerServer.class);
+        SpringApplication app = new SpringApplication(WorkerServer.class);
-        app.web(WebApplicationType.NONE)
-           .run(args);
+        app.run(args);
     }
diff --git a/escheduler-server/src/main/java/cn/escheduler/server/worker/runner/FetchTaskThread.java b/escheduler-server/src/main/java/cn/escheduler/server/worker/runner/FetchTaskThread.java
index 31c4def1da..07ca740d24 100644
--- a/escheduler-server/src/main/java/cn/escheduler/server/worker/runner/FetchTaskThread.java
+++ b/escheduler-server/src/main/java/cn/escheduler/server/worker/runner/FetchTaskThread.java
@@ -187,8 +187,9 @@ public class FetchTaskThread implements Runnable{
                     continue;
                 }
-                // set queue for process instance
-                taskInstance.getProcessInstance().setQueue(tenant.getQueue());
+                // set queue for process instance; a user-specified queue takes precedence over the tenant queue
+                String userQueue = processDao.queryUserQueueByProcessInstanceId(taskInstance.getProcessInstanceId());
+                taskInstance.getProcessInstance().setQueue(StringUtils.isEmpty(userQueue) ? tenant.getQueue() : userQueue);
                 logger.info("worker fetch taskId : {} from queue ", taskInstId);
diff --git a/escheduler-server/src/main/java/cn/escheduler/server/worker/task/AbstractTask.java b/escheduler-server/src/main/java/cn/escheduler/server/worker/task/AbstractTask.java
index 213f4fd3f9..6472873d8b 100644
--- a/escheduler-server/src/main/java/cn/escheduler/server/worker/task/AbstractTask.java
+++ b/escheduler-server/src/main/java/cn/escheduler/server/worker/task/AbstractTask.java
@@ -22,6 +22,7 @@ import cn.escheduler.common.enums.TaskRecordStatus;
 import cn.escheduler.common.enums.TaskType;
 import cn.escheduler.common.process.Property;
 import cn.escheduler.common.task.AbstractParameters;
+import cn.escheduler.common.task.flink.FlinkParameters;
 import cn.escheduler.common.task.mr.MapreduceParameters;
 import cn.escheduler.common.task.procedure.ProcedureParameters;
 import cn.escheduler.common.task.python.PythonParameters;
@@ -178,6 +179,9 @@ public abstract class AbstractTask {
             case SPARK:
                 paramsClass = SparkParameters.class;
                 break;
+            case FLINK:
+                paramsClass = FlinkParameters.class;
+                break;
             case PYTHON:
                 paramsClass = PythonParameters.class;
                 break;
diff --git a/escheduler-server/src/main/java/cn/escheduler/server/worker/task/TaskManager.java b/escheduler-server/src/main/java/cn/escheduler/server/worker/task/TaskManager.java
index e23a29ae08..986a6179c9 100644
--- a/escheduler-server/src/main/java/cn/escheduler/server/worker/task/TaskManager.java
+++ b/escheduler-server/src/main/java/cn/escheduler/server/worker/task/TaskManager.java
@@ -19,6 +19,7 @@ package cn.escheduler.server.worker.task;
 import cn.escheduler.common.enums.TaskType;
 import cn.escheduler.server.worker.task.dependent.DependentTask;
+import cn.escheduler.server.worker.task.flink.FlinkTask;
 import cn.escheduler.server.worker.task.mr.MapReduceTask;
 import cn.escheduler.server.worker.task.processdure.ProcedureTask;
 import cn.escheduler.server.worker.task.python.PythonTask;
@@ -55,6 +56,8 @@ public class TaskManager {
             return new MapReduceTask(props, logger);
         case SPARK:
             return new SparkTask(props, logger);
+        case FLINK:
+            return new FlinkTask(props, logger);
         case PYTHON:
             return new PythonTask(props, logger);
         case DEPENDENT:
diff --git a/escheduler-server/src/main/java/cn/escheduler/server/worker/task/flink/FlinkTask.java b/escheduler-server/src/main/java/cn/escheduler/server/worker/task/flink/FlinkTask.java
new file mode 100644
index 0000000000..bf6f0cc9fb
--- /dev/null
+++ b/escheduler-server/src/main/java/cn/escheduler/server/worker/task/flink/FlinkTask.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package cn.escheduler.server.worker.task.flink; + +import cn.escheduler.common.process.Property; +import cn.escheduler.common.task.AbstractParameters; +import cn.escheduler.common.task.flink.FlinkParameters; +import cn.escheduler.common.utils.JSONUtils; +import cn.escheduler.common.utils.ParameterUtils; +import cn.escheduler.dao.model.ProcessInstance; +import cn.escheduler.server.utils.FlinkArgsUtils; +import cn.escheduler.server.utils.ParamUtils; +import cn.escheduler.server.worker.task.AbstractYarnTask; +import cn.escheduler.server.worker.task.TaskProps; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +/** + * flink task + */ +public class FlinkTask extends AbstractYarnTask { + + /** + * flink command + */ + private static final String FLINK_COMMAND = "flink"; + private static final String FLINK_RUN = "run"; + + /** + * flink parameters + */ + private FlinkParameters flinkParameters; + + public FlinkTask(TaskProps props, Logger logger) { + super(props, logger); + } + + @Override + public void init() { + + logger.info("flink task params {}", taskProps.getTaskParams()); + + flinkParameters = JSONUtils.parseObject(taskProps.getTaskParams(), FlinkParameters.class); + + if (!flinkParameters.checkParameters()) { + throw new RuntimeException("flink task params is not valid"); + } + flinkParameters.setQueue(taskProps.getQueue()); + + if (StringUtils.isNotEmpty(flinkParameters.getMainArgs())) { + String args = flinkParameters.getMainArgs(); + // get process instance by task instance id + ProcessInstance processInstance = processDao.findProcessInstanceByTaskId(taskProps.getTaskInstId()); + + /** + * combining local and global parameters + */ + Map paramsMap = ParamUtils.convert(taskProps.getUserDefParamsMap(), + taskProps.getDefinedParams(), + flinkParameters.getLocalParametersMap(), + processInstance.getCmdTypeIfComplement(), + processInstance.getScheduleTime()); + + logger.info("param Map : {}", paramsMap); + if (paramsMap != null ){ + + args = ParameterUtils.convertParameterPlaceholders(args, ParamUtils.convert(paramsMap)); + logger.info("param args : {}", args); + } + flinkParameters.setMainArgs(args); + } + } + + /** + * create command + * @return + */ + @Override + protected String buildCommand() { + List args = new ArrayList<>(); + + args.add(FLINK_COMMAND); + args.add(FLINK_RUN); + logger.info("flink task args : {}", args); + // other parameters + args.addAll(FlinkArgsUtils.buildArgs(flinkParameters)); + + String command = ParameterUtils + .convertParameterPlaceholders(String.join(" ", args), taskProps.getDefinedParams()); + + logger.info("flink task command : {}", command); + + return command; + } + + @Override + public AbstractParameters getParameters() { + return flinkParameters; + } +} diff --git a/escheduler-server/src/main/resources/application_master.properties b/escheduler-server/src/main/resources/application_master.properties index cc4774ae94..68fe3dd02c 100644 --- a/escheduler-server/src/main/resources/application_master.properties +++ b/escheduler-server/src/main/resources/application_master.properties @@ -1 +1,4 @@ logging.config=classpath:master_logback.xml + +# server port +server.port=5566 diff --git a/escheduler-server/src/main/resources/application_worker.properties b/escheduler-server/src/main/resources/application_worker.properties new file mode 100644 index 0000000000..b7e3c88b24 --- /dev/null +++ 
b/escheduler-server/src/main/resources/application_worker.properties
@@ -0,0 +1,4 @@
+logging.config=classpath:worker_logback.xml
+
+# server port
+server.port=7788
diff --git a/escheduler-ui/build/webpack.config.prod.js b/escheduler-ui/build/webpack.config.prod.js
index 595de11076..4a287feb8e 100644
--- a/escheduler-ui/build/webpack.config.prod.js
+++ b/escheduler-ui/build/webpack.config.prod.js
@@ -100,8 +100,10 @@ const config = merge.smart(baseConfig, {
         sourceMap: true,
         uglifyOptions: {
           compress: {
+            warnings: false,
+            drop_debugger: true,
             drop_console: true,
-            drop_debugger: true
+            pure_funcs: ['console.log'] // remove console.log calls
           },
           comments: function (n, c) {
             /*! IMPORTANT: Please preserve 3rd-party library license info, inspired from @allex/amd-build-worker/config/jsplumb.js */
diff --git a/escheduler-ui/install-escheduler-ui.sh b/escheduler-ui/install-escheduler-ui.sh
index be9a3801ac..3c9578746a 100755
--- a/escheduler-ui/install-escheduler-ui.sh
+++ b/escheduler-ui/install-escheduler-ui.sh
@@ -1,21 +1,21 @@
 #!/bin/bash
-# 当前路径
+# current path
 esc_basepath=$(cd `dirname $0`; pwd)
 menu(){ cat <> /etc/nginx/conf.d/escheduler.conf + " >> /etc/nginx/conf.d/dolphinscheduler.conf }
 ubuntu(){
-    #更新源
+    # update source
     apt-get update
-    #安装nginx
+    # install nginx
     apt-get install -y nginx
-    # 配置nginx
-    eschedulerConf $1 $2
+    # config nginx
+    dolphinschedulerConf $1 $2
-    # 启动nginx
+    # start nginx
    /etc/init.d/nginx start
    sleep 1
    if [ $? -ne 0 ];then
@@ -81,17 +81,17 @@ centos7(){
     rpm -Uvh http://nginx.org/packages/centos/7/noarch/RPMS/nginx-release-centos-7-0.el7.ngx.noarch.rpm
     yum install -y nginx
-    # 配置nginx
-    eschedulerConf $1 $2
+    # config nginx
+    dolphinschedulerConf $1 $2
-    # 解决 0.0.0.0:8888 问题
+    # solve the 0.0.0.0:8888 problem
     yum -y install policycoreutils-python
     semanage port -a -t http_port_t -p tcp $esc_proxy
-    # 开放前端访问端口
+    # open the front-end access port
     firewall-cmd --zone=public --add-port=$esc_proxy/tcp --permanent
-    # 启动nginx
+    # start nginx
     systemctl start nginx
     sleep 1
     if [ $? -ne 0 ];then
@@ -99,9 +99,9 @@
     fi
     nginx -s reload
-    # 调整SELinux的参数
+    # adjust SELinux parameters
     sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
-    # 临时生效
+    # take effect temporarily
     setenforce 0
 }
@@ -114,10 +114,10 @@ centos6(){
     # install nginx
     yum install nginx -y
-    # 配置nginx
-    eschedulerConf $1 $2
+    # config nginx
+    dolphinschedulerConf $1 $2
-    # 启动nginx
+    # start nginx
     /etc/init.d/nginx start
     sleep 1
     if [ $? -ne 0 ];then
@@ -125,17 +125,17 @@
     fi
     nginx -s reload
-    # 调整SELinux的参数
+    # adjust SELinux parameters
     sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
-    # 临时生效
+    # take effect temporarily
     setenforce 0
 }
 function main(){
-    echo "欢迎使用easy scheduler前端部署脚本,目前前端部署脚本仅支持CentOS,Ubuntu"
-    echo "请在 escheduler-ui 目录下执行"
+    echo "Welcome to the DolphinScheduler front-end deployment script. It currently supports CentOS and Ubuntu only."
+    echo "Please run it from the dolphinscheduler-ui directory"
     #To be compatible with MacOS and Linux
     if [[ "$OSTYPE" == "darwin"* ]]; then
@@ -166,33 +166,33 @@
     fi
-    # 配置前端访问端口
-    read -p "请输入nginx代理端口,不输入,则默认8888 :" esc_proxy_port
+    # config the front-end access port
+    read -p "Please enter the nginx proxy port (press Enter for the default 8888): " esc_proxy_port
     if [ -z "${esc_proxy_port}" ];then
         esc_proxy_port="8888"
     fi
-    read -p "请输入api server代理ip,必须输入,例如:192.168.xx.xx :" esc_api_server_ip
+    read -p "Please enter the api server proxy ip (required), for example 192.168.xx.xx: " esc_api_server_ip
     if [ -z "${esc_api_server_ip}" ];then
-        echo "api server代理ip不能为空."
+        echo "The api server proxy ip cannot be empty."
         exit 1
     fi
-    read -p "请输入api server代理端口,不输入,则默认12345 :" esc_api_server_port
+    read -p "Please enter the api server proxy port (press Enter for the default 12345): " esc_api_server_port
     if [ -z "${esc_api_server_port}" ];then
         esc_api_server_port="12345"
     fi
-    # api server后端地址
+    # api server backend address
     esc_api_server="http://$esc_api_server_ip:$esc_api_server_port"
-    # 本机ip地址
+    # local ip address
    esc_ipaddr=$(ip a | grep inet | grep -v inet6 | grep -v 127 | sed 's/^[ \t]*//g' | cut -d ' ' -f2 | head -n 1 | awk -F '/' '{print $1}')
-    # 提示信息
+    # prompt message
     menu
-    read -p "请输入安装编号(1|2|3|4):" num
+    read -p "Please enter the installation number (1|2|3|4): " num
     case $num in
         1)
@@ -212,7 +212,7 @@
             echo $"Usage :sh $0"
             exit 1
     esac
-    echo "请浏览器访问:http://${esc_ipaddr}:${esc_proxy_port}"
+    echo "Please visit http://${esc_ipaddr}:${esc_proxy_port} in your browser"
 }
diff --git a/escheduler-ui/package.json b/escheduler-ui/package.json
index acfa498a54..014205ba16 100644
--- a/escheduler-ui/package.json
+++ b/escheduler-ui/package.json
@@ -1,5 +1,5 @@
 {
-  "name": "escheduler",
+  "name": "dolphinscheduler",
   "version": "1.0.0",
   "description": "调度平台前端项目",
   "author": "gongzijian ",
@@ -27,7 +27,6 @@
     "babel-plugin-transform-runtime": "^6.23.0",
     "babel-plugin-transform-vue-jsx": "^3.5.0",
     "babel-preset-env": "^1.6.1",
-    "babel-runtime": "^6.26.0",
     "bootstrap": "3.3.7",
     "canvg": "1.5",
     "clipboard": "^2.0.1",
@@ -77,16 +76,7 @@
     ]
   },
   "devDependencies": {
-    "jasmine-core": "^3.2.1",
     "jquery": "1.12.4",
-    "karma": "^3.0.0",
-    "karma-browserstack-launcher": "^1.3.0",
-    "karma-chrome-launcher": "^2.2.0",
-    "karma-coverage": "^1.1.2",
-    "karma-jasmine": "^1.1.2",
-    "karma-sourcemap-loader": "^0.3.7",
-    "karma-spec-reporter": "^0.0.32",
-    "karma-webpack": "^3.0.0",
     "vue": "^2.5.17",
     "vue-router": "2.7.0",
     "vuex": "^3.0.0"
diff --git a/escheduler-ui/src/js/conf/home/pages/dag/_source/config.js b/escheduler-ui/src/js/conf/home/pages/dag/_source/config.js
index 9c1065870f..5305bf7476 100644
--- a/escheduler-ui/src/js/conf/home/pages/dag/_source/config.js
+++ b/escheduler-ui/src/js/conf/home/pages/dag/_source/config.js
@@ -260,6 +260,10 @@ let tasksType = {
     desc: 'SPARK',
     color: '#E46F13'
   },
+  'FLINK': {
+    desc: 'FLINK',
+    color: '#E46F13'
+  },
   'MR': {
     desc: 'MapReduce',
     color: '#A0A5CC'
diff --git 
a/escheduler-ui/src/js/conf/home/pages/dag/_source/dag.scss b/escheduler-ui/src/js/conf/home/pages/dag/_source/dag.scss index 95592e0754..37d3acaa19 100644 --- a/escheduler-ui/src/js/conf/home/pages/dag/_source/dag.scss +++ b/escheduler-ui/src/js/conf/home/pages/dag/_source/dag.scss @@ -70,6 +70,9 @@ .icos-SPARK { background: url("../img/toolbar_SPARK.png") no-repeat 50% 50%; } + .icos-FLINK { + background: url("../img/toobar_flink.svg") no-repeat 50% 50%; + } .icos-MR { background: url("../img/toolbar_MR.png") no-repeat 50% 50%; } diff --git a/escheduler-ui/src/js/conf/home/pages/dag/_source/formModel/formModel.vue b/escheduler-ui/src/js/conf/home/pages/dag/_source/formModel/formModel.vue index 863a44abf5..a46b894d11 100644 --- a/escheduler-ui/src/js/conf/home/pages/dag/_source/formModel/formModel.vue +++ b/escheduler-ui/src/js/conf/home/pages/dag/_source/formModel/formModel.vue @@ -19,13 +19,13 @@
@@ -52,13 +52,13 @@
@@ -96,68 +96,74 @@ + ref="timeout" + :backfill-item="backfillItem" + @on-timeout="_onTimeout"> + v-if="taskType === 'SHELL'" + @on-params="_onParams" + ref="SHELL" + :backfill-item="backfillItem"> + v-if="taskType === 'SUB_PROCESS'" + @on-params="_onParams" + @on-set-process-name="_onSetProcessName" + ref="SUB_PROCESS" + :backfill-item="backfillItem"> + v-if="taskType === 'PROCEDURE'" + @on-params="_onParams" + ref="PROCEDURE" + :backfill-item="backfillItem"> + v-if="taskType === 'SQL'" + @on-params="_onParams" + ref="SQL" + :create-node-id="id" + :backfill-item="backfillItem"> + v-if="taskType === 'SPARK'" + @on-params="_onParams" + ref="SPARK" + :backfill-item="backfillItem"> + + + v-if="taskType === 'MR'" + @on-params="_onParams" + ref="MR" + :backfill-item="backfillItem"> + v-if="taskType === 'PYTHON'" + @on-params="_onParams" + ref="PYTHON" + :backfill-item="backfillItem"> + v-if="taskType === 'DEPENDENT'" + @on-dependent="_onDependent" + ref="DEPENDENT" + :backfill-item="backfillItem"> @@ -178,6 +184,7 @@ import i18n from '@/module/i18n' import mShell from './tasks/shell' import mSpark from './tasks/spark' + import mFlink from './tasks/flink' import mPython from './tasks/python' import JSP from './../plugIn/jsPlumbHandle' import mProcedure from './tasks/procedure' @@ -284,12 +291,12 @@ } this.store.dispatch('dag/getSubProcessId', { taskId: stateId }).then(res => { this.$emit('onSubProcess', { - subProcessId: res.data.subProcessInstanceId, - fromThis: this - }) - }).catch(e => { - this.$message.error(e.msg || '') + subProcessId: res.data.subProcessInstanceId, + fromThis: this }) + }).catch(e => { + this.$message.error(e.msg || '') + }) } else { this.$emit('onSubProcess', { subProcessId: this.backfillItem.params.processDefinitionId, @@ -413,10 +420,10 @@ if (taskList.length) { taskList.forEach(v => { if (v.id === this.id) { - o = v - this.backfillItem = v - } - }) + o = v + this.backfillItem = v + } + }) // Non-null objects represent backfill if (!_.isEmpty(o)) { this.name = o.name @@ -455,6 +462,7 @@ mSql, mLog, mSpark, + mFlink, mPython, mDependent, mSelectInput, diff --git a/escheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/_source/dependItemList.vue b/escheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/_source/dependItemList.vue index 8df34ae24e..2cd282a155 100644 --- a/escheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/_source/dependItemList.vue +++ b/escheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/_source/dependItemList.vue @@ -1,15 +1,19 @@ diff --git a/escheduler-ui/src/js/conf/home/pages/security/pages/users/_source/createUser.vue b/escheduler-ui/src/js/conf/home/pages/security/pages/users/_source/createUser.vue index 378f410d38..b45e6555ab 100644 --- a/escheduler-ui/src/js/conf/home/pages/security/pages/users/_source/createUser.vue +++ b/escheduler-ui/src/js/conf/home/pages/security/pages/users/_source/createUser.vue @@ -203,7 +203,6 @@ }, _submit () { this.$refs['popup'].spinnerLoading = true - console.log(this.tenantId.id) let param = { userName: this.userName, userPassword: this.userPassword, diff --git a/escheduler-ui/src/js/conf/home/store/dag/actions.js b/escheduler-ui/src/js/conf/home/store/dag/actions.js index 7482dbed9b..9d73918f6d 100644 --- a/escheduler-ui/src/js/conf/home/store/dag/actions.js +++ b/escheduler-ui/src/js/conf/home/store/dag/actions.js @@ -261,6 +261,35 @@ export default { }) }) }, + /** + * Get a list of project + */ + getProjectList ({ state }, payload) { + return new Promise((resolve, 
reject) => { + if (state.projectListS.length) { + resolve() + return + } + io.get(`projects/queryAllProjectList`, payload, res => { + state.projectListS = res.data + resolve(res.data) + }).catch(res => { + reject(res) + }) + }) + }, + /** + * Get a list of process definitions by project id + */ + getProcessByProjectId ({ state }, payload) { + return new Promise((resolve, reject) => { + io.get(`projects/${state.projectName}/process/queryProccessDefinitionAllByProjectId`, payload, res => { + resolve(res.data) + }).catch(res => { + reject(res) + }) + }) + }, /** * get datasource */ diff --git a/escheduler-ui/src/js/conf/home/store/dag/mutations.js b/escheduler-ui/src/js/conf/home/store/dag/mutations.js index d3386bc76a..4a94fe093e 100644 --- a/escheduler-ui/src/js/conf/home/store/dag/mutations.js +++ b/escheduler-ui/src/js/conf/home/store/dag/mutations.js @@ -109,6 +109,7 @@ export default { state.tenantId = payload && payload.tenantId || -1 state.processListS = payload && payload.processListS || [] state.resourcesListS = payload && payload.resourcesListS || [] + state.projectListS = payload && payload.projectListS || [] state.isDetails = payload && payload.isDetails || false state.runFlag = payload && payload.runFlag || '' state.locations = payload && payload.locations || {} diff --git a/escheduler-ui/src/js/conf/home/store/dag/state.js b/escheduler-ui/src/js/conf/home/store/dag/state.js index c875500f5f..26891a6774 100644 --- a/escheduler-ui/src/js/conf/home/store/dag/state.js +++ b/escheduler-ui/src/js/conf/home/store/dag/state.js @@ -47,6 +47,8 @@ export default { syncDefine: true, // tasks processList processListS: [], + // projectList + projectListS: [], // tasks resourcesList resourcesListS: [], // tasks datasource Type diff --git a/escheduler-ui/src/js/module/i18n/locale/zh_CN.js b/escheduler-ui/src/js/module/i18n/locale/zh_CN.js index 7ac3dda87e..cfa0224185 100644 --- a/escheduler-ui/src/js/module/i18n/locale/zh_CN.js +++ b/escheduler-ui/src/js/module/i18n/locale/zh_CN.js @@ -476,5 +476,9 @@ export default { 'warning of timeout': '超时告警', 'Next five execution times': '接下来五次执行时间', 'Execute time': '执行时间', - 'Complement range': '补数范围' + 'Complement range': '补数范围', + 'slot':'slot数量', + 'taskManager':'taskManage数量', + 'jobManagerMemory':'jobManager内存数', + 'taskManagerMemory':'taskManager内存数' } diff --git a/escheduler-ui/src/lib/@analysys/ans-ui/package.json b/escheduler-ui/src/lib/@analysys/ans-ui/package.json index cddb561eb3..061f54a7ad 100644 --- a/escheduler-ui/src/lib/@analysys/ans-ui/package.json +++ b/escheduler-ui/src/lib/@analysys/ans-ui/package.json @@ -53,7 +53,6 @@ "babel-plugin-transform-runtime": "^6.23.0", "babel-plugin-transform-vue-jsx": "^3.7.0", "babel-preset-env": "^1.5.2", - "babel-runtime": "^6.26.0", "cross-env": "^5.2.0", "css-loader": "0.28.8", "cssnano": "^4.0.3", diff --git a/escheduler-ui/src/lib/@fedor/io/package.json b/escheduler-ui/src/lib/@fedor/io/package.json index 0066dfa8f0..20563bedeb 100644 --- a/escheduler-ui/src/lib/@fedor/io/package.json +++ b/escheduler-ui/src/lib/@fedor/io/package.json @@ -23,7 +23,6 @@ "babel-plugin-transform-class-properties": "^6.24.1", "babel-plugin-transform-runtime": "^6.23.0", "babel-preset-env": "^1.5.2", - "babel-runtime": "^6.26.0", "body-parser": "^1.17.2", "chai": "^4.1.1", "cors": "^2.8.4", diff --git a/escheduler-ui/src/lib/@vue/crontab/example/app.vue b/escheduler-ui/src/lib/@vue/crontab/example/app.vue index c691638a64..7328886801 100644 --- a/escheduler-ui/src/lib/@vue/crontab/example/app.vue +++ 
diff --git a/escheduler-ui/src/js/module/i18n/locale/zh_CN.js b/escheduler-ui/src/js/module/i18n/locale/zh_CN.js index 7ac3dda87e..cfa0224185 100644 --- a/escheduler-ui/src/js/module/i18n/locale/zh_CN.js +++ b/escheduler-ui/src/js/module/i18n/locale/zh_CN.js @@ -476,5 +476,9 @@ export default { 'warning of timeout': '超时告警', 'Next five execution times': '接下来五次执行时间', 'Execute time': '执行时间', - 'Complement range': '补数范围' + 'Complement range': '补数范围', + 'slot': 'slot数量', + 'taskManager': 'taskManager数量', + 'jobManagerMemory': 'jobManager内存数', + 'taskManagerMemory': 'taskManager内存数' } diff --git a/escheduler-ui/src/lib/@analysys/ans-ui/package.json b/escheduler-ui/src/lib/@analysys/ans-ui/package.json index cddb561eb3..061f54a7ad 100644 --- a/escheduler-ui/src/lib/@analysys/ans-ui/package.json +++ b/escheduler-ui/src/lib/@analysys/ans-ui/package.json @@ -53,7 +53,6 @@ "babel-plugin-transform-runtime": "^6.23.0", "babel-plugin-transform-vue-jsx": "^3.7.0", "babel-preset-env": "^1.5.2", - "babel-runtime": "^6.26.0", "cross-env": "^5.2.0", "css-loader": "0.28.8", "cssnano": "^4.0.3", diff --git a/escheduler-ui/src/lib/@fedor/io/package.json b/escheduler-ui/src/lib/@fedor/io/package.json index 0066dfa8f0..20563bedeb 100644 --- a/escheduler-ui/src/lib/@fedor/io/package.json +++ b/escheduler-ui/src/lib/@fedor/io/package.json @@ -23,7 +23,6 @@ "babel-plugin-transform-class-properties": "^6.24.1", "babel-plugin-transform-runtime": "^6.23.0", "babel-preset-env": "^1.5.2", - "babel-runtime": "^6.26.0", "body-parser": "^1.17.2", "chai": "^4.1.1", "cors": "^2.8.4", diff --git a/escheduler-ui/src/lib/@vue/crontab/example/app.vue b/escheduler-ui/src/lib/@vue/crontab/example/app.vue index c691638a64..7328886801 100644 --- a/escheduler-ui/src/lib/@vue/crontab/example/app.vue +++ b/escheduler-ui/src/lib/@vue/crontab/example/app.vue @@ -36,7 +36,6 @@ _lang (type) { this.is = false this.lang = type - console.log(this.lang) setTimeout(() => { this.is = true }, 1) diff --git a/escheduler-ui/src/view/docs/zh_CN/_book/images/flink_edit.png b/escheduler-ui/src/view/docs/zh_CN/_book/images/flink_edit.png new file mode 100644 index 0000000000..b7c2321157 Binary files /dev/null and b/escheduler-ui/src/view/docs/zh_CN/_book/images/flink_edit.png differ diff --git a/install.sh b/install.sh index 894a4c2f18..dc09ce3c97 100644 --- a/install.sh +++ b/install.sh @@ -34,252 +34,257 @@ fi source ${workDir}/conf/config/run_config.conf source ${workDir}/conf/config/install_config.conf -# mysql配置 -# mysql 地址,端口 +# mysql config +# mysql address and port mysqlHost="192.168.xx.xx:3306" -# mysql 数据库名称 +# mysql database mysqlDb="escheduler" -# mysql 用户名 +# mysql username mysqlUserName="xx" -# mysql 密码 -# 注意:如果有特殊字符,请用 \ 转移符进行转移 +# mysql password +# Note: if the password contains special characters, escape them with the \ escape character mysqlPassword="xx" -# conf/config/install_config.conf配置 -# 注意:安装路径,不要当前路径(pwd)一样 +# conf/config/install_config.conf config +# Note: the installation path must not be the same as the current path (pwd) installPath="/data1_1T/escheduler" -# 部署用户 -# 注意:部署用户需要有sudo权限及操作hdfs的权限,如果开启hdfs,根目录需要自行创建 +# deployment user +# Note: the deployment user needs sudo privileges and permission to operate hdfs. If hdfs is enabled, you need to create the root directory yourself deployUser="escheduler" -# zk集群 +# zk cluster zkQuorum="192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181" -# 安装hosts -# 注意:安装调度的机器hostname列表,如果是伪分布式,则只需写一个伪分布式hostname即可 +# install hosts +# Note: hostname list of the machines where the scheduler will be installed. If it is pseudo-distributed, just write one pseudo-distributed hostname ips="ark0,ark1,ark2,ark3,ark4" -# conf/config/run_config.conf配置 -# 运行Master的机器 -# 注意:部署master的机器hostname列表 +# conf/config/run_config.conf config +# machines running master +# Note: hostname list of the machines deploying master masters="ark0,ark1" -# 运行Worker的机器 -# 注意:部署worker的机器hostname列表 +# machines running worker +# Note: hostname list of the machines deploying workers workers="ark2,ark3,ark4" -# 运行Alert的机器 -# 注意:部署alert server的机器hostname列表 +# machine running alert +# Note: hostname list of the machines deploying the alert server alertServer="ark3" -# 运行Api的机器 -# 注意:部署api server的机器hostname列表 +# machine running api +# Note: hostname list of the machines deploying the api server apiServers="ark1" -# alert配置 -# 邮件协议 +# alert config +# mail protocol mailProtocol="SMTP" -# 邮件服务host +# mail server host mailServerHost="smtp.exmail.qq.com" -# 邮件服务端口 +# mail server port mailServerPort="25" -# 发送人 +# sender mailSender="xxxxxxxxxx" -# 发送人密码 +# sender password mailPassword="xxxxxxxxxx" -# TLS邮件协议支持 +# TLS mail protocol support starttlsEnable="false" -# SSL邮件协议支持 -# 注意:默认开启的是SSL协议,TLS和SSL只能有一个处于true状态 +# SSL mail protocol support +# Note: the SSL protocol is enabled by default; +# only one of TLS and SSL can be true.
sslEnable="true" -# 下载Excel路径 +# excel download path xlsFilePath="/tmp/xls" -# 企业微信企业ID配置 +# Enterprise WeChat corp id configuration enterpriseWechatCorpId="xxxxxxxxxx" -# 企业微信应用Secret配置 +# Enterprise WeChat application Secret configuration enterpriseWechatSecret="xxxxxxxxxx" -# 企业微信应用AgentId配置 +# Enterprise WeChat application AgentId configuration enterpriseWechatAgentId="xxxxxxxxxx" -# 企业微信用户配置,多个用户以,分割 +# Enterprise WeChat user configuration, multiple users separated by , enterpriseWechatUsers="xxxxx,xxxxx" -#是否启动监控自启动脚本 +# whether to start the monitor self-starting script monitorServerState="false" -# 资源中心上传选择存储方式:HDFS,S3,NONE +# resource center upload storage type: HDFS, S3, NONE resUploadStartupType="NONE" -# 如果resUploadStartupType为HDFS,defaultFS写namenode地址,支持HA,需要将core-site.xml和hdfs-site.xml放到conf目录下 -# 如果是S3,则写S3地址,比如说:s3a://escheduler,注意,一定要创建根目录/escheduler +# if resUploadStartupType is HDFS, set defaultFS to the namenode address; for HA, put core-site.xml and hdfs-site.xml in the conf directory. +# if S3, set it to the S3 address, for example: s3a://escheduler. +# Note: for s3, be sure to create the root directory /escheduler defaultFS="hdfs://mycluster:8020" -# 如果配置了S3,则需要有以下配置 +# if S3 is configured, the following configuration is required. s3Endpoint="http://192.168.xx.xx:9010" s3AccessKey="xxxxxxxxxx" s3SecretKey="xxxxxxxxxx" -# resourcemanager HA配置,如果是单resourcemanager,这里为yarnHaIps="" +# resourcemanager HA configuration; if it is a single resourcemanager, set yarnHaIps="" yarnHaIps="192.168.xx.xx,192.168.xx.xx" -# 如果是单 resourcemanager,只需要配置一个主机名称,如果是resourcemanager HA,则默认配置就好 +# if it is a single resourcemanager, you only need to configure one host name. If it is resourcemanager HA, the default configuration is fine. singleYarnIp="ark1" -# hdfs根路径,根路径的owner必须是部署用户。1.1.0之前版本不会自动创建hdfs根目录,需要自行创建 +# hdfs root path; the owner of the root path must be the deployment user. +# versions prior to 1.1.0 do not automatically create the hdfs root directory, you need to create it yourself. hdfsPath="/escheduler" -# 拥有在hdfs根路径/下创建目录权限的用户 -# 注意:如果开启了kerberos,则直接hdfsRootUser="",就可以 +# user with permission to create directories under the hdfs root path / +# Note: if kerberos is enabled, just set hdfsRootUser="" hdfsRootUser="hdfs"
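For reference, the two non-trivial resource-center setups described above look like this (a sketch only, reusing the example values from elsewhere in this file):

```bash
# Example only: HDFS-backed resource center (for HA, put core-site.xml and
# hdfs-site.xml under conf/)
resUploadStartupType="HDFS"
defaultFS="hdfs://mycluster:8020"

# Example only: S3-backed resource center (create the root directory
# /escheduler on S3 first)
resUploadStartupType="S3"
defaultFS="s3a://escheduler"
s3Endpoint="http://192.168.xx.xx:9010"
```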
-# common 配置 -# 程序路径 +# common config +# program root path programPath="/tmp/escheduler" -#下载路径 +# download path downloadPath="/tmp/escheduler/download" -# 任务执行路径 +# task execution path execPath="/tmp/escheduler/exec" -# SHELL环境变量路径 +# SHELL environment variable path shellEnvPath="$installPath/conf/env/.escheduler_env.sh" -# 资源文件的后缀 +# suffixes of resource files resSuffixs="txt,log,sh,conf,cfg,py,java,sql,hql,xml" -# 开发状态,如果是true,对于SHELL脚本可以在execPath目录下查看封装后的SHELL脚本,如果是false则执行完成直接删除 +# development state; if true, the wrapped SHELL script can be viewed in the execPath directory. +# If false, it is deleted directly after execution devState="true" -# kerberos 配置 -# kerberos 是否启动 +# kerberos config +# whether kerberos is enabled kerberosStartUp="false" -# kdc krb5 配置文件路径 +# kdc krb5 config file path krb5ConfPath="$installPath/conf/krb5.conf" -# keytab 用户名 +# keytab username keytabUserName="hdfs-mycluster@ESZ.COM" -# 用户 keytab路径 +# user keytab path keytabPath="$installPath/conf/hdfs.headless.keytab" -# zk 配置 -# zk根目录 +# zk config +# zk root directory zkRoot="/escheduler" -# 用来记录挂掉机器的zk目录 +# zk directory used to record dead servers zkDeadServers="/escheduler/dead-servers" -# masters目录 -zkMasters="/escheduler/masters" +# masters directory +zkMasters="$zkRoot/masters" -# workers目录 -zkWorkers="/escheduler/workers" +# workers directory +zkWorkers="$zkRoot/workers" -# zk master分布式锁 -mastersLock="/escheduler/lock/masters" +# zk master distributed lock +mastersLock="$zkRoot/lock/masters" -# zk worker分布式锁 -workersLock="/escheduler/lock/workers" +# zk worker distributed lock +workersLock="$zkRoot/lock/workers" -# zk master容错分布式锁 -mastersFailover="/escheduler/lock/failover/masters" +# zk master fault-tolerant distributed lock +mastersFailover="$zkRoot/lock/failover/masters" -# zk worker容错分布式锁 -workersFailover="/escheduler/lock/failover/workers" +# zk worker fault-tolerant distributed lock +workersFailover="$zkRoot/lock/failover/workers" -# zk master启动容错分布式锁 -mastersStartupFailover="/escheduler/lock/failover/startup-masters" +# zk master startup fault-tolerant distributed lock +mastersStartupFailover="$zkRoot/lock/failover/startup-masters" -# zk session 超时 +# zk session timeout zkSessionTimeout="300" -# zk 连接超时 +# zk connection timeout zkConnectionTimeout="300" -# zk 重试间隔 +# zk retry interval zkRetrySleep="100" -# zk重试最大次数 +# zk maximum retry times zkRetryMaxtime="5"
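Since the zookeeper paths above are now derived from `$zkRoot` rather than repeating the literal `/escheduler` prefix, relocating the whole tree takes a single edit. A small illustration (the test value is hypothetical):

```bash
# Example only: change zkRoot once and every derived path follows.
zkRoot="/escheduler-test"
zkMasters="$zkRoot/masters"                      # -> /escheduler-test/masters
mastersFailover="$zkRoot/lock/failover/masters"  # -> /escheduler-test/lock/failover/masters
echo "$zkMasters $mastersFailover"
```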
-# master 配置 -# master执行线程最大数,流程实例的最大并行度 +# master config +# maximum number of master execution threads, i.e. the maximum parallelism of process instances masterExecThreads="100" -# master任务执行线程最大数,每一个流程实例的最大并行度 +# maximum number of master task execution threads, i.e. the maximum parallelism within each process instance masterExecTaskNum="20" -# master心跳间隔 +# master heartbeat interval masterHeartbeatInterval="10" -# master任务提交重试次数 +# master task submission retry times masterTaskCommitRetryTimes="5" -# master任务提交重试时间间隔 +# master task submission retry interval masterTaskCommitInterval="100" -# master最大cpu平均负载,用来判断master是否还有执行能力 +# master maximum cpu average load, used to determine whether the master still has execution capability masterMaxCpuLoadAvg="10" -# master预留内存,用来判断master是否还有执行能力 +# master reserved memory, used to determine whether the master still has execution capability masterReservedMemory="1" -# worker 配置 -# worker执行线程 +# worker config +# number of worker execution threads workerExecThreads="100" -# worker心跳间隔 +# worker heartbeat interval workerHeartbeatInterval="10" -# worker一次抓取任务数 +# number of tasks a worker fetches at a time workerFetchTaskNum="3" -# worker最大cpu平均负载,用来判断worker是否还有执行能力,保持系统默认,默认为cpu核数的2倍,当负载达到2倍时, +# worker maximum cpu average load, used to determine whether the worker still has execution capability; +# keep the system default, which is twice the number of cpu cores #workerMaxCupLoadAvg="10" -# worker预留内存,用来判断master是否还有执行能力 +# worker reserved memory, used to determine whether the worker still has execution capability workerReservedMemory="1" -# api 配置 -# api 服务端口 +# api config +# api server port apiServerPort="12345" -# api session 超时 +# api session timeout apiServerSessionTimeout="7200" -# api 上下文路径 +# api server context path apiServerContextPath="/escheduler/" -# spring 最大文件大小 +# spring max file size springMaxFileSize="1024MB" -# spring 最大请求文件大小 +# spring max request size springMaxRequestSize="1024MB" -# api 最大post请求大小 +# api max http post size apiMaxHttpPostSize="5000000" -# 1,替换文件 -echo "1,替换文件" +# 1,replace file +echo "1,replace file" sed -i ${txt} "s#spring.datasource.url.*#spring.datasource.url=jdbc:mysql://${mysqlHost}/${mysqlDb}?characterEncoding=UTF-8#g" conf/dao/data_source.properties sed -i ${txt} "s#spring.datasource.username.*#spring.datasource.username=${mysqlUserName}#g" conf/dao/data_source.properties sed -i ${txt} "s#spring.datasource.password.*#spring.datasource.password=${mysqlPassword}#g" conf/dao/data_source.properties @@ -375,8 +380,8 @@ sed -i ${txt} "s#alertServer.*#alertServer=${alertServer}#g" conf/config/run_con sed -i ${txt} "s#apiServers.*#apiServers=${apiServers}#g" conf/config/run_config.conf -# 2,创建目录 -echo "2,创建目录" +# 2,create directory +echo "2,create directory" if [ ! -d $installPath ];then sudo mkdir -p $installPath @@ -387,22 +392,22 @@ hostsArr=(${ips//,/ }) for host in ${hostsArr[@]} do -# 如果programPath不存在,则创建 +# create programPath if it does not exist if ! ssh $host test -e $programPath; then ssh $host "sudo mkdir -p $programPath;sudo chown -R $deployUser:$deployUser $programPath" fi -# 如果downloadPath不存在,则创建 +# create downloadPath if it does not exist if ! ssh $host test -e $downloadPath; then ssh $host "sudo mkdir -p $downloadPath;sudo chown -R $deployUser:$deployUser $downloadPath" fi -# 如果$execPath不存在,则创建 +# create execPath if it does not exist if ! ssh $host test -e $execPath; then ssh $host "sudo mkdir -p $execPath; sudo chown -R $deployUser:$deployUser $execPath" fi -# 如果$xlsFilePath不存在,则创建 +# create xlsFilePath if it does not exist if ! ssh $host test -e $xlsFilePath; then ssh $host "sudo mkdir -p $xlsFilePath; sudo chown -R $deployUser:$deployUser $xlsFilePath" fi @@ -410,31 +415,31 @@ fi done -# 3,停止服务 -echo "3,停止服务" -sh ${workDir}/script/stop_all.sh +# 3,stop services +echo "3,stop services" +sh ${workDir}/script/stop-all.sh -# 4,删除zk节点 -echo "4,删除zk节点" +# 4,delete zk nodes +echo "4,delete zk nodes" sleep 1 -python ${workDir}/script/del_zk_node.py $zkQuorum $zkRoot +python ${workDir}/script/del-zk-node.py $zkQuorum $zkRoot -# 5,scp资源 -echo "5,scp资源" -sh ${workDir}/script/scp_hosts.sh +# 5,scp resources +echo "5,scp resources" +sh ${workDir}/script/scp-hosts.sh if [ $? -eq 0 ] then - echo 'scp拷贝完成' + echo 'scp copy completed' else - echo 'sc 拷贝失败退出' + echo 'scp copy failed, exiting' exit -1 fi -# 6,启动 -echo "6,启动" -sh ${workDir}/script/start_all.sh +# 6,startup +echo "6,startup" +sh ${workDir}/script/start-all.sh -# 7,启动监控自启动脚本 +# 7,start the monitor self-starting script monitor_pid=${workDir}/monitor_server.pid if [ "true" = $monitorServerState ];then if [ -f $monitor_pid ]; then @@ -453,9 +458,8 @@ if [ "true" = $monitorServerState ];then echo "monitor server running as process ${TARGET_PID}.Stopped success" rm -f $monitor_pid fi - nohup python -u ${workDir}/script/monitor_server.py $installPath $zkQuorum $zkMasters $zkWorkers > ${workDir}/monitor_server.log 2>&1 & + nohup python -u ${workDir}/script/monitor-server.py $installPath $zkQuorum $zkMasters $zkWorkers > ${workDir}/monitor-server.log 2>&1 & echo $!
> $monitor_pid echo "start monitor server success as process `cat $monitor_pid`" -fi - +fi \ No newline at end of file diff --git a/package.xml index 619dfb07cf..153dceec9b 100644 --- a/package.xml +++ b/package.xml @@ -70,8 +70,8 @@ script - start_all.sh - stop_all.sh + start-all.sh + stop-all.sh escheduler-daemon.sh ./bin diff --git a/pom.xml index f8f64313d8..1d1858cd52 100644 --- a/pom.xml +++ b/pom.xml @@ -20,6 +20,39 @@ 2.7.3 2.2.3 2.9.8 + <mybatis.version>3.5.1</mybatis.version> + <mybatis.spring.version>2.0.1</mybatis.spring.version> + <cron.utils.version>5.0.5</cron.utils.version> + <fastjson.version>1.2.61</fastjson.version> + <druid.version>1.1.14</druid.version> + <h2.version>1.3.163</h2.version> + <commons.codec.version>1.6</commons.codec.version> + <commons.logging.version>1.1.1</commons.logging.version> + <httpclient.version>4.4.1</httpclient.version> + <httpcore.version>4.4.1</httpcore.version> + <junit.version>4.12</junit.version> + <mysql.connector.version>5.1.34</mysql.connector.version> + <slf4j.api.version>1.7.5</slf4j.api.version> + <slf4j.log4j12.version>1.7.5</slf4j.log4j12.version> + <commons.collections.version>3.2.2</commons.collections.version> + <commons.lang.version>2.3</commons.lang.version> + <commons.lang3.version>3.5</commons.lang3.version> + <commons.httpclient>3.0.1</commons.httpclient> + <commons.beanutils.version>1.7.0</commons.beanutils.version> + <commons.configuration.version>1.10</commons.configuration.version> + <commons.email.version>1.5</commons.email.version> + <poi.version>3.17</poi.version> + <freemarker.version>2.3.21</freemarker.version> + <javax.servlet.api.version>3.1.0</javax.servlet.api.version> + <commons.collections4.version>4.1</commons.collections4.version> + <guava.version>20.0</guava.version> + <postgresql.version>42.1.4</postgresql.version> + <hive.jdbc.version>2.1.0</hive.jdbc.version> + <commons.io.version>2.4</commons.io.version> + <oshi.core.version>3.5.0</oshi.core.version> + <clickhouse.jdbc.version>0.1.52</clickhouse.jdbc.version> + <mssql.jdbc.version>6.1.0.jre8</mssql.jdbc.version> + <jsp.version>6.1.14</jsp.version> @@ -28,22 +61,22 @@ org.mybatis mybatis - 3.5.1 + ${mybatis.version} org.mybatis mybatis-spring - 2.0.1 + ${mybatis.spring.version} org.mybatis.spring.boot mybatis-spring-boot-autoconfigure - 2.0.1 + ${mybatis.spring.version} org.mybatis.spring.boot mybatis-spring-boot-starter - 2.0.1 + ${mybatis.spring.version} @@ -60,18 +93,18 @@ com.cronutils cron-utils - 5.0.5 + ${cron.utils.version} com.alibaba fastjson - 1.2.29 + ${fastjson.version} com.alibaba druid - 1.1.14 + ${druid.version} @@ -116,7 +149,7 @@ com.h2database h2 - 1.3.163 + ${h2.version} test @@ -165,22 +198,22 @@ commons-codec commons-codec - 1.6 + ${commons.codec.version} commons-logging commons-logging - 1.1.1 + ${commons.logging.version} org.apache.httpcomponents httpclient - 4.4.1 + ${httpclient.version} org.apache.httpcomponents httpcore - 4.4.1 + ${httpcore.version} com.fasterxml.jackson.core @@ -201,56 +234,56 @@ junit junit - 4.12 + ${junit.version} mysql mysql-connector-java - 5.1.34 + ${mysql.connector.version} org.slf4j slf4j-api - 1.7.5 + ${slf4j.api.version} org.slf4j slf4j-log4j12 - 1.7.5 + ${slf4j.log4j12.version} commons-collections commons-collections - 3.2.2 + ${commons.collections.version} commons-lang commons-lang - 2.3 + ${commons.lang.version} org.apache.commons commons-lang3 - 3.5 + ${commons.lang3.version} commons-httpclient commons-httpclient - 3.0.1 + ${commons.httpclient} commons-beanutils commons-beanutils - 1.7.0 + ${commons.beanutils.version} commons-configuration commons-configuration - 1.10 + ${commons.configuration.version} @@ -268,20 +301,20 @@ org.apache.commons commons-email - 1.5 + ${commons.email.version} org.apache.poi poi - 3.17 + ${poi.version} org.freemarker freemarker - 2.3.21 + ${freemarker.version} @@ -325,61 +358,61 @@ javax.servlet javax.servlet-api - 3.1.0 + ${javax.servlet.api.version} org.apache.commons commons-collections4 - 4.1 + ${commons.collections4.version} com.google.guava guava - 20.0 + ${guava.version} org.postgresql postgresql - 42.1.4 + ${postgresql.version} org.apache.hive hive-jdbc - 2.1.0 + ${hive.jdbc.version} commons-io commons-io - 2.4 + ${commons.io.version} com.github.oshi oshi-core - 3.5.0 + ${oshi.core.version} ru.yandex.clickhouse clickhouse-jdbc - 0.1.52 + ${clickhouse.jdbc.version} com.microsoft.sqlserver mssql-jdbc - 6.1.0.jre8 + ${mssql.jdbc.version} org.mortbay.jetty jsp-2.1 - 6.1.14 + ${jsp.version} @@ -489,4 +522,4 @@ escheduler-rpc -
\ No newline at end of file +
diff --git a/script/create_escheduler.sh b/script/create-escheduler.sh similarity index 91% rename from script/create_escheduler.sh rename to script/create-escheduler.sh index c88da7bb6d..ded20a29b3 100644 --- a/script/create_escheduler.sh +++ b/script/create-escheduler.sh @@ -13,7 +13,7 @@ export ESCHEDULER_LIB_JARS=$ESCHEDULER_HOME/lib/* export ESCHEDULER_OPTS="-server -Xmx1g -Xms1g -Xss512k -XX:+DisableExplicitGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:LargePageSizeInBytes=128m -XX:+UseFastAccessorMethods -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70" export STOP_TIMEOUT=5 -CLASS=cn.escheduler.dao.upgrade.shell.CreateEscheduler +CLASS=cn.escheduler.dao.upgrade.shell.CreateDolphinScheduler exec_command="$ESCHEDULER_OPTS -classpath $ESCHEDULER_CONF_DIR:$ESCHEDULER_LIB_JARS $CLASS" diff --git a/script/del_zk_node.py b/script/del-zk-node.py similarity index 100% rename from script/del_zk_node.py rename to script/del-zk-node.py diff --git a/script/env/.escheduler_env.sh b/script/env/.escheduler_env.sh index 5a08343c84..e1975816d9 100644 --- a/script/env/.escheduler_env.sh +++ b/script/env/.escheduler_env.sh @@ -5,5 +5,5 @@ export SPARK_HOME2=/opt/soft/spark2 export PYTHON_HOME=/opt/soft/python export JAVA_HOME=/opt/soft/java export HIVE_HOME=/opt/soft/hive - -export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$PATH \ No newline at end of file +export FLINK_HOME=/opt/soft/flink +export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$FLINK_HOME/bin:$PATH diff --git a/script/escheduler-daemon.sh b/script/escheduler-daemon.sh index 99f0dee444..d54272c886 100644 --- a/script/escheduler-daemon.sh +++ b/script/escheduler-daemon.sh @@ -47,7 +47,7 @@ elif [ "$command" = "master-server" ]; then LOG_FILE="-Dspring.config.location=conf/application_master.properties -Ddruid.mysql.usePingMethod=false" CLASS=cn.escheduler.server.master.MasterServer elif [ "$command" = "worker-server" ]; then - LOG_FILE="-Dlogback.configurationFile=conf/worker_logback.xml -Ddruid.mysql.usePingMethod=false" + LOG_FILE="-Dspring.config.location=conf/application_worker.properties -Ddruid.mysql.usePingMethod=false" CLASS=cn.escheduler.server.worker.WorkerServer elif [ "$command" = "alert-server" ]; then LOG_FILE="-Dlogback.configurationFile=conf/alert_logback.xml" diff --git a/script/monitor_server.py b/script/monitor-server.py similarity index 74% rename from script/monitor_server.py rename to script/monitor-server.py index 5f236cac7e..546104c8f6 100644 --- a/script/monitor_server.py +++ b/script/monitor-server.py @@ -1,21 +1,26 @@ #!/usr/bin/env python # -*- coding:utf-8 -*- -# Author:qiaozhanwei ''' -yum 安装pip yum -y install python-pip -pip install kazoo 安装 -conda install -c conda-forge kazoo 安装 +1, yum install pip: yum -y install python-pip +2, pip install kazoo: pip install kazoo + +or + +3, conda install kazoo: conda install -c conda-forge kazoo + +run script and parameter description: nohup python -u monitor-server.py /data1_1T/escheduler 192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181 /escheduler/masters /escheduler/workers> monitor-server.log 2>&1 & -参数说明如下: -/data1_1T/escheduler的值来自install.sh中的installPath -192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181的值来自install.sh中的zkQuorum -/escheduler/masters的值来自install.sh中的zkMasters -/escheduler/workers的值来自install.sh中的zkWorkers +the parameters are as follows: +/data1_1T/escheduler : the value comes from the installPath in install.sh +192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181 : the value comes from zkQuorum in install.sh +/escheduler/masters : the value comes from zkMasters in install.sh +/escheduler/workers : the value comes from zkWorkers in install.sh ''' import sys import socket @@ -29,11 +34,11 @@ schedule = sched.scheduler(time.time, time.sleep) class ZkClient: def __init__(self): - # hosts配置zk地址集群 + # hosts is the zk cluster address configuration self.zk = KazooClient(hosts=zookeepers) self.zk.start() - # 读取配置文件,组装成字典 + # read the configuration file and assemble it into a dictionary def read_file(self,path): with open(path, 'r') as f: dict = {} @@ -43,11 +48,11 @@ class ZkClient: dict[arr[0]] = arr[1] return dict - # 根据hostname获取ip地址 + # get the ip address by hostname def get_ip_by_hostname(self,hostname): return socket.gethostbyname(hostname) - # 重启服务 + # restart server def restart_server(self,inc): config_dict = self.read_file(install_path + '/conf/config/run_config.conf') @@ -67,7 +72,7 @@ class ZkClient: restart_master_list = list(set(master_list) - set(zk_master_list)) if (len(restart_master_list) != 0): for master in restart_master_list: - print("master " + self.get_ip_by_hostname(master) + " 服务已经掉了") + print("master " + self.get_ip_by_hostname(master) + " server is down") os.system('ssh ' + self.get_ip_by_hostname(master) + ' sh ' + install_path + '/bin/escheduler-daemon.sh start master-server') if (self.zk.exists(workers_zk_path)): @@ -78,15 +83,15 @@ class ZkClient: restart_worker_list = list(set(worker_list) - set(zk_worker_list)) if (len(restart_worker_list) != 0): for worker in restart_worker_list: - print("worker " + self.get_ip_by_hostname(worker) + " 服务已经掉了") + print("worker " + self.get_ip_by_hostname(worker) + " server is down") os.system('ssh ' + self.get_ip_by_hostname(worker) + ' sh ' + install_path + '/bin/escheduler-daemon.sh start worker-server') print(datetime.now().strftime("%Y-%m-%d %H:%M:%S")) schedule.enter(inc, 0, self.restart_server, (inc,)) - # 默认参数60s + # default interval 60s def main(self,inc=60): - # enter四个参数分别为:间隔事件、优先级(用于同时间到达的两个事件同时执行时定序)、被调用触发的函数, - # 给该触发函数的参数(tuple形式) + # the four parameters of enter are: the delay interval, the priority (ordering when two events arrive at the same time), + # the function to trigger, and the arguments for that function (in tuple form) schedule.enter(0, 0, self.restart_server, (inc,)) schedule.run() if __name__ == '__main__': @@ -97,4 +102,4 @@ if __name__ == '__main__': masters_zk_path = sys.argv[3] workers_zk_path = sys.argv[4] zkClient = ZkClient() - zkClient.main(300) + zkClient.main(300) \ No newline at end of file
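The timing logic in monitor-server.py is the standard `sched` self-rescheduling loop: each run re-enters itself with a fixed delay, which is what the translated comment on `enter` describes. A stripped-down sketch of just that loop (the check body is a placeholder):

```python
import sched
import time

schedule = sched.scheduler(time.time, time.sleep)

def check_servers(inc):
    # placeholder for the real master/worker liveness check
    print('checking servers at', time.strftime('%Y-%m-%d %H:%M:%S'))
    # re-arm the same function: enter(delay, priority, action, argument)
    schedule.enter(inc, 0, check_servers, (inc,))

schedule.enter(0, 0, check_servers, (60,))  # run immediately, then every 60s
schedule.run()
```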
diff --git a/script/scp_hosts.sh b/script/scp-hosts.sh old mode 100755 new mode 100644 similarity index 100% rename from script/scp_hosts.sh rename to script/scp-hosts.sh diff --git a/script/start_all.sh b/script/start-all.sh old mode 100755 new mode 100644 similarity index 100% rename from script/start_all.sh rename to script/start-all.sh diff --git a/script/stop_all.sh b/script/stop-all.sh old mode 100755 new mode 100644 similarity index 100% rename from script/stop_all.sh rename to script/stop-all.sh diff --git a/script/upgrade_escheduler.sh b/script/upgrade-escheduler.sh similarity index 91% rename from script/upgrade_escheduler.sh rename to script/upgrade-escheduler.sh index 6bd6439a58..453bd611ac 100644 --- a/script/upgrade_escheduler.sh +++ b/script/upgrade-escheduler.sh @@ -13,7 +13,7 @@ export
ESCHEDULER_LIB_JARS=$ESCHEDULER_HOME/lib/* export ESCHEDULER_OPTS="-server -Xmx1g -Xms1g -Xss512k -XX:+DisableExplicitGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:LargePageSizeInBytes=128m -XX:+UseFastAccessorMethods -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70" export STOP_TIMEOUT=5 -CLASS=cn.escheduler.dao.upgrade.shell.UpgradeEscheduler +CLASS=cn.escheduler.dao.upgrade.shell.UpgradeDolphinScheduler exec_command="$ESCHEDULER_OPTS -classpath $ESCHEDULER_CONF_DIR:$ESCHEDULER_LIB_JARS $CLASS" diff --git a/sql/create/release-1.0.0_schema/mysql/escheduler_ddl.sql b/sql/create/release-1.0.0_schema/mysql/dolphinscheduler_ddl.sql similarity index 100% rename from sql/create/release-1.0.0_schema/mysql/escheduler_ddl.sql rename to sql/create/release-1.0.0_schema/mysql/dolphinscheduler_ddl.sql diff --git a/sql/create/release-1.0.0_schema/mysql/escheduler_dml.sql b/sql/create/release-1.0.0_schema/mysql/dolphinscheduler_dml.sql similarity index 100% rename from sql/create/release-1.0.0_schema/mysql/escheduler_dml.sql rename to sql/create/release-1.0.0_schema/mysql/dolphinscheduler_dml.sql diff --git a/sql/create/release-1.2.0_schema/postgresql/dolphinscheduler_ddl.sql b/sql/create/release-1.2.0_schema/postgresql/dolphinscheduler_ddl.sql new file mode 100644 index 0000000000..3dc3a5b9a3 --- /dev/null +++ b/sql/create/release-1.2.0_schema/postgresql/dolphinscheduler_ddl.sql @@ -0,0 +1,804 @@ +DROP TABLE IF EXISTS QRTZ_BLOB_TRIGGERS; +CREATE TABLE QRTZ_BLOB_TRIGGERS ( + SCHED_NAME varchar(120) NOT NULL, + TRIGGER_NAME varchar(200) NOT NULL, + TRIGGER_GROUP varchar(200) NOT NULL, + BLOB_DATA bytea NULL, + PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) +); + +-- +-- Table structure for table QRTZ_CALENDARS +-- + +DROP TABLE IF EXISTS QRTZ_CALENDARS; +CREATE TABLE QRTZ_CALENDARS ( + SCHED_NAME varchar(120) NOT NULL, + CALENDAR_NAME varchar(200) NOT NULL, + CALENDAR bytea NOT NULL, + PRIMARY KEY (SCHED_NAME,CALENDAR_NAME) +); +-- +-- Table structure for table QRTZ_CRON_TRIGGERS +-- + +DROP TABLE IF EXISTS QRTZ_CRON_TRIGGERS; +CREATE TABLE QRTZ_CRON_TRIGGERS ( + SCHED_NAME varchar(120) NOT NULL, + TRIGGER_NAME varchar(200) NOT NULL, + TRIGGER_GROUP varchar(200) NOT NULL, + CRON_EXPRESSION varchar(120) NOT NULL, + TIME_ZONE_ID varchar(80) DEFAULT NULL, + PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) +); + +-- +-- Table structure for table QRTZ_FIRED_TRIGGERS +-- + +DROP TABLE IF EXISTS QRTZ_FIRED_TRIGGERS; +CREATE TABLE QRTZ_FIRED_TRIGGERS ( + SCHED_NAME varchar(120) NOT NULL, + ENTRY_ID varchar(95) NOT NULL, + TRIGGER_NAME varchar(200) NOT NULL, + TRIGGER_GROUP varchar(200) NOT NULL, + INSTANCE_NAME varchar(200) NOT NULL, + FIRED_TIME bigint NOT NULL, + SCHED_TIME bigint NOT NULL, + PRIORITY int NOT NULL, + STATE varchar(16) NOT NULL, + JOB_NAME varchar(200) DEFAULT NULL, + JOB_GROUP varchar(200) DEFAULT NULL, + IS_NONCONCURRENT varchar(1) DEFAULT NULL, + REQUESTS_RECOVERY varchar(1) DEFAULT NULL, + PRIMARY KEY (SCHED_NAME,ENTRY_ID) +) ; + create index IDX_QRTZ_FT_TRIG_INST_NAME on QRTZ_FIRED_TRIGGERS (SCHED_NAME,INSTANCE_NAME); + create index IDX_QRTZ_FT_INST_JOB_REQ_RCVRY on QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME,REQUESTS_RECOVERY); + create index IDX_QRTZ_FT_J_G on QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP); + create index IDX_QRTZ_FT_JG on QRTZ_FIRED_TRIGGERS (SCHED_NAME,JOB_GROUP); + create index IDX_QRTZ_FT_T_G on QRTZ_FIRED_TRIGGERS (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); + create index IDX_QRTZ_FT_TG on 
QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_GROUP); + +-- +-- Table structure for table QRTZ_LOCKS +-- + +DROP TABLE IF EXISTS QRTZ_LOCKS; +CREATE TABLE QRTZ_LOCKS ( + SCHED_NAME varchar(120) NOT NULL, + LOCK_NAME varchar(40) NOT NULL, + PRIMARY KEY (SCHED_NAME,LOCK_NAME) +) ; + +-- +-- Table structure for table QRTZ_PAUSED_TRIGGER_GRPS +-- + +DROP TABLE IF EXISTS QRTZ_PAUSED_TRIGGER_GRPS; +CREATE TABLE QRTZ_PAUSED_TRIGGER_GRPS ( + SCHED_NAME varchar(120) NOT NULL, + TRIGGER_GROUP varchar(200) NOT NULL, + PRIMARY KEY (SCHED_NAME,TRIGGER_GROUP) +) ; + +-- +-- Table structure for table QRTZ_SCHEDULER_STATE +-- + +DROP TABLE IF EXISTS QRTZ_SCHEDULER_STATE; +CREATE TABLE QRTZ_SCHEDULER_STATE ( + SCHED_NAME varchar(120) NOT NULL, + INSTANCE_NAME varchar(200) NOT NULL, + LAST_CHECKIN_TIME bigint NOT NULL, + CHECKIN_INTERVAL bigint NOT NULL, + PRIMARY KEY (SCHED_NAME,INSTANCE_NAME) +) ; + +-- +-- Table structure for table QRTZ_SIMPLE_TRIGGERS +-- + +DROP TABLE IF EXISTS QRTZ_SIMPLE_TRIGGERS; +CREATE TABLE QRTZ_SIMPLE_TRIGGERS ( + SCHED_NAME varchar(120) NOT NULL, + TRIGGER_NAME varchar(200) NOT NULL, + TRIGGER_GROUP varchar(200) NOT NULL, + REPEAT_COUNT bigint NOT NULL, + REPEAT_INTERVAL bigint NOT NULL, + TIMES_TRIGGERED bigint NOT NULL, + PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) + +) ; + +-- +-- Table structure for table QRTZ_SIMPROP_TRIGGERS +-- + +DROP TABLE IF EXISTS QRTZ_SIMPROP_TRIGGERS; +CREATE TABLE QRTZ_SIMPROP_TRIGGERS ( + SCHED_NAME varchar(120) NOT NULL, + TRIGGER_NAME varchar(200) NOT NULL, + TRIGGER_GROUP varchar(200) NOT NULL, + STR_PROP_1 varchar(512) DEFAULT NULL, + STR_PROP_2 varchar(512) DEFAULT NULL, + STR_PROP_3 varchar(512) DEFAULT NULL, + INT_PROP_1 int DEFAULT NULL, + INT_PROP_2 int DEFAULT NULL, + LONG_PROP_1 bigint DEFAULT NULL, + LONG_PROP_2 bigint DEFAULT NULL, + DEC_PROP_1 decimal(13,4) DEFAULT NULL, + DEC_PROP_2 decimal(13,4) DEFAULT NULL, + BOOL_PROP_1 varchar(1) DEFAULT NULL, + BOOL_PROP_2 varchar(1) DEFAULT NULL, + PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) +) ; + +-- +-- Table structure for table QRTZ_TRIGGERS +-- + +DROP TABLE IF EXISTS QRTZ_TRIGGERS; +CREATE TABLE QRTZ_TRIGGERS ( + SCHED_NAME varchar(120) NOT NULL, + TRIGGER_NAME varchar(200) NOT NULL, + TRIGGER_GROUP varchar(200) NOT NULL, + JOB_NAME varchar(200) NOT NULL, + JOB_GROUP varchar(200) NOT NULL, + DESCRIPTION varchar(250) DEFAULT NULL, + NEXT_FIRE_TIME bigint DEFAULT NULL, + PREV_FIRE_TIME bigint DEFAULT NULL, + PRIORITY int DEFAULT NULL, + TRIGGER_STATE varchar(16) NOT NULL, + TRIGGER_TYPE varchar(8) NOT NULL, + START_TIME bigint NOT NULL, + END_TIME bigint DEFAULT NULL, + CALENDAR_NAME varchar(200) DEFAULT NULL, + MISFIRE_INSTR smallint DEFAULT NULL, + JOB_DATA bytea, + PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) +) ; + + create index IDX_QRTZ_T_J on QRTZ_TRIGGERS (SCHED_NAME,JOB_NAME,JOB_GROUP); + create index IDX_QRTZ_T_JG on QRTZ_TRIGGERS (SCHED_NAME,JOB_GROUP); + create index IDX_QRTZ_T_C on QRTZ_TRIGGERS (SCHED_NAME,CALENDAR_NAME); + create index IDX_QRTZ_T_G on QRTZ_TRIGGERS (SCHED_NAME,TRIGGER_GROUP); + create index IDX_QRTZ_T_STATE on QRTZ_TRIGGERS (SCHED_NAME,TRIGGER_STATE); + create index IDX_QRTZ_T_N_STATE on QRTZ_TRIGGERS (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_STATE); + create index IDX_QRTZ_T_N_G_STATE on QRTZ_TRIGGERS (SCHED_NAME,TRIGGER_GROUP,TRIGGER_STATE); + create index IDX_QRTZ_T_NEXT_FIRE_TIME on QRTZ_TRIGGERS (SCHED_NAME,NEXT_FIRE_TIME); + create index IDX_QRTZ_T_NFT_ST on QRTZ_TRIGGERS (SCHED_NAME,TRIGGER_STATE,NEXT_FIRE_TIME); + create index 
IDX_QRTZ_T_NFT_MISFIRE on QRTZ_TRIGGERS (SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME); + create index IDX_QRTZ_T_NFT_ST_MISFIRE on QRTZ_TRIGGERS (SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE); + create index IDX_QRTZ_T_NFT_ST_MISFIRE_GRP on QRTZ_TRIGGERS (SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE); + + +-- +-- Table structure for table QRTZ_JOB_DETAILS +-- + +DROP TABLE IF EXISTS QRTZ_JOB_DETAILS; +CREATE TABLE QRTZ_JOB_DETAILS ( + SCHED_NAME varchar(120) NOT NULL, + JOB_NAME varchar(200) NOT NULL, + JOB_GROUP varchar(200) NOT NULL, + DESCRIPTION varchar(250) DEFAULT NULL, + JOB_CLASS_NAME varchar(250) NOT NULL, + IS_DURABLE varchar(1) NOT NULL, + IS_NONCONCURRENT varchar(1) NOT NULL, + IS_UPDATE_DATA varchar(1) NOT NULL, + REQUESTS_RECOVERY varchar(1) NOT NULL, + JOB_DATA bytea, + PRIMARY KEY (SCHED_NAME,JOB_NAME,JOB_GROUP) +) ; + create index IDX_QRTZ_J_REQ_RECOVERY on QRTZ_JOB_DETAILS (SCHED_NAME,REQUESTS_RECOVERY); + create index IDX_QRTZ_J_GRP on QRTZ_JOB_DETAILS (SCHED_NAME,JOB_GROUP); + +alter table QRTZ_BLOB_TRIGGERS drop CONSTRAINT if EXISTS QRTZ_BLOB_TRIGGERS_ibfk_1; +alter table QRTZ_BLOB_TRIGGERS add CONSTRAINT QRTZ_BLOB_TRIGGERS_ibfk_1 FOREIGN KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP); + +alter table QRTZ_CRON_TRIGGERS drop CONSTRAINT if EXISTS QRTZ_CRON_TRIGGERS_ibfk_1; +alter table QRTZ_CRON_TRIGGERS add CONSTRAINT QRTZ_CRON_TRIGGERS_ibfk_1 FOREIGN KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP); + +alter table QRTZ_SIMPLE_TRIGGERS drop CONSTRAINT if EXISTS QRTZ_SIMPLE_TRIGGERS_ibfk_1; +alter table QRTZ_SIMPLE_TRIGGERS add CONSTRAINT QRTZ_SIMPLE_TRIGGERS_ibfk_1 FOREIGN KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP); + +alter table QRTZ_SIMPROP_TRIGGERS drop CONSTRAINT if EXISTS QRTZ_SIMPROP_TRIGGERS_ibfk_1; +alter table QRTZ_SIMPROP_TRIGGERS add CONSTRAINT QRTZ_SIMPROP_TRIGGERS_ibfk_1 FOREIGN KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP); + +alter table QRTZ_TRIGGERS drop CONSTRAINT if EXISTS QRTZ_TRIGGERS_ibfk_1; +alter table QRTZ_TRIGGERS add CONSTRAINT QRTZ_TRIGGERS_ibfk_1 FOREIGN KEY (SCHED_NAME, JOB_NAME, JOB_GROUP) REFERENCES QRTZ_JOB_DETAILS (SCHED_NAME, JOB_NAME, JOB_GROUP); + + + +-- +-- Table structure for table t_escheduler_access_token +-- + +DROP TABLE IF EXISTS t_escheduler_access_token; +CREATE TABLE t_escheduler_access_token ( + id int NOT NULL , + user_id int DEFAULT NULL , + token varchar(64) DEFAULT NULL , + expire_time timestamp DEFAULT NULL , + create_time timestamp DEFAULT NULL , + update_time timestamp DEFAULT NULL , + PRIMARY KEY (id) +) ; + +-- +-- Table structure for table t_escheduler_alert +-- + +DROP TABLE IF EXISTS t_escheduler_alert; +CREATE TABLE t_escheduler_alert ( + id int NOT NULL , + title varchar(64) DEFAULT NULL , + show_type int DEFAULT NULL , + content text , + alert_type int DEFAULT NULL , + alert_status int DEFAULT '0' , + log text , + alertgroup_id int DEFAULT NULL , + receivers text , + receivers_cc text , + create_time timestamp DEFAULT NULL , + update_time timestamp DEFAULT NULL , + PRIMARY KEY (id) +) ; +-- +-- Table structure for table t_escheduler_alertgroup +-- + +DROP TABLE IF EXISTS t_escheduler_alertgroup; +CREATE TABLE t_escheduler_alertgroup ( + id int NOT NULL , + group_name varchar(255) DEFAULT NULL , + group_type int 
DEFAULT NULL , + "desc" varchar(255) DEFAULT NULL , + create_time timestamp DEFAULT NULL , + update_time timestamp DEFAULT NULL , + PRIMARY KEY (id) +) ; + +-- +-- Table structure for table t_escheduler_command +-- + +DROP TABLE IF EXISTS t_escheduler_command; +CREATE TABLE t_escheduler_command ( + id int NOT NULL , + command_type int DEFAULT NULL , + process_definition_id int DEFAULT NULL , + command_param text , + task_depend_type int DEFAULT NULL , + failure_strategy int DEFAULT '0' , + warning_type int DEFAULT '0' , + warning_group_id int DEFAULT NULL , + schedule_time timestamp DEFAULT NULL , + start_time timestamp DEFAULT NULL , + executor_id int DEFAULT NULL , + dependence varchar(255) DEFAULT NULL , + update_time timestamp DEFAULT NULL , + process_instance_priority int DEFAULT NULL , + worker_group_id int DEFAULT '-1' , + PRIMARY KEY (id) +) ; + +-- +-- Table structure for table t_escheduler_datasource +-- + +DROP TABLE IF EXISTS t_escheduler_datasource; +CREATE TABLE t_escheduler_datasource ( + id int NOT NULL , + name varchar(64) NOT NULL , + note varchar(256) DEFAULT NULL , + type int NOT NULL , + user_id int NOT NULL , + connection_params text NOT NULL , + create_time timestamp NOT NULL , + update_time timestamp DEFAULT NULL , + PRIMARY KEY (id) +) ; + +-- +-- Table structure for table t_escheduler_error_command +-- + +DROP TABLE IF EXISTS t_escheduler_error_command; +CREATE TABLE t_escheduler_error_command ( + id int NOT NULL , + command_type int DEFAULT NULL , + executor_id int DEFAULT NULL , + process_definition_id int DEFAULT NULL , + command_param text , + task_depend_type int DEFAULT NULL , + failure_strategy int DEFAULT '0' , + warning_type int DEFAULT '0' , + warning_group_id int DEFAULT NULL , + schedule_time timestamp DEFAULT NULL , + start_time timestamp DEFAULT NULL , + update_time timestamp DEFAULT NULL , + dependence text , + process_instance_priority int DEFAULT NULL , + worker_group_id int DEFAULT '-1' , + message text , + PRIMARY KEY (id) +); +-- +-- Table structure for table t_escheduler_master_server +-- + +DROP TABLE IF EXISTS t_escheduler_master_server; +CREATE TABLE t_escheduler_master_server ( + id int NOT NULL , + host varchar(45) DEFAULT NULL , + port int DEFAULT NULL , + zk_directory varchar(64) DEFAULT NULL , + res_info varchar(256) DEFAULT NULL , + create_time timestamp DEFAULT NULL , + last_heartbeat_time timestamp DEFAULT NULL , + PRIMARY KEY (id) +) ; + +-- +-- Table structure for table t_escheduler_process_definition +-- + +DROP TABLE IF EXISTS t_escheduler_process_definition; +CREATE TABLE t_escheduler_process_definition ( + id int NOT NULL , + name varchar(255) DEFAULT NULL , + version int DEFAULT NULL , + release_state int DEFAULT NULL , + project_id int DEFAULT NULL , + user_id int DEFAULT NULL , + process_definition_json text , + "desc" text , + global_params text , + flag int DEFAULT NULL , + locations text , + connects text , + receivers text , + receivers_cc text , + create_time timestamp DEFAULT NULL , + timeout int DEFAULT '0' , + tenant_id int NOT NULL DEFAULT '-1' , + update_time timestamp DEFAULT NULL , + PRIMARY KEY (id) +) ; + +create index process_definition_index on t_escheduler_process_definition (project_id,id); + +-- +-- Table structure for table t_escheduler_process_instance +-- + +DROP TABLE IF EXISTS t_escheduler_process_instance; +CREATE TABLE t_escheduler_process_instance ( + id int NOT NULL , + name varchar(255) DEFAULT NULL , + process_definition_id int DEFAULT NULL , + state int DEFAULT NULL , + recovery int DEFAULT 
NULL , + start_time timestamp DEFAULT NULL , + end_time timestamp DEFAULT NULL , + run_times int DEFAULT NULL , + host varchar(45) DEFAULT NULL , + command_type int DEFAULT NULL , + command_param text , + task_depend_type int DEFAULT NULL , + max_try_times int DEFAULT '0' , + failure_strategy int DEFAULT '0' , + warning_type int DEFAULT '0' , + warning_group_id int DEFAULT NULL , + schedule_time timestamp DEFAULT NULL , + command_start_time timestamp DEFAULT NULL , + global_params text , + process_instance_json text , + flag int DEFAULT '1' , + update_time timestamp NULL , + is_sub_process int DEFAULT '0' , + executor_id int NOT NULL , + locations text , + connects text , + history_cmd text , + dependence_schedule_times text , + process_instance_priority int DEFAULT NULL , + worker_group_id int DEFAULT '-1' , + timeout int DEFAULT '0' , + tenant_id int NOT NULL DEFAULT '-1' , + PRIMARY KEY (id) +) ; + create index process_instance_index on t_escheduler_process_instance (process_definition_id,id); + create index start_time_index on t_escheduler_process_instance (start_time); + +-- +-- Table structure for table t_escheduler_project +-- + +DROP TABLE IF EXISTS t_escheduler_project; +CREATE TABLE t_escheduler_project ( + id int NOT NULL , + name varchar(100) DEFAULT NULL , + "desc" varchar(200) DEFAULT NULL , + user_id int DEFAULT NULL , + flag int DEFAULT '1' , + create_time timestamp DEFAULT CURRENT_TIMESTAMP , + update_time timestamp DEFAULT CURRENT_TIMESTAMP , + PRIMARY KEY (id) +) ; + create index user_id_index on t_escheduler_project (user_id); + +-- +-- Table structure for table t_escheduler_queue +-- + +DROP TABLE IF EXISTS t_escheduler_queue; +CREATE TABLE t_escheduler_queue ( + id int NOT NULL , + queue_name varchar(64) DEFAULT NULL , + queue varchar(64) DEFAULT NULL , + create_time timestamp DEFAULT NULL , + update_time timestamp DEFAULT NULL , + PRIMARY KEY (id) +); + + +-- +-- Table structure for table t_escheduler_relation_datasource_user +-- + +DROP TABLE IF EXISTS t_escheduler_relation_datasource_user; +CREATE TABLE t_escheduler_relation_datasource_user ( + id int NOT NULL , + user_id int NOT NULL , + datasource_id int DEFAULT NULL , + perm int DEFAULT '1' , + create_time timestamp DEFAULT NULL , + update_time timestamp DEFAULT NULL , + PRIMARY KEY (id) +) ; +; + +-- +-- Table structure for table t_escheduler_relation_process_instance +-- + +DROP TABLE IF EXISTS t_escheduler_relation_process_instance; +CREATE TABLE t_escheduler_relation_process_instance ( + id int NOT NULL , + parent_process_instance_id int DEFAULT NULL , + parent_task_instance_id int DEFAULT NULL , + process_instance_id int DEFAULT NULL , + PRIMARY KEY (id) +) ; + + +-- +-- Table structure for table t_escheduler_relation_project_user +-- + +DROP TABLE IF EXISTS t_escheduler_relation_project_user; +CREATE TABLE t_escheduler_relation_project_user ( + id int NOT NULL , + user_id int NOT NULL , + project_id int DEFAULT NULL , + perm int DEFAULT '1' , + create_time timestamp DEFAULT NULL , + update_time timestamp DEFAULT NULL , + PRIMARY KEY (id) +) ; +create index relation_project_user_id_index on t_escheduler_relation_project_user (user_id); + +-- +-- Table structure for table t_escheduler_relation_resources_user +-- + +DROP TABLE IF EXISTS t_escheduler_relation_resources_user; +CREATE TABLE t_escheduler_relation_resources_user ( + id int NOT NULL , + user_id int NOT NULL , + resources_id int DEFAULT NULL , + perm int DEFAULT '1' , + create_time timestamp DEFAULT NULL , + update_time timestamp DEFAULT NULL , +
PRIMARY KEY (id) +) ; + +-- +-- Table structure for table t_escheduler_relation_udfs_user +-- + +DROP TABLE IF EXISTS t_escheduler_relation_udfs_user; +CREATE TABLE t_escheduler_relation_udfs_user ( + id int NOT NULL , + user_id int NOT NULL , + udf_id int DEFAULT NULL , + perm int DEFAULT '1' , + create_time timestamp DEFAULT NULL , + update_time timestamp DEFAULT NULL , + PRIMARY KEY (id) +) ; +; + +-- +-- Table structure for table t_escheduler_relation_user_alertgroup +-- + +DROP TABLE IF EXISTS t_escheduler_relation_user_alertgroup; +CREATE TABLE t_escheduler_relation_user_alertgroup ( + id int NOT NULL, + alertgroup_id int DEFAULT NULL, + user_id int DEFAULT NULL, + create_time timestamp DEFAULT NULL, + update_time timestamp DEFAULT NULL, + PRIMARY KEY (id) +); + +-- +-- Table structure for table t_escheduler_resources +-- + +DROP TABLE IF EXISTS t_escheduler_resources; +CREATE TABLE t_escheduler_resources ( + id int NOT NULL , + alias varchar(64) DEFAULT NULL , + file_name varchar(64) DEFAULT NULL , + "desc" varchar(256) DEFAULT NULL , + user_id int DEFAULT NULL , + type int DEFAULT NULL , + size bigint DEFAULT NULL , + create_time timestamp DEFAULT NULL , + update_time timestamp DEFAULT NULL , + PRIMARY KEY (id) +) ; +; + +-- +-- Table structure for table t_escheduler_schedules +-- + +DROP TABLE IF EXISTS t_escheduler_schedules; +CREATE TABLE t_escheduler_schedules ( + id int NOT NULL , + process_definition_id int NOT NULL , + start_time timestamp NOT NULL , + end_time timestamp NOT NULL , + crontab varchar(256) NOT NULL , + failure_strategy int NOT NULL , + user_id int NOT NULL , + release_state int NOT NULL , + warning_type int NOT NULL , + warning_group_id int DEFAULT NULL , + process_instance_priority int DEFAULT NULL , + worker_group_id int DEFAULT '-1' , + create_time timestamp NOT NULL , + update_time timestamp NOT NULL , + PRIMARY KEY (id) +); + +-- +-- Table structure for table t_escheduler_session +-- + +DROP TABLE IF EXISTS t_escheduler_session; +CREATE TABLE t_escheduler_session ( + id varchar(64) NOT NULL , + user_id int DEFAULT NULL , + ip varchar(45) DEFAULT NULL , + last_login_time timestamp DEFAULT NULL , + PRIMARY KEY (id) +); + +-- +-- Table structure for table t_escheduler_task_instance +-- + +DROP TABLE IF EXISTS t_escheduler_task_instance; +CREATE TABLE t_escheduler_task_instance ( + id int NOT NULL , + name varchar(255) DEFAULT NULL , + task_type varchar(64) DEFAULT NULL , + process_definition_id int DEFAULT NULL , + process_instance_id int DEFAULT NULL , + task_json text , + state int DEFAULT NULL , + submit_time timestamp DEFAULT NULL , + start_time timestamp DEFAULT NULL , + end_time timestamp DEFAULT NULL , + host varchar(45) DEFAULT NULL , + execute_path varchar(200) DEFAULT NULL , + log_path varchar(200) DEFAULT NULL , + alert_flag int DEFAULT NULL , + retry_times int DEFAULT '0' , + pid int DEFAULT NULL , + app_link varchar(255) DEFAULT NULL , + flag int DEFAULT '1' , + retry_interval int DEFAULT NULL , + max_retry_times int DEFAULT NULL , + task_instance_priority int DEFAULT NULL , + worker_group_id int DEFAULT '-1' , + PRIMARY KEY (id) +) ; + +-- +-- Table structure for table t_escheduler_tenant +-- + +DROP TABLE IF EXISTS t_escheduler_tenant; +CREATE TABLE t_escheduler_tenant ( + id int NOT NULL , + tenant_code varchar(64) DEFAULT NULL , + tenant_name varchar(64) DEFAULT NULL , + "desc" varchar(256) DEFAULT NULL , + queue_id int DEFAULT NULL , + create_time timestamp DEFAULT NULL , + update_time timestamp DEFAULT NULL , + PRIMARY KEY (id) +) ; + +-- 
+-- Table structure for table t_escheduler_udfs +-- + +DROP TABLE IF EXISTS t_escheduler_udfs; +CREATE TABLE t_escheduler_udfs ( + id int NOT NULL , + user_id int NOT NULL , + func_name varchar(100) NOT NULL , + class_name varchar(255) NOT NULL , + type int NOT NULL , + arg_types varchar(255) DEFAULT NULL , + database varchar(255) DEFAULT NULL , + "desc" varchar(255) DEFAULT NULL , + resource_id int NOT NULL , + resource_name varchar(255) NOT NULL , + create_time timestamp NOT NULL , + update_time timestamp NOT NULL , + PRIMARY KEY (id) +) ; + +-- +-- Table structure for table t_escheduler_user +-- + +DROP TABLE IF EXISTS t_escheduler_user; +CREATE TABLE t_escheduler_user ( + id int NOT NULL , + user_name varchar(64) DEFAULT NULL , + user_password varchar(64) DEFAULT NULL , + user_type int DEFAULT NULL , + email varchar(64) DEFAULT NULL , + phone varchar(11) DEFAULT NULL , + tenant_id int DEFAULT NULL , + create_time timestamp DEFAULT NULL , + update_time timestamp DEFAULT NULL , + queue varchar(64) DEFAULT NULL , + PRIMARY KEY (id) +); + +-- +-- Table structure for table t_escheduler_version +-- + +DROP TABLE IF EXISTS t_escheduler_version; +CREATE TABLE t_escheduler_version ( + id int NOT NULL , + version varchar(200) NOT NULL, + PRIMARY KEY (id) +) ; +create index version_index on t_escheduler_version(version); + +-- +-- Table structure for table t_escheduler_worker_group +-- + +DROP TABLE IF EXISTS t_escheduler_worker_group; +CREATE TABLE t_escheduler_worker_group ( + id bigint NOT NULL , + name varchar(256) DEFAULT NULL , + ip_list varchar(256) DEFAULT NULL , + create_time timestamp DEFAULT NULL , + update_time timestamp DEFAULT NULL , + PRIMARY KEY (id) +) ; + +-- +-- Table structure for table t_escheduler_worker_server +-- + +DROP TABLE IF EXISTS t_escheduler_worker_server; +CREATE TABLE t_escheduler_worker_server ( + id int NOT NULL , + host varchar(45) DEFAULT NULL , + port int DEFAULT NULL , + zk_directory varchar(64) DEFAULT NULL , + res_info varchar(255) DEFAULT NULL , + create_time timestamp DEFAULT NULL , + last_heartbeat_time timestamp DEFAULT NULL , + PRIMARY KEY (id) +) ; + + +DROP SEQUENCE IF EXISTS t_escheduler_access_token_id_sequence; +CREATE SEQUENCE t_escheduler_access_token_id_sequence; +ALTER TABLE t_escheduler_access_token ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_access_token_id_sequence'); +DROP SEQUENCE IF EXISTS t_escheduler_alert_id_sequence; +CREATE SEQUENCE t_escheduler_alert_id_sequence; +ALTER TABLE t_escheduler_alert ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_alert_id_sequence'); +DROP SEQUENCE IF EXISTS t_escheduler_alertgroup_id_sequence; +CREATE SEQUENCE t_escheduler_alertgroup_id_sequence; +ALTER TABLE t_escheduler_alertgroup ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_alertgroup_id_sequence'); + +DROP SEQUENCE IF EXISTS t_escheduler_command_id_sequence; +CREATE SEQUENCE t_escheduler_command_id_sequence; +ALTER TABLE t_escheduler_command ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_command_id_sequence'); +DROP SEQUENCE IF EXISTS t_escheduler_datasource_id_sequence; +CREATE SEQUENCE t_escheduler_datasource_id_sequence; +ALTER TABLE t_escheduler_datasource ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_datasource_id_sequence'); +DROP SEQUENCE IF EXISTS t_escheduler_master_server_id_sequence; +CREATE SEQUENCE t_escheduler_master_server_id_sequence; +ALTER TABLE t_escheduler_master_server ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_master_server_id_sequence'); +DROP SEQUENCE IF EXISTS 
t_escheduler_process_definition_id_sequence; +CREATE SEQUENCE t_escheduler_process_definition_id_sequence; +ALTER TABLE t_escheduler_process_definition ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_process_definition_id_sequence'); +DROP SEQUENCE IF EXISTS t_escheduler_process_instance_id_sequence; +CREATE SEQUENCE t_escheduler_process_instance_id_sequence; +ALTER TABLE t_escheduler_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_process_instance_id_sequence'); +DROP SEQUENCE IF EXISTS t_escheduler_project_id_sequence; +CREATE SEQUENCE t_escheduler_project_id_sequence; +ALTER TABLE t_escheduler_project ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_project_id_sequence'); +DROP SEQUENCE IF EXISTS t_escheduler_queue_id_sequence; +CREATE SEQUENCE t_escheduler_queue_id_sequence; +ALTER TABLE t_escheduler_queue ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_queue_id_sequence'); + +DROP SEQUENCE IF EXISTS t_escheduler_relation_datasource_user_id_sequence; +CREATE SEQUENCE t_escheduler_relation_datasource_user_id_sequence; +ALTER TABLE t_escheduler_relation_datasource_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_relation_datasource_user_id_sequence'); +DROP SEQUENCE IF EXISTS t_escheduler_relation_process_instance_id_sequence; +CREATE SEQUENCE t_escheduler_relation_process_instance_id_sequence; +ALTER TABLE t_escheduler_relation_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_relation_process_instance_id_sequence'); +DROP SEQUENCE IF EXISTS t_escheduler_relation_project_user_id_sequence; +CREATE SEQUENCE t_escheduler_relation_project_user_id_sequence; +ALTER TABLE t_escheduler_relation_project_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_relation_project_user_id_sequence'); +DROP SEQUENCE IF EXISTS t_escheduler_relation_resources_user_id_sequence; +CREATE SEQUENCE t_escheduler_relation_resources_user_id_sequence; +ALTER TABLE t_escheduler_relation_resources_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_relation_resources_user_id_sequence'); +DROP SEQUENCE IF EXISTS t_escheduler_relation_udfs_user_id_sequence; +CREATE SEQUENCE t_escheduler_relation_udfs_user_id_sequence; +ALTER TABLE t_escheduler_relation_udfs_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_relation_udfs_user_id_sequence'); +DROP SEQUENCE IF EXISTS t_escheduler_relation_user_alertgroup_id_sequence; +CREATE SEQUENCE t_escheduler_relation_user_alertgroup_id_sequence; +ALTER TABLE t_escheduler_relation_user_alertgroup ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_relation_user_alertgroup_id_sequence'); + +DROP SEQUENCE IF EXISTS t_escheduler_resources_id_sequence; +CREATE SEQUENCE t_escheduler_resources_id_sequence; +ALTER TABLE t_escheduler_resources ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_resources_id_sequence'); +DROP SEQUENCE IF EXISTS t_escheduler_schedules_id_sequence; +CREATE SEQUENCE t_escheduler_schedules_id_sequence; +ALTER TABLE t_escheduler_schedules ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_schedules_id_sequence'); +DROP SEQUENCE IF EXISTS t_escheduler_task_instance_id_sequence; +CREATE SEQUENCE t_escheduler_task_instance_id_sequence; +ALTER TABLE t_escheduler_task_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_task_instance_id_sequence'); +DROP SEQUENCE IF EXISTS t_escheduler_tenant_id_sequence; +CREATE SEQUENCE t_escheduler_tenant_id_sequence; +ALTER TABLE t_escheduler_tenant ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_tenant_id_sequence'); +DROP SEQUENCE IF EXISTS t_escheduler_udfs_id_sequence; 
+CREATE SEQUENCE t_escheduler_udfs_id_sequence; +ALTER TABLE t_escheduler_udfs ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_udfs_id_sequence'); +DROP SEQUENCE IF EXISTS t_escheduler_user_id_sequence; +CREATE SEQUENCE t_escheduler_user_id_sequence; +ALTER TABLE t_escheduler_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_user_id_sequence'); + +DROP SEQUENCE IF EXISTS t_escheduler_version_id_sequence; +CREATE SEQUENCE t_escheduler_version_id_sequence; +ALTER TABLE t_escheduler_version ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_version_id_sequence'); + +DROP SEQUENCE IF EXISTS t_escheduler_worker_group_id_sequence; +CREATE SEQUENCE t_escheduler_worker_group_id_sequence; +ALTER TABLE t_escheduler_worker_group ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_worker_group_id_sequence'); +DROP SEQUENCE IF EXISTS t_escheduler_worker_server_id_sequence; +CREATE SEQUENCE t_escheduler_worker_server_id_sequence; +ALTER TABLE t_escheduler_worker_server ALTER COLUMN id SET DEFAULT NEXTVAL('t_escheduler_worker_server_id_sequence'); \ No newline at end of file
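The DROP SEQUENCE / CREATE SEQUENCE / ALTER TABLE ... SET DEFAULT NEXTVAL boilerplate above is the PostgreSQL equivalent of MySQL's AUTO_INCREMENT: each table's `id` draws its default from a dedicated sequence. A short sketch of the resulting behavior (illustrative only, using the queue table defined earlier in this file):

```sql
-- With the default in place, inserts can omit the id column entirely:
INSERT INTO t_escheduler_queue (queue_name, queue) VALUES ('default', 'default');
-- and the assigned id can be read back from the table's sequence:
SELECT currval('t_escheduler_queue_id_sequence');
```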
diff --git a/sql/create/release-1.2.0_schema/postgresql/dolphinscheduler_dml.sql b/sql/create/release-1.2.0_schema/postgresql/dolphinscheduler_dml.sql new file mode 100644 index 0000000000..72e60ace3b --- /dev/null +++ b/sql/create/release-1.2.0_schema/postgresql/dolphinscheduler_dml.sql @@ -0,0 +1,8 @@ +-- Records of t_escheduler_user,user : admin , password : escheduler123 +INSERT INTO "t_escheduler_user" VALUES ('1', 'admin', '055a97b5fcd6d120372ad1976518f371', '0', 'xxx@qq.com', 'xx', '0', '2018-03-27 15:48:50', '2018-10-24 17:40:22'); +INSERT INTO "t_escheduler_alertgroup" VALUES (1, 'escheduler管理员告警组', '0', 'escheduler管理员告警组','2018-11-29 10:20:39', '2018-11-29 10:20:39'); +INSERT INTO "t_escheduler_relation_user_alertgroup" VALUES ('1', '1', '1', '2018-11-29 10:22:33', '2018-11-29 10:22:33'); + +-- Records of t_escheduler_queue,default queue name : default +INSERT INTO "t_escheduler_queue" VALUES ('1', 'default', 'default'); +INSERT INTO "t_escheduler_version" VALUES ('1', '1.2.0'); \ No newline at end of file diff --git a/sql/soft_version b/sql/soft_version index 1cc5f657e0..867e52437a 100644 --- a/sql/soft_version +++ b/sql/soft_version @@ -1 +1 @@ -1.1.0 \ No newline at end of file +1.2.0 \ No newline at end of file diff --git a/sql/upgrade/1.0.1_schema/mysql/escheduler_ddl.sql b/sql/upgrade/1.0.1_schema/mysql/dolphinscheduler_ddl.sql similarity index 100% rename from sql/upgrade/1.0.1_schema/mysql/escheduler_ddl.sql rename to sql/upgrade/1.0.1_schema/mysql/dolphinscheduler_ddl.sql diff --git a/sql/upgrade/1.0.1_schema/mysql/escheduler_dml.sql b/sql/upgrade/1.0.1_schema/mysql/dolphinscheduler_dml.sql similarity index 100% rename from sql/upgrade/1.0.1_schema/mysql/escheduler_dml.sql rename to sql/upgrade/1.0.1_schema/mysql/dolphinscheduler_dml.sql diff --git a/sql/upgrade/1.0.2_schema/mysql/escheduler_ddl.sql b/sql/upgrade/1.0.2_schema/mysql/dolphinscheduler_ddl.sql similarity index 100% rename from sql/upgrade/1.0.2_schema/mysql/escheduler_ddl.sql rename to sql/upgrade/1.0.2_schema/mysql/dolphinscheduler_ddl.sql diff --git a/sql/upgrade/1.0.2_schema/mysql/escheduler_dml.sql b/sql/upgrade/1.0.2_schema/mysql/dolphinscheduler_dml.sql similarity index 100% rename from sql/upgrade/1.0.2_schema/mysql/escheduler_dml.sql rename to sql/upgrade/1.0.2_schema/mysql/dolphinscheduler_dml.sql diff --git a/sql/upgrade/1.1.0_schema/mysql/escheduler_ddl.sql b/sql/upgrade/1.1.0_schema/mysql/dolphinscheduler_ddl.sql similarity index 100% rename from sql/upgrade/1.1.0_schema/mysql/escheduler_ddl.sql rename to sql/upgrade/1.1.0_schema/mysql/dolphinscheduler_ddl.sql diff --git a/sql/upgrade/1.1.0_schema/mysql/escheduler_dml.sql b/sql/upgrade/1.1.0_schema/mysql/dolphinscheduler_dml.sql similarity index 100% rename from sql/upgrade/1.1.0_schema/mysql/escheduler_dml.sql rename to sql/upgrade/1.1.0_schema/mysql/dolphinscheduler_dml.sql diff --git a/sql/upgrade/1.2.0_schema/mysql/dolphinscheduler_ddl.sql b/sql/upgrade/1.2.0_schema/mysql/dolphinscheduler_ddl.sql new file mode 100644 index 0000000000..e69de29bb2 diff --git a/sql/upgrade/1.2.0_schema/mysql/dolphinscheduler_dml.sql b/sql/upgrade/1.2.0_schema/mysql/dolphinscheduler_dml.sql new file mode 100644 index 0000000000..e69de29bb2 diff --git a/sql/upgrade/1.2.0_schema/postgresql/dolphinscheduler_ddl.sql b/sql/upgrade/1.2.0_schema/postgresql/dolphinscheduler_ddl.sql new file mode 100644 index 0000000000..e69de29bb2 diff --git a/sql/upgrade/1.2.0_schema/postgresql/dolphinscheduler_dml.sql b/sql/upgrade/1.2.0_schema/postgresql/dolphinscheduler_dml.sql new file mode 100644 index 0000000000..e69de29bb2