
Merge remote-tracking branch 'upstream/dev' into dev

pull/3/MERGE
lidongdai 5 years ago
commit a1892c8fdb
Changed files (number of changed lines in parentheses):

  1. README.md (6)
  2. dockerfile/conf/escheduler/conf/i18n/messages.properties (6)
  3. dockerfile/conf/escheduler/conf/i18n/messages_en_US.properties (6)
  4. dockerfile/conf/escheduler/conf/i18n/messages_zh_CN.properties (6)
  5. docs/en_US/EasyScheduler Proposal.md (8)
  6. docs/en_US/architecture-design.md (24)
  7. docs/en_US/backend-deployment.md (8)
  8. docs/en_US/frontend-deployment.md (2)
  9. docs/en_US/frontend-development.md (4)
  10. docs/en_US/system-manual.md (6)
  11. docs/en_US/upgrade.md (4)
  12. docs/zh_CN/前端开发文档.md (4)
  13. docs/zh_CN/升级文档.md (4)
  14. docs/zh_CN/后端部署文档.md (8)
  15. docs/zh_CN/系统使用手册.md (4)
  16. docs/zh_CN/系统架构设计.md (26)
  17. escheduler-alert/src/main/java/cn/escheduler/alert/AlertServer.java (2)
  18. escheduler-alert/src/main/java/cn/escheduler/alert/utils/Constants.java (8)
  19. escheduler-alert/src/main/java/cn/escheduler/alert/utils/MailUtils.java (65)
  20. escheduler-alert/src/main/resources/alert.properties (12)
  21. escheduler-api/src/main/java/cn/escheduler/api/controller/ProcessDefinitionController.java (40)
  22. escheduler-api/src/main/java/cn/escheduler/api/controller/ProjectController.java (21)
  23. escheduler-api/src/main/java/cn/escheduler/api/service/ProcessDefinitionService.java (18)
  24. escheduler-api/src/main/java/cn/escheduler/api/service/ProcessInstanceService.java (58)
  25. escheduler-api/src/main/java/cn/escheduler/api/service/ProjectService.java (26)
  26. escheduler-api/src/main/resources/i18n/messages.properties (6)
  27. escheduler-api/src/main/resources/i18n/messages_en_US.properties (6)
  28. escheduler-api/src/main/resources/i18n/messages_zh_CN.properties (6)
  29. escheduler-api/src/test/java/cn/escheduler/api/HttpClientTest.java (6)
  30. escheduler-common/src/main/java/cn/escheduler/common/Constants.java (14)
  31. escheduler-common/src/main/java/cn/escheduler/common/enums/TaskType.java (3)
  32. escheduler-common/src/main/java/cn/escheduler/common/shell/AbstractShell.java (2)
  33. escheduler-common/src/main/java/cn/escheduler/common/task/flink/FlinkParameters.java (219)
  34. escheduler-common/src/main/java/cn/escheduler/common/thread/ThreadPoolExecutors.java (2)
  35. escheduler-common/src/main/java/cn/escheduler/common/utils/ConnectionUtils.java (14)
  36. escheduler-common/src/main/java/cn/escheduler/common/utils/TaskParametersUtils.java (3)
  37. escheduler-dao/src/main/java/cn/escheduler/dao/DaoFactory.java (8)
  38. escheduler-dao/src/main/java/cn/escheduler/dao/ProcessDao.java (2)
  39. escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessDefinitionMapper.java (22)
  40. escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessDefinitionMapperProvider.java (15)
  41. escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessInstanceMapperProvider.java (8)
  42. escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProjectMapper.java (16)
  43. escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProjectMapperProvider.java (13)
  44. escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ScheduleMapperProvider.java (2)
  45. escheduler-dao/src/main/java/cn/escheduler/dao/mapper/TaskInstanceMapperProvider.java (8)
  46. escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/DolphinSchedulerManager.java (48)
  47. escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/MysqlUpgradeDao.java (104)
  48. escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/PostgresqlUpgradeDao.java (132)
  49. escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/UpgradeDao.java (129)
  50. escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/CreateDolphinScheduler.java (22)
  51. escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/InitDolphinScheduler.java (16)
  52. escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/UpgradeDolphinScheduler.java (18)
  53. escheduler-server/src/main/java/cn/escheduler/server/master/MasterServer.java (6)
  54. escheduler-server/src/main/java/cn/escheduler/server/utils/FlinkArgsUtils.java (110)
  55. escheduler-server/src/main/java/cn/escheduler/server/worker/WorkerServer.java (6)
  56. escheduler-server/src/main/java/cn/escheduler/server/worker/runner/FetchTaskThread.java (5)
  57. escheduler-server/src/main/java/cn/escheduler/server/worker/task/AbstractTask.java (3)
  58. escheduler-server/src/main/java/cn/escheduler/server/worker/task/TaskManager.java (3)
  59. escheduler-server/src/main/java/cn/escheduler/server/worker/task/flink/FlinkTask.java (118)
  60. escheduler-server/src/main/resources/application_master.properties (3)
  61. escheduler-server/src/main/resources/application_worker.properties (4)
  62. escheduler-ui/build/webpack.config.prod.js (4)
  63. escheduler-ui/install-escheduler-ui.sh (80)
  64. escheduler-ui/package.json (12)
  65. escheduler-ui/src/js/conf/home/pages/dag/_source/config.js (4)
  66. escheduler-ui/src/js/conf/home/pages/dag/_source/dag.scss (3)
  67. escheduler-ui/src/js/conf/home/pages/dag/_source/formModel/formModel.vue (8)
  68. escheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/_source/dependItemList.vue (92)
  69. escheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/dependent.vue (13)
  70. escheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/flink.vue (388)
  71. escheduler-ui/src/js/conf/home/pages/dag/_source/plugIn/jsPlumbHandle.js (2)
  72. escheduler-ui/src/js/conf/home/pages/dag/definitionDetails.vue (4)
  73. escheduler-ui/src/js/conf/home/pages/dag/img/toobar_flink.svg (211)
  74. escheduler-ui/src/js/conf/home/pages/dag/index.vue (4)
  75. escheduler-ui/src/js/conf/home/pages/dag/instanceDetails.vue (4)
  76. escheduler-ui/src/js/conf/home/pages/projects/pages/definition/pages/list/_source/email.vue (2)
  77. escheduler-ui/src/js/conf/home/pages/projects/pages/definition/pages/list/_source/timing.vue (5)
  78. escheduler-ui/src/js/conf/home/pages/projects/pages/instance/pages/list/_source/list.vue (1)
  79. escheduler-ui/src/js/conf/home/pages/security/pages/users/_source/createUser.vue (1)
  80. escheduler-ui/src/js/conf/home/store/dag/actions.js (29)
  81. escheduler-ui/src/js/conf/home/store/dag/mutations.js (1)
  82. escheduler-ui/src/js/conf/home/store/dag/state.js (2)
  83. escheduler-ui/src/js/module/i18n/locale/zh_CN.js (6)
  84. escheduler-ui/src/lib/@analysys/ans-ui/package.json (1)
  85. escheduler-ui/src/lib/@fedor/io/package.json (1)
  86. escheduler-ui/src/lib/@vue/crontab/example/app.vue (1)
  87. escheduler-ui/src/view/docs/zh_CN/_book/images/flink_edit.png (BIN)
  88. install.sh (254)
  89. package.xml (4)
  90. pom.xml (103)
  91. script/create-escheduler.sh (2)
  92. script/del-zk-node.py (0)
  93. script/env/.escheduler_env.sh (4)
  94. script/escheduler-daemon.sh (2)
  95. script/monitor-server.py (43)
  96. script/scp-hosts.sh (0)
  97. script/start-all.sh (0)
  98. script/stop-all.sh (0)
  99. script/upgrade-escheduler.sh (2)
  100. sql/create/release-1.0.0_schema/mysql/dolphinscheduler_ddl.sql (0)

Some files were not shown because too many files have changed in this diff.

6
README.md

@@ -64,7 +64,7 @@ Overload processing: Task queue mechanism, the number of schedulable tasks on a
 - <a href="http://106.75.43.194:8888" target="_blank">Online Demo</a>
-More documentation please refer to <a href="https://analysys.github.io/easyscheduler_docs_cn/" target="_blank">[EasyScheduler online documentation]</a>
+More documentation please refer to <a href="https://analysys.github.io/easyscheduler_docs/" target="_blank">[EasyScheduler online documentation]</a>
 ### Recent R&D plan
 Work plan of Easy Scheduler: [R&D plan](https://github.com/analysys/EasyScheduler/projects/1), where `In Develop` card is the features of 1.1.0 version , TODO card is to be done (including feature ideas)
@@ -80,7 +80,9 @@ Easy Scheduler uses a lot of excellent open source projects, such as google guav
 It is because of the shoulders of these open source projects that the birth of the Easy Scheduler is possible. We are very grateful for all the open source software used! We also hope that we will not only be the beneficiaries of open source, but also be open source contributors, so we decided to contribute to easy scheduling and promised long-term updates. We also hope that partners who have the same passion and conviction for open source will join in and contribute to open source!
 ### Get Help
-The fastest way to get response from our developers is to submit issues, or add our wechat : 510570367
+1. Submit an issue
+1. Mail list: dev@dolphinscheduler.apache.org. Mail to dev-subscribe@dolphinscheduler.apache.org, follow the reply to subscribe the mail list.
+1. Contact WeChat group manager, ID 510570367. This is for Mandarin(CN) discussion.
 ### License
 Please refer to [LICENSE](https://github.com/analysys/EasyScheduler/blob/dev/LICENSE) file.

6
dockerfile/conf/escheduler/conf/i18n/messages.properties

@@ -111,6 +111,7 @@ UPDATE_PROJECT_NOTES=update project
 PROJECT_ID=project id
 QUERY_PROJECT_BY_ID_NOTES=query project info by project id
 QUERY_PROJECT_LIST_PAGING_NOTES=QUERY PROJECT LIST PAGING
+QUERY_ALL_PROJECT_LIST_NOTES=query all project list
 DELETE_PROJECT_BY_ID_NOTES=delete project by id
 QUERY_UNAUTHORIZED_PROJECT_NOTES=query unauthorized project
 QUERY_AUTHORIZED_PROJECT_NOTES=query authorized project
@@ -152,10 +153,12 @@ VERIFY_PROCCESS_DEFINITION_NAME_NOTES=verify proccess definition name
 LOGIN_NOTES=user login
 UPDATE_PROCCESS_DEFINITION_NOTES=update proccess definition
 PROCESS_DEFINITION_ID=process definition id
+PROCESS_DEFINITION_IDS=process definition ids
 RELEASE_PROCCESS_DEFINITION_NOTES=release proccess definition
 QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=query proccess definition by id
 QUERY_PROCCESS_DEFINITION_LIST_NOTES=query proccess definition list
 QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=query proccess definition list paging
+QUERY_ALL_DEFINITION_LIST_NOTES=query all definition list
 PAGE_NO=page no
 PROCESS_INSTANCE_ID=process instance id
 PROCESS_INSTANCE_JSON=process instance info(json format)
@@ -170,6 +173,9 @@ LIMIT=limit
 VIEW_TREE_NOTES=view tree
 GET_NODE_LIST_BY_DEFINITION_ID_NOTES=get task node list by process definition id
 PROCESS_DEFINITION_ID_LIST=process definition id list
+QUERY_PROCCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=query proccess definition all by project id
+DELETE_PROCESS_DEFINITION_BY_ID_NOTES=delete process definition by process definition id
+BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=batch delete process definition by process definition ids
 QUERY_PROCESS_INSTANCE_BY_ID_NOTES=query process instance by process instance id
 DELETE_PROCESS_INSTANCE_BY_ID_NOTES=delete process instance by process instance id
 TASK_ID=task instance id

6
dockerfile/conf/escheduler/conf/i18n/messages_en_US.properties

@@ -111,6 +111,7 @@ UPDATE_PROJECT_NOTES=update project
 PROJECT_ID=project id
 QUERY_PROJECT_BY_ID_NOTES=query project info by project id
 QUERY_PROJECT_LIST_PAGING_NOTES=QUERY PROJECT LIST PAGING
+QUERY_ALL_PROJECT_LIST_NOTES=query all project list
 DELETE_PROJECT_BY_ID_NOTES=delete project by id
 QUERY_UNAUTHORIZED_PROJECT_NOTES=query unauthorized project
 QUERY_AUTHORIZED_PROJECT_NOTES=query authorized project
@@ -152,10 +153,12 @@ VERIFY_PROCCESS_DEFINITION_NAME_NOTES=verify proccess definition name
 LOGIN_NOTES=user login
 UPDATE_PROCCESS_DEFINITION_NOTES=update proccess definition
 PROCESS_DEFINITION_ID=process definition id
+PROCESS_DEFINITION_IDS=process definition ids
 RELEASE_PROCCESS_DEFINITION_NOTES=release proccess definition
 QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=query proccess definition by id
 QUERY_PROCCESS_DEFINITION_LIST_NOTES=query proccess definition list
 QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=query proccess definition list paging
+QUERY_ALL_DEFINITION_LIST_NOTES=query all definition list
 PAGE_NO=page no
 PROCESS_INSTANCE_ID=process instance id
 PROCESS_INSTANCE_JSON=process instance info(json format)
@@ -170,6 +173,9 @@ LIMIT=limit
 VIEW_TREE_NOTES=view tree
 GET_NODE_LIST_BY_DEFINITION_ID_NOTES=get task node list by process definition id
 PROCESS_DEFINITION_ID_LIST=process definition id list
+QUERY_PROCCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=query proccess definition all by project id
+DELETE_PROCESS_DEFINITION_BY_ID_NOTES=delete process definition by process definition id
+BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=batch delete process definition by process definition ids
 QUERY_PROCESS_INSTANCE_BY_ID_NOTES=query process instance by process instance id
 DELETE_PROCESS_INSTANCE_BY_ID_NOTES=delete process instance by process instance id
 TASK_ID=task instance id

6
dockerfile/conf/escheduler/conf/i18n/messages_zh_CN.properties

@@ -110,6 +110,7 @@ UPDATE_PROJECT_NOTES=更新项目
 PROJECT_ID=项目ID
 QUERY_PROJECT_BY_ID_NOTES=通过项目ID查询项目信息
 QUERY_PROJECT_LIST_PAGING_NOTES=分页查询项目列表
+QUERY_ALL_PROJECT_LIST_NOTES=查询所有项目
 DELETE_PROJECT_BY_ID_NOTES=删除项目通过ID
 QUERY_UNAUTHORIZED_PROJECT_NOTES=查询未授权的项目
 QUERY_AUTHORIZED_PROJECT_NOTES=查询授权项目
@@ -155,8 +156,10 @@ RELEASE_PROCCESS_DEFINITION_NOTES=发布流程定义
 QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=查询流程定义通过流程定义ID
 QUERY_PROCCESS_DEFINITION_LIST_NOTES=查询流程定义列表
 QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=分页查询流程定义列表
+QUERY_ALL_DEFINITION_LIST_NOTES=查询所有流程定义
 PAGE_NO=页码号
 PROCESS_INSTANCE_ID=流程实例ID
+PROCESS_INSTANCE_IDS=流程实例ID集合
 PROCESS_INSTANCE_JSON=流程实例信息(json格式)
 SCHEDULE_TIME=定时时间
 SYNC_DEFINE=更新流程实例的信息是否同步到流程定义
@@ -168,6 +171,9 @@ LIMIT=显示多少条
 VIEW_TREE_NOTES=树状图
 GET_NODE_LIST_BY_DEFINITION_ID_NOTES=获得任务节点列表通过流程定义ID
 PROCESS_DEFINITION_ID_LIST=流程定义id列表
+QUERY_PROCCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=查询流程定义通过项目ID
+DELETE_PROCESS_DEFINITION_BY_ID_NOTES=删除流程定义通过流程定义ID
+BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=批量删除流程定义通过流程定义ID集合
 QUERY_PROCESS_INSTANCE_BY_ID_NOTES=查询流程实例通过流程实例ID
 DELETE_PROCESS_INSTANCE_BY_ID_NOTES=删除流程实例通过流程实例ID
 TASK_ID=任务实例ID

8
docs/en_US/EasyScheduler Proposal.md

@@ -6,7 +6,7 @@ EasyScheduler is a distributed ETL scheduling engine with powerful DAG visualiza
 ## Proposal
-EasyScheduler provides many easy-to-use features to accelerate the engineer enficiency on data ETL workflow job. We propose a new concept of 'instance of process' and 'instance of task' to let developers to tuning their jobs on the running state of workflow instead of changing the task's template. Its main objectives are as follows:
+EasyScheduler provides many easy-to-use features to accelerate the engineer efficiency on data ETL workflow job. We propose a new concept of 'instance of process' and 'instance of task' to let developers to tuning their jobs on the running state of workflow instead of changing the task's template. Its main objectives are as follows:
 - Define the complex tasks' dependencies & triggers in a DAG graph by dragging and dropping.
 - Support cluster HA.
@@ -30,7 +30,7 @@ The codes are already under Apache License Version 2.0.
 We want to find a data processing tool with the following features:
-- Easy to use,developers can build a ETL process with a very simple drag and drop operation. not only for ETL developers,people who can't write code also can use this tool for ETL operation such as system adminitrator.
+- Easy to use,developers can build a ETL process with a very simple drag and drop operation. not only for ETL developers,people who can't write code also can use this tool for ETL operation such as system administrator.
 - Solving the problem of "complex task dependencies" , and it can monitor the ETL running status.
 - Support multi-tenant.
 - Support many task types: Shell, MR, Spark, SQL (mysql, postgresql, hive, sparksql), Python, Sub_Process, Procedure, etc.
@@ -73,7 +73,7 @@ Thus, it is very unlikely that EasyScheduler becomes orphaned.
 EasyScheduler's core developers have been running it as a community-oriented open source project for some time, several of them already have experience working with open source communities, they are also active in presto, alluxio and other projects. At the same time, we will learn more open source experiences by following the Apache way in our incubator journey.
-### Homogenous Developers
+### Homogeneous Developers
 The current developers work across a variety of organizations including Analysys, guandata and hydee;
 some individual developers are accepted as developers of EasyScheduler as well.
@@ -110,7 +110,7 @@ The project consists of three distinct codebases: core and document. The address
 ## Source and Intellectual Property Submission Plan
-As soon as EasyScheduler is approved to join Apache Incubator, Analysys will provide the Software Grant Agreement(SGA) and intial committers will submit ICLA(s). The code is already licensed under the Apache Software License, version 2.0.
+As soon as EasyScheduler is approved to join Apache Incubator, Analysys will provide the Software Grant Agreement(SGA) and initial committers will submit ICLA(s). The code is already licensed under the Apache Software License, version 2.0.
 ## External Dependencies

24
docs/en_US/architecture-design.md

@@ -6,7 +6,7 @@ Before explaining the architecture of the schedule system, let us first understa
 **DAG:** Full name Directed Acyclic Graph,referred to as DAG。Tasks in the workflow are assembled in the form of directed acyclic graphs, which are topologically traversed from nodes with zero indegrees of ingress until there are no successor nodes. For example, the following picture:
 <p align="center">
-  <img src="https://analysys.github.io/EasyScheduler/zh_CN/images/dag_examples_cn.jpg" alt="dag示例" width="60%" />
+  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/dag_examples_cn.jpg" alt="dag示例" width="60%" />
 <p align="center">
 <em>dag example</em>
 </p>
@@ -111,7 +111,7 @@ Before explaining the architecture of the schedule system, let us first understa
 The centralized design concept is relatively simple. The nodes in the distributed cluster are divided into two roles according to their roles:
 <p align="center">
-  <img src="https://analysys.github.io/EasyScheduler/zh_CN/images/master_slave.png" alt="master-slave role" width="50%" />
+  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/master_slave.png" alt="master-slave role" width="50%" />
 </p>
 - The role of Master is mainly responsible for task distribution and supervising the health status of Slave. It can dynamically balance the task to Slave, so that the Slave node will not be "busy" or "free".
@@ -125,7 +125,7 @@ Problems in the design of centralized :
 ###### Decentralization
 <p align="center"
-  <img src="https://analysys.github.io/EasyScheduler/zh_CN/images/decentralization.png" alt="decentralized" width="50%" />
+  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/decentralization.png" alt="decentralized" width="50%" />
 </p>
 - In the decentralized design, there is usually no Master/Slave concept, all roles are the same, the status is equal, the global Internet is a typical decentralized distributed system, networked arbitrary node equipment down machine , all will only affect a small range of features.
@@ -141,13 +141,13 @@ EasyScheduler uses ZooKeeper distributed locks to implement only one Master to e
 1. The core process algorithm for obtaining distributed locks is as follows
 <p align="center">
-  <img src="https://analysys.github.io/EasyScheduler/zh_CN/images/distributed_lock.png" alt="Get Distributed Lock Process" width="50%" />
+  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/distributed_lock.png" alt="Get Distributed Lock Process" width="50%" />
 </p>
 2. Scheduler thread distributed lock implementation flow chart in EasyScheduler:
 <p align="center">
-  <img src="https://analysys.github.io/EasyScheduler/zh_CN/images/distributed_lock_procss.png" alt="Get Distributed Lock Process" width="50%" />
+  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/distributed_lock_procss.png" alt="Get Distributed Lock Process" width="50%" />
 </p>
 ##### Third, the thread is insufficient loop waiting problem
@@ -156,7 +156,7 @@ EasyScheduler uses ZooKeeper distributed locks to implement only one Master to e
 - If a large number of sub-processes are nested in a large DAG, the following figure will result in a "dead" state:
 <p align="center">
-  <img src="https://analysys.github.io/EasyScheduler/zh_CN/images/lack_thread.png" alt="Thread is not enough to wait for loop" width="50%" />
+  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/lack_thread.png" alt="Thread is not enough to wait for loop" width="50%" />
 </p>
 In the above figure, MainFlowThread waits for SubFlowThread1 to end, SubFlowThread1 waits for SubFlowThread2 to end, SubFlowThread2 waits for SubFlowThread3 to end, and SubFlowThread3 waits for a new thread in the thread pool, then the entire DAG process cannot end, and thus the thread cannot be released. This forms the state of the child parent process loop waiting. At this point, the scheduling cluster will no longer be available unless a new Master is started to add threads to break such a "stuck."
@@ -180,7 +180,7 @@ Fault tolerance is divided into service fault tolerance and task retry. Service
 Service fault tolerance design relies on ZooKeeper's Watcher mechanism. The implementation principle is as follows:
 <p align="center">
-  <img src="https://analysys.github.io/EasyScheduler/zh_CN/images/fault-tolerant.png" alt="EasyScheduler Fault Tolerant Design" width="40%" />
+  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/fault-tolerant.png" alt="EasyScheduler Fault Tolerant Design" width="40%" />
 </p>
 The Master monitors the directories of other Masters and Workers. If the remove event is detected, the process instance is fault-tolerant or the task instance is fault-tolerant according to the specific business logic.
@@ -190,7 +190,7 @@ The Master monitors the directories of other Masters and Workers. If the remove
 - Master fault tolerance flow chart:
 <p align="center">
-  <img src="https://analysys.github.io/EasyScheduler/zh_CN/images/fault-tolerant_master.png" alt="Master Fault Tolerance Flowchart" width="40%" />
+  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/fault-tolerant_master.png" alt="Master Fault Tolerance Flowchart" width="40%" />
 </p>
 After the ZooKeeper Master is fault-tolerant, it is rescheduled by the Scheduler thread in EasyScheduler. It traverses the DAG to find the "Running" and "Submit Successful" tasks, and monitors the status of its task instance for the "Running" task. You need to determine whether the Task Queue already exists. If it exists, monitor the status of the task instance. If it does not exist, resubmit the task instance.
@@ -200,7 +200,7 @@ After the ZooKeeper Master is fault-tolerant, it is rescheduled by the Scheduler
 - Worker fault tolerance flow chart:
 <p align="center">
-  <img src="https://analysys.github.io/EasyScheduler/zh_CN/images/fault-tolerant_worker.png" alt="Worker Fault Tolerance Flowchart" width="40%" />
+  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/fault-tolerant_worker.png" alt="Worker Fault Tolerance Flowchart" width="40%" />
 </p>
 Once the Master Scheduler thread finds the task instance as "need to be fault tolerant", it takes over the task and resubmits.
@@ -239,13 +239,13 @@ In the early scheduling design, if there is no priority design and fair scheduli
 - The priority of the process definition is that some processes need to be processed before other processes. This can be configured at the start of the process or at the time of scheduled start. There are 5 levels, followed by HIGHEST, HIGH, MEDIUM, LOW, and LOWEST. As shown below
 <p align="center">
-  <img src="https://analysys.github.io/EasyScheduler/zh_CN/images/process_priority.png" alt="Process Priority Configuration" width="40%" />
+  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/process_priority.png" alt="Process Priority Configuration" width="40%" />
 </p>
 - The priority of the task is also divided into 5 levels, followed by HIGHEST, HIGH, MEDIUM, LOW, and LOWEST. As shown below
 <p align="center">
-  <img src="https://analysys.github.io/EasyScheduler/zh_CN/images/task_priority.png" alt="task priority configuration" width="35%" />
+  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/task_priority.png" alt="task priority configuration" width="35%" />
 </p>
 ##### VI. Logback and gRPC implement log access
@@ -256,7 +256,7 @@ In the early scheduling design, if there is no priority design and fair scheduli
 - Considering the lightweightness of EasyScheduler as much as possible, gRPC was chosen to implement remote access log information.
 <p align="center">
-  <img src="https://analysys.github.io/EasyScheduler/zh_CN/images/grpc.png" alt="grpc remote access" width="50%" />
+  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/grpc.png" alt="grpc remote access" width="50%" />
 </p>
 - We use a custom Logback FileAppender and Filter function to generate a log file for each task instance.
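
Note: the hunk above only rewrites image URLs, but the surrounding text describes per-task-instance log files built from a custom Logback FileAppender and Filter. As a hedged illustration of that idea (these class and thread names are assumptions for illustration, not the project's actual code):

```java
import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.core.filter.Filter;
import ch.qos.logback.core.spi.FilterReply;

// Minimal sketch: accept only events logged from a task instance's worker
// thread, so an appender wired to this filter writes one file per instance.
// The "TaskLogInfo-" thread-name prefix is assumed for illustration.
public class TaskLogFilter extends Filter<ILoggingEvent> {
    @Override
    public FilterReply decide(ILoggingEvent event) {
        return event.getThreadName().startsWith("TaskLogInfo-")
                ? FilterReply.ACCEPT
                : FilterReply.DENY;
    }
}
```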

8
docs/en_US/backend-deployment.md

@@ -69,7 +69,7 @@ Configure SSH secret-free login on deployment machines and other installation ma
 Execute scripts for creating tables and importing basic data
 ```
-sh ./script/create_escheduler.sh
+sh ./script/create-escheduler.sh
 ```
 #### Preparations 5: Modify the deployment directory permissions and operation parameters
@@ -158,11 +158,11 @@ After normal compilation, ./target/escheduler-{version}/ is generated in the cur
 * stop all services in the cluster
-` sh ./bin/stop_all.sh`
+` sh ./bin/stop-all.sh`
 * start all services in the cluster
-` sh ./bin/start_all.sh`
+` sh ./bin/start-all.sh`
 * start and stop one master server
@@ -201,7 +201,7 @@ sh ./bin/escheduler-daemon.sh stop alert-server
 Database upgrade is a function added in version 1.0.2. The database can be upgraded automatically by executing the following command:
 ```upgrade
-sh ./script/upgrade_escheduler.sh
+sh ./script/upgrade-escheduler.sh
 ```

2
docs/en_US/frontend-deployment.md

@@ -64,7 +64,7 @@ server {
         index index.html index.html;
     }
     location /escheduler {
-        proxy_pass http://192.168.xx.xx:12345; # nterface address (self-modifying)
+        proxy_pass http://192.168.xx.xx:12345; # interface address (self-modifying)
         proxy_set_header Host $host;
         proxy_set_header X-Real-IP $remote_addr;
         proxy_set_header x_real_ipP $remote_addr;

4
docs/en_US/frontend-development.md

@@ -63,7 +63,7 @@ Copy it to the corresponding directory of the server (front-end service static p
 Visit address` http://localhost:8888/#/`
-#### Start with node and daemon under Liunx
+#### Start with node and daemon under Linux
 Install pm2 `npm install -g pm2`
@@ -238,7 +238,7 @@ The internal common component of the `src/js/module/components` project writes t
 ├── conditions
     ├── conditions.vue
     └── _source
-        └── serach.vue
+        └── search.vue
         └── util.js
 ```

6
docs/en_US/system-manual.md

@@ -340,7 +340,7 @@ conf/common/hadoop.properties
 - Queues are used to execute spark, mapreduce and other programs, which require the use of "queue" parameters.
-- "Security" - > "Queue Manage" - > "Creat Queue"
+- "Security" - > "Queue Manage" - > "Create Queue"
 <p align="center">
   <img src="https://user-images.githubusercontent.com/53217792/61841945-078f4480-aec9-11e9-92fb-05b6f42f07d6.png" width="60%" />
 </p>
@@ -403,7 +403,7 @@ conf/common/hadoop.properties
     try {
         // execute
         response = httpclient.execute(httpPost);
-        // eponse status code 200
+        // response status code 200
         if (response.getStatusLine().getStatusCode() == 200) {
             String content = EntityUtils.toString(response.getEntity(), "UTF-8");
             System.out.println(content);
@@ -533,7 +533,7 @@ conf/common/hadoop.properties
   <img src="https://user-images.githubusercontent.com/53217792/61844464-1af2dd80-aed2-11e9-9486-6cf1b8585aa5.png" width="60%" />
 </p>
-- Datasource: The data source type of stored procedure supports MySQL and POSTGRRESQL, and chooses the corresponding data source.
+- Datasource: The data source type of stored procedure supports MySQL and POSTGRESQL, and chooses the corresponding data source.
 - Method: The method name of the stored procedure
 - Custom parameters: Custom parameter types of stored procedures support IN and OUT, and data types support nine data types: VARCHAR, INTEGER, LONG, FLOAT, DOUBLE, DATE, TIME, TIMESTAMP and BOOLEAN.

4
docs/en_US/upgrade.md

@@ -5,7 +5,7 @@
 ## 2. Stop all services of escheduler
-`sh ./script/stop_all.sh`
+`sh ./script/stop-all.sh`
 ## 3. Download the new version of the installation package
@@ -23,7 +23,7 @@
 - Execute database upgrade script
-`sh ./script/upgrade_escheduler.sh`
+`sh ./script/upgrade-escheduler.sh`
 ## 5. Backend service upgrade

4
docs/zh_CN/前端开发文档.md

@@ -64,7 +64,7 @@ npm install node-sass --unsafe-perm //单独安装node-sass依赖
 访问地址 `http://localhost:8888/#/`
-#### Liunx下使用node启动并且守护进程
+#### Linux下使用node启动并且守护进程
 安装pm2 `npm install -g pm2`
@@ -237,7 +237,7 @@ export default {
 ├── conditions
     ├── conditions.vue
     └── _source
-        └── serach.vue
+        └── search.vue
         └── util.js
 ```

4
docs/zh_CN/升级文档.md

@@ -5,7 +5,7 @@
 ## 2. 停止escheduler所有服务
-`sh ./script/stop_all.sh`
+`sh ./script/stop-all.sh`
 ## 3. 下载新版本的安装包
@@ -23,7 +23,7 @@
 - 执行数据库升级脚本
-`sh ./script/upgrade_escheduler.sh`
+`sh ./script/upgrade-escheduler.sh`
 ## 5. 后端服务升级

8
docs/zh_CN/后端部署文档.md

@@ -63,7 +63,7 @@ escheduler ALL=(ALL) NOPASSWD: ALL
 ```
 执行创建表和导入基础数据脚本
 ```
-sh ./script/create_escheduler.sh
+sh ./script/create-escheduler.sh
 ```
 #### 准备五: 修改部署目录权限及运行参数
@@ -164,11 +164,11 @@ install.sh : 一键部署脚本
 * 一键停止集群所有服务
-` sh ./bin/stop_all.sh`
+` sh ./bin/stop-all.sh`
 * 一键开启集群所有服务
-` sh ./bin/start_all.sh`
+` sh ./bin/start-all.sh`
 * 启停Master
@@ -206,5 +206,5 @@ sh ./bin/escheduler-daemon.sh stop alert-server
 ## 3、数据库升级
 数据库升级是在1.0.2版本增加的功能,执行以下命令即可自动升级数据库
 ```
-sh ./script/upgrade_escheduler.sh
+sh ./script/upgrade-escheduler.sh
 ```

4
docs/zh_CN/系统使用手册.md

@@ -110,7 +110,7 @@
 > 点击任务实例节点,点击**查看历史**,可以查看该工作流实例运行的该任务实例列表
 <p align="center">
-   <img src="https://analysys.github.io/EasyScheduler/zh_CN/images/task_history.png" width="60%" />
+   <img src="https://analysys.github.io/easyscheduler_docs_cn/images/task_history.png" width="60%" />
 </p>
@@ -391,7 +391,7 @@ conf/common/hadoop.properties
     try {
         // execute
         response = httpclient.execute(httpPost);
-        // eponse status code 200
+        // response status code 200
         if (response.getStatusLine().getStatusCode() == 200) {
             String content = EntityUtils.toString(response.getEntity(), "UTF-8");
             System.out.println(content);

26
docs/zh_CN/系统架构设计.md

@@ -5,7 +5,7 @@
 **DAG:** 全称Directed Acyclic Graph,简称DAG。工作流中的Task任务以有向无环图的形式组装起来,从入度为零的节点进行拓扑遍历,直到无后继节点为止。举例如下图:
 <p align="center">
-  <img src="https://analysys.github.io/EasyScheduler/zh_CN/images/dag_examples_cn.jpg" alt="dag示例" width="60%" />
+  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/dag_examples_cn.jpg" alt="dag示例" width="60%" />
 <p align="center">
 <em>dag示例</em>
 </p>
@@ -37,7 +37,7 @@
 #### 2.1 系统架构图
 <p align="center">
-  <img src="https://analysys.github.io/EasyScheduler/zh_CN/images/architecture.jpg" alt="系统架构图" width="70%" />
+  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/architecture.jpg" alt="系统架构图" width="70%" />
 <p align="center">
 <em>系统架构图</em>
 </p>
@@ -98,7 +98,7 @@
 中心化的设计理念比较简单,分布式集群中的节点按照角色分工,大体上分为两种角色:
 <p align="center">
-  <img src="https://analysys.github.io/EasyScheduler/zh_CN/images/master_slave.png" alt="master-slave角色" width="50%" />
+  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/master_slave.png" alt="master-slave角色" width="50%" />
 </p>
 - Master的角色主要负责任务分发并监督Slave的健康状态,可以动态的将任务均衡到Slave上,以致Slave节点不至于“忙死”或”闲死”的状态。
@@ -115,7 +115,7 @@
 ###### 去中心化
 <p align="center"
-  <img src="https://analysys.github.io/EasyScheduler/zh_CN/images/decentralization.png" alt="去中心化" width="50%" />
+  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/decentralization.png" alt="去中心化" width="50%" />
 </p>
 - 在去中心化设计里,通常没有Master/Slave的概念,所有的角色都是一样的,地位是平等的,全球互联网就是一个典型的去中心化的分布式系统,联网的任意节点设备down机,都只会影响很小范围的功能。
@@ -131,12 +131,12 @@
 EasyScheduler使用ZooKeeper分布式锁来实现同一时刻只有一台Master执行Scheduler,或者只有一台Worker执行任务的提交。
 1. 获取分布式锁的核心流程算法如下
 <p align="center">
-  <img src="https://analysys.github.io/EasyScheduler/zh_CN/images/distributed_lock.png" alt="获取分布式锁流程" width="50%" />
+  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/distributed_lock.png" alt="获取分布式锁流程" width="50%" />
 </p>
 2. EasyScheduler中Scheduler线程分布式锁实现流程图:
 <p align="center">
-  <img src="https://analysys.github.io/EasyScheduler/zh_CN/images/distributed_lock_procss.png" alt="获取分布式锁流程" width="50%" />
+  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/distributed_lock_procss.png" alt="获取分布式锁流程" width="50%" />
 </p>
@@ -146,7 +146,7 @@ EasyScheduler使用ZooKeeper分布式锁来实现同一时刻只有一台Master
 - 如果一个大的DAG中嵌套了很多子流程,如下图则会产生“死等”状态:
 <p align="center">
-  <img src="https://analysys.github.io/EasyScheduler/zh_CN/images/lack_thread.png" alt="线程不足循环等待问题" width="50%" />
+  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/lack_thread.png" alt="线程不足循环等待问题" width="50%" />
 </p>
 上图中MainFlowThread等待SubFlowThread1结束,SubFlowThread1等待SubFlowThread2结束, SubFlowThread2等待SubFlowThread3结束,而SubFlowThread3等待线程池有新线程,则整个DAG流程不能结束,从而其中的线程也不能释放。这样就形成的子父流程循环等待的状态。此时除非启动新的Master来增加线程来打破这样的”僵局”,否则调度集群将不能再使用。
@@ -169,7 +169,7 @@ EasyScheduler使用ZooKeeper分布式锁来实现同一时刻只有一台Master
 服务容错设计依赖于ZooKeeper的Watcher机制,实现原理如图:
 <p align="center">
-  <img src="https://analysys.github.io/EasyScheduler/zh_CN/images/fault-tolerant.png" alt="EasyScheduler容错设计" width="40%" />
+  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/fault-tolerant.png" alt="EasyScheduler容错设计" width="40%" />
 </p>
 其中Master监控其他Master和Worker的目录,如果监听到remove事件,则会根据具体的业务逻辑进行流程实例容错或者任务实例容错。
@@ -178,7 +178,7 @@ EasyScheduler使用ZooKeeper分布式锁来实现同一时刻只有一台Master
 - Master容错流程图:
 <p align="center">
-  <img src="https://analysys.github.io/EasyScheduler/zh_CN/images/fault-tolerant_master.png" alt="Master容错流程图" width="40%" />
+  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/fault-tolerant_master.png" alt="Master容错流程图" width="40%" />
 </p>
 ZooKeeper Master容错完成之后则重新由EasyScheduler中Scheduler线程调度,遍历 DAG 找到”正在运行”和“提交成功”的任务,对”正在运行”的任务监控其任务实例的状态,对”提交成功”的任务需要判断Task Queue中是否已经存在,如果存在则同样监控任务实例的状态,如果不存在则重新提交任务实例。
@@ -187,7 +187,7 @@ ZooKeeper Master容错完成之后则重新由EasyScheduler中Scheduler线程调
 - Worker容错流程图:
 <p align="center">
-  <img src="https://analysys.github.io/EasyScheduler/zh_CN/images/fault-tolerant_worker.png" alt="Worker容错流程图" width="40%" />
+  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/fault-tolerant_worker.png" alt="Worker容错流程图" width="40%" />
 </p>
 Master Scheduler线程一旦发现任务实例为” 需要容错”状态,则接管任务并进行重新提交。
@@ -224,12 +224,12 @@ Master Scheduler线程一旦发现任务实例为” 需要容错”状态,则
 - 其中流程定义的优先级是考虑到有些流程需要先于其他流程进行处理,这个可以在流程启动或者定时启动时配置,共有5级,依次为HIGHEST、HIGH、MEDIUM、LOW、LOWEST。如下图
 <p align="center">
-   <img src="https://analysys.github.io/EasyScheduler/zh_CN/images/process_priority.png" alt="流程优先级配置" width="40%" />
+   <img src="https://analysys.github.io/easyscheduler_docs_cn/images/process_priority.png" alt="流程优先级配置" width="40%" />
 </p>
 - 任务的优先级也分为5级,依次为HIGHEST、HIGH、MEDIUM、LOW、LOWEST。如下图
 <p align="center">
-   <img src="https://analysys.github.io/EasyScheduler/zh_CN/images/task_priority.png" alt="任务优先级配置" width="35%" />
+   <img src="https://analysys.github.io/easyscheduler_docs_cn/images/task_priority.png" alt="任务优先级配置" width="35%" />
 </p>
@@ -242,7 +242,7 @@ Master Scheduler线程一旦发现任务实例为” 需要容错”状态,则
 - 介于考虑到尽可能的EasyScheduler的轻量级性,所以选择了gRPC实现远程访问日志信息。
 <p align="center">
-   <img src="https://analysys.github.io/EasyScheduler/zh_CN/images/grpc.png" alt="grpc远程访问" width="50%" />
+   <img src="https://analysys.github.io/easyscheduler_docs_cn/images/grpc.png" alt="grpc远程访问" width="50%" />
 </p>

2
escheduler-alert/src/main/java/cn/escheduler/alert/AlertServer.java

@@ -39,7 +39,7 @@ public class AlertServer {
     private AlertSender alertSender;
-    private static AlertServer instance;
+    private static volatile AlertServer instance;
     private AlertServer() {
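
Note: marking the singleton field `volatile` only matters if `getInstance()` uses double-checked locking, and the body of that method is not shown in this hunk. Assuming the usual pattern, `volatile` stops other threads from observing a partially constructed `AlertServer`. A minimal sketch of the assumed pattern:

```java
public class AlertServer {
    // volatile forbids the reordering that could publish a half-built instance
    private static volatile AlertServer instance;

    private AlertServer() {}

    public static AlertServer getInstance() {
        if (instance == null) {                 // first check, lock-free fast path
            synchronized (AlertServer.class) {
                if (instance == null) {         // second check, under the lock
                    instance = new AlertServer();
                }
            }
        }
        return instance;
    }
}
```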

8
escheduler-alert/src/main/java/cn/escheduler/alert/utils/Constants.java

@@ -47,11 +47,15 @@ public class Constants {
     public static final String MAIL_SENDER = "mail.sender";
+    public static final String MAIL_USER = "mail.user";
     public static final String MAIL_PASSWD = "mail.passwd";
     public static final String XLS_FILE_PATH = "xls.file.path";
-    public static final String MAIL_HOST = "mail.host";
+    public static final String MAIL_HOST = "mail.smtp.host";
+    public static final String MAIL_PORT = "mail.smtp.port";
     public static final String MAIL_SMTP_AUTH = "mail.smtp.auth";
@@ -61,6 +65,8 @@ public class Constants {
     public static final String MAIL_SMTP_SSL_ENABLE = "mail.smtp.ssl.enable";
+    public static final String MAIL_SMTP_SSL_TRUST="mail.smtp.ssl.trust";
     public static final String TEXT_HTML_CHARSET_UTF_8 = "text/html;charset=utf-8";
     public static final String STRING_TRUE = "true";

65
escheduler-alert/src/main/java/cn/escheduler/alert/utils/MailUtils.java

@@ -56,6 +56,8 @@ public class MailUtils {
     public static final String mailSender = getString(Constants.MAIL_SENDER);
+    public static final String mailUser = getString(Constants.MAIL_USER);
     public static final String mailPasswd = getString(Constants.MAIL_PASSWD);
     public static final Boolean mailUseStartTLS = getBoolean(Constants.MAIL_SMTP_STARTTLS_ENABLE);
@@ -68,6 +70,8 @@ public class MailUtils {
     public static final String sslEnable = getString(Constants.MAIL_SMTP_SSL_ENABLE);
+    public static final String sslTrust = getString(Constants.MAIL_SMTP_SSL_TRUST);
     private static Template MAIL_TEMPLATE;
     static {
@@ -126,16 +130,10 @@ public class MailUtils {
         HtmlEmail email = new HtmlEmail();
         try {
-            // set the SMTP sending server, 163 as follows: "smtp.163.com"
-            email.setHostName(mailServerHost);
-            email.setSmtpPort(mailServerPort);
-            //set charset
+            Session session = getSession();
+            email.setMailSession(session);
+            email.setFrom(mailSender);
             email.setCharset(Constants.UTF_8);
-            // TLS verification
-            email.setTLS(Boolean.valueOf(starttlsEnable));
-            // SSL verification
-            email.setSSL(Boolean.valueOf(sslEnable));
             if (CollectionUtils.isNotEmpty(receivers)){
                 // receivers mail
                 for (String receiver : receivers) {
@@ -286,23 +284,11 @@ public class MailUtils {
         // Security.addProvider(new com.sun.net.ssl.internal.ssl.Provider());
         // final String SSL_FACTORY = "javax.net.ssl.SSLSocketFactory";
-        Properties props = new Properties();
-        props.setProperty(Constants.MAIL_HOST, mailServerHost);
-        props.setProperty(Constants.MAIL_SMTP_AUTH, Constants.STRING_TRUE);
-        props.setProperty(Constants.MAIL_TRANSPORT_PROTOCOL, mailProtocol);
-        props.setProperty(Constants.MAIL_SMTP_STARTTLS_ENABLE, starttlsEnable);
-        props.setProperty("mail.smtp.ssl.enable", sslEnable);
-        Authenticator auth = new Authenticator() {
-            @Override
-            protected PasswordAuthentication getPasswordAuthentication() {
-                // mail username and password
-                return new PasswordAuthentication(mailSender, mailPasswd);
-            }
-        };
-        // 1. The first step in creating mail: creating session
-        Session session = Session.getInstance(props, auth);
+        Session session = getSession();
         // Setting debug mode, can be turned off
         session.setDebug(false);
         // 2. creating mail: Creating a MimeMessage
         MimeMessage msg = new MimeMessage(session);
         // 3. set sender
@@ -314,6 +300,32 @@ public class MailUtils {
         return msg;
     }
+    /**
+     * get session
+     * @return
+     */
+    private static Session getSession() {
+        Properties props = new Properties();
+        props.setProperty(Constants.MAIL_HOST, mailServerHost);
+        props.setProperty(Constants.MAIL_PORT, String.valueOf(mailServerPort));
+        props.setProperty(Constants.MAIL_SMTP_AUTH, Constants.STRING_TRUE);
+        props.setProperty(Constants.MAIL_TRANSPORT_PROTOCOL, mailProtocol);
+        props.setProperty(Constants.MAIL_SMTP_STARTTLS_ENABLE, starttlsEnable);
+        props.setProperty(Constants.MAIL_SMTP_SSL_ENABLE, sslEnable);
+        props.setProperty(Constants.MAIL_SMTP_SSL_TRUST, sslTrust);
+        Authenticator auth = new Authenticator() {
+            @Override
+            protected PasswordAuthentication getPasswordAuthentication() {
+                // mail username and password
+                return new PasswordAuthentication(mailUser, mailPasswd);
+            }
+        };
+        Session session = Session.getInstance(props, auth);
+        return session;
+    }
     /**
      *
      * @param receiversCc
@@ -370,13 +382,6 @@
      * @throws EmailException
      */
     private static Map<String, Object> getStringObjectMap(String title, String content, ShowType showType, Map<String, Object> retMap, HtmlEmail email) throws EmailException {
-        // sender's mailbox
-        email.setFrom(mailSender, mailSender);
-        /**
-         * if you need authentication information, set authentication: username-password.
-         * The registered name and password of the sender on the mail server respectively
-         */
-        email.setAuthentication(mailSender, mailPasswd);
         /**
          * the subject of the message to be sent
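
Note: the net effect of this change is that both send paths (the commons-email `HtmlEmail` path and the raw `MimeMessage` path) now build their JavaMail `Session` in one place, and authentication uses the new `mail.user` login instead of the `mail.sender` address. A self-contained sketch of the same factory idea (standalone names, not the project's):

```java
import java.util.Properties;
import javax.mail.Authenticator;
import javax.mail.PasswordAuthentication;
import javax.mail.Session;

final class MailSessionFactory {
    // Build one authenticated SMTP session shared by every send path.
    static Session create(final String host, final int port, final String protocol,
                          final String starttls, final String ssl, final String sslTrust,
                          final String user, final String passwd) {
        Properties props = new Properties();
        props.setProperty("mail.smtp.host", host);
        props.setProperty("mail.smtp.port", String.valueOf(port));
        props.setProperty("mail.smtp.auth", "true");
        props.setProperty("mail.transport.protocol", protocol);
        props.setProperty("mail.smtp.starttls.enable", starttls);
        props.setProperty("mail.smtp.ssl.enable", ssl);
        props.setProperty("mail.smtp.ssl.trust", sslTrust);
        return Session.getInstance(props, new Authenticator() {
            @Override
            protected PasswordAuthentication getPasswordAuthentication() {
                // authenticate as the login user, not the sender address --
                // that mix-up is the bug this commit fixes
                return new PasswordAuthentication(user, passwd);
            }
        });
    }
}
```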

12
escheduler-alert/src/main/resources/alert.properties

@@ -3,15 +3,17 @@ alert.type=EMAIL
 # mail server configuration
 mail.protocol=SMTP
-mail.server.host=smtp.exmail.qq.com
+mail.server.host=xxx.xxx.com
 mail.server.port=25
-mail.sender=xxxxxxx
-mail.passwd=xxxxxxx
+mail.sender=xxx@xxx.com
+mail.user=xxx@xxx.com
+mail.passwd=111111
 # TLS
-mail.smtp.starttls.enable=false
+mail.smtp.starttls.enable=true
 # SSL
-mail.smtp.ssl.enable=true
+mail.smtp.ssl.enable=false
+mail.smtp.ssl.trust=xxx.xxx.com
 #xls file path,need create if not exist
 xls.file.path=/tmp/xls
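
Note: the new defaults turn STARTTLS on and implicit SSL off; the two transports are normally mutually exclusive, so only one of `mail.smtp.starttls.enable` / `mail.smtp.ssl.enable` should be `true`. A minimal sketch for sanity-checking the file (plain `java.util.Properties` is an assumption; the project itself reads these keys through its own `getString`/`getBoolean` helpers):

```java
import java.io.InputStream;
import java.util.Properties;

public class AlertConfigCheck {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // load alert.properties from the classpath
        try (InputStream in = AlertConfigCheck.class.getClassLoader()
                .getResourceAsStream("alert.properties")) {
            props.load(in);
        }
        boolean starttls = Boolean.parseBoolean(props.getProperty("mail.smtp.starttls.enable"));
        boolean ssl = Boolean.parseBoolean(props.getProperty("mail.smtp.ssl.enable"));
        if (starttls && ssl) {
            // both transports enabled at once is almost certainly a misconfiguration
            System.err.println("enable either STARTTLS or SSL, not both");
        }
    }
}
```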

40
escheduler-api/src/main/java/cn/escheduler/api/controller/ProcessDefinitionController.java

@@ -390,10 +390,14 @@ public class ProcessDefinitionController extends BaseController{
     * @param processDefinitionId
     * @return
     */
    @ApiOperation(value = "deleteProcessDefinitionById", notes= "DELETE_PROCESS_DEFINITION_BY_ID_NOTES")
    @ApiImplicitParams({
            @ApiImplicitParam(name = "processDefinitionId", value = "PROCESS_DEFINITION_ID", dataType = "Int", example = "100")
    })
    @GetMapping(value="/delete")
    @ResponseStatus(HttpStatus.OK)
-   public Result deleteProcessDefinitionById(@RequestAttribute(value = Constants.SESSION_USER) User loginUser,
+   public Result deleteProcessDefinitionById(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
-                                             @PathVariable String projectName,
+                                             @ApiParam(name = "projectName", value = "PROJECT_NAME", required = true) @PathVariable String projectName,
                                              @RequestParam("processDefinitionId") Integer processDefinitionId
    ){
        try{
@@ -415,10 +419,14 @@
     * @param processDefinitionIds
     * @return
     */
    @ApiOperation(value = "batchDeleteProcessDefinitionByIds", notes= "BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES")
    @ApiImplicitParams({
            @ApiImplicitParam(name = "processDefinitionIds", value = "PROCESS_DEFINITION_IDS", type = "String")
    })
    @GetMapping(value="/batch-delete")
    @ResponseStatus(HttpStatus.OK)
-   public Result batchDeleteProcessDefinitionByIds(@RequestAttribute(value = Constants.SESSION_USER) User loginUser,
+   public Result batchDeleteProcessDefinitionByIds(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
-                                                   @PathVariable String projectName,
+                                                   @ApiParam(name = "projectName", value = "PROJECT_NAME", required = true) @PathVariable String projectName,
                                                    @RequestParam("processDefinitionIds") String processDefinitionIds
    ){
        try{
@@ -459,4 +467,28 @@
        }
    }
/**
* query proccess definition all by project id
*
* @param loginUser
* @return
*/
@ApiOperation(value = "queryProccessDefinitionAllByProjectId", notes= "QUERY_PROCCESS_DEFINITION_All_BY_PROJECT_ID_NOTES")
@GetMapping(value="/queryProccessDefinitionAllByProjectId")
@ResponseStatus(HttpStatus.OK)
public Result queryProccessDefinitionAllByProjectId(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("projectId") Integer projectId){
try{
logger.info("query proccess definition list, login user:{}, project id:{}",
loginUser.getUserName(),projectId);
Map<String, Object> result = processDefinitionService.queryProccessDefinitionAllByProjectId(projectId);
return returnDataList(result);
}catch (Exception e){
logger.error(QUERY_PROCCESS_DEFINITION_LIST.getMsg(),e);
return error(QUERY_PROCCESS_DEFINITION_LIST.getCode(), QUERY_PROCCESS_DEFINITION_LIST.getMsg());
}
}
}
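Assuming this controller keeps the class-level mapping used by its siblings (projects/{projectName}/process, not visible in this hunk), a call to the new endpoint would look like GET /projects/{projectName}/process/queryProccessDefinitionAllByProjectId?projectId=10, returning the DATA_LIST payload built by the service below.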

21
escheduler-api/src/main/java/cn/escheduler/api/controller/ProjectController.java

@@ -279,5 +279,26 @@ public class ProjectController extends BaseController {
        }
    }
/**
* query all project list
* @param loginUser
* @return
*/
@ApiOperation(value = "queryAllProjectList", notes= "QUERY_ALL_PROJECT_LIST_NOTES")
@GetMapping(value = "/queryAllProjectList")
@ResponseStatus(HttpStatus.OK)
public Result queryAllProjectList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser) {
try {
logger.info("login user {}, query all project list", loginUser.getUserName());
Map<String, Object> result = projectService.queryAllProjectList();
return returnDataList(result);
} catch (Exception e) {
logger.error(LOGIN_USER_QUERY_PROJECT_LIST_PAGING_ERROR.getMsg(), e);
return error(Status.LOGIN_USER_QUERY_PROJECT_LIST_PAGING_ERROR.getCode(), Status.LOGIN_USER_QUERY_PROJECT_LIST_PAGING_ERROR.getMsg());
}
}
}

18
escheduler-api/src/main/java/cn/escheduler/api/service/ProcessDefinitionService.java

@@ -862,6 +862,24 @@ public class ProcessDefinitionService extends BaseDAGService {
    }
/**
* query proccess definition all by project id
*
* @param projectId
* @return
*/
public Map<String, Object> queryProccessDefinitionAllByProjectId(Integer projectId) {
HashMap<String, Object> result = new HashMap<>(5);
List<ProcessDefinition> resourceList = processDefineMapper.queryAllDefinitionList(projectId);
result.put(Constants.DATA_LIST, resourceList);
putMsg(result, Status.SUCCESS);
return result;
}
    /**
     * Encapsulates the TreeView structure
     *

58
escheduler-api/src/main/java/cn/escheduler/api/service/ProcessInstanceService.java

@@ -32,10 +32,7 @@ import cn.escheduler.common.model.TaskNodeRelation;
import cn.escheduler.common.process.Property;
import cn.escheduler.common.queue.ITaskQueue;
import cn.escheduler.common.queue.TaskQueueFactory;
-import cn.escheduler.common.utils.CollectionUtils;
-import cn.escheduler.common.utils.DateUtils;
-import cn.escheduler.common.utils.JSONUtils;
-import cn.escheduler.common.utils.ParameterUtils;
+import cn.escheduler.common.utils.*;
import cn.escheduler.common.utils.placeholder.BusinessTimeUtils;
import cn.escheduler.dao.ProcessDao;
import cn.escheduler.dao.mapper.*;
@@ -493,29 +490,64 @@
            return result;
        }
-       int delete = processDao.deleteWorkProcessInstanceById(processInstanceId);
-       processDao.deleteAllSubWorkProcessByParentId(processInstanceId);
-       processDao.deleteWorkProcessMapByParentId(processInstanceId);
-       if (delete > 0) {
+       // delete zk queue
        if (CollectionUtils.isNotEmpty(taskInstanceList)){
            for (TaskInstance taskInstance : taskInstanceList){
                // task instance priority
                int taskInstancePriority = taskInstance.getTaskInstancePriority().ordinal();
-               String nodeValue=processInstancePriority + "_" + processInstanceId + "_" +taskInstancePriority + "_" + taskInstance.getId();
+               StringBuilder nodeValueSb = new StringBuilder(100);
+               nodeValueSb.append(processInstancePriority)
+                       .append(UNDERLINE)
+                       .append(processInstanceId)
+                       .append(UNDERLINE)
+                       .append(taskInstancePriority)
+                       .append(UNDERLINE)
+                       .append(taskInstance.getId())
+                       .append(UNDERLINE);
+               int taskWorkerGroupId = processDao.getTaskWorkerGroupId(taskInstance);
+               WorkerGroup workerGroup = workerGroupMapper.queryById(taskWorkerGroupId);
+               if(workerGroup == null){
+                   nodeValueSb.append(DEFAULT_WORKER_ID);
+               }else {
+                   String ips = workerGroup.getIpList();
+                   StringBuilder ipSb = new StringBuilder(100);
+                   String[] ipArray = ips.split(COMMA);
+                   for (String ip : ipArray) {
+                       long ipLong = IpUtils.ipToLong(ip);
+                       ipSb.append(ipLong).append(COMMA);
+                   }
+                   if(ipSb.length() > 0) {
+                       ipSb.deleteCharAt(ipSb.length() - 1);
+                   }
+                   nodeValueSb.append(ipSb);
+               }
                try {
-                   logger.info("delete task queue node : {}",nodeValue);
-                   tasksQueue.removeNode(cn.escheduler.common.Constants.SCHEDULER_TASKS_QUEUE, nodeValue);
+                   logger.info("delete task queue node : {}",nodeValueSb.toString());
+                   tasksQueue.removeNode(cn.escheduler.common.Constants.SCHEDULER_TASKS_QUEUE, nodeValueSb.toString());
                }catch (Exception e){
-                   logger.error("delete task queue node : {}", nodeValue);
+                   logger.error("delete task queue node : {}", nodeValueSb.toString());
                }
            }
        }
+       // delete database cascade
+       int delete = processDao.deleteWorkProcessInstanceById(processInstanceId);
+       processDao.deleteAllSubWorkProcessByParentId(processInstanceId);
+       processDao.deleteWorkProcessMapByParentId(processInstanceId);
        if (delete > 0) {
            putMsg(result, Status.SUCCESS);
        } else {
            putMsg(result, Status.DELETE_PROCESS_INSTANCE_BY_ID_ERROR);
        }
        return result;
    }
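The queue node removed above is keyed by a compound string of the form processInstancePriority_processInstanceId_taskInstancePriority_taskId_workerIps, where worker IPs are rendered as longs. A worked example with hypothetical values, using IpUtils.ipToLong as in the hunk:

    // priority-2 process instance 10, task priority 1, task id 5,
    // worker group with IPs 192.168.1.1 and 192.168.1.2
    long ip1 = IpUtils.ipToLong("192.168.1.1"); // 3232235777
    long ip2 = IpUtils.ipToLong("192.168.1.2"); // 3232235778
    String nodeValue = "2_10_1_5_" + ip1 + "," + ip2;
    // -> "2_10_1_5_3232235777,3232235778"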

26
escheduler-api/src/main/java/cn/escheduler/api/service/ProjectService.java

@@ -367,4 +367,30 @@ public class ProjectService extends BaseService{
    }
/**
* query all project list
* @return
*/
public Map<String, Object> queryAllProjectList() {
Map<String, Object> result = new HashMap<>();
List<Project> projects = projectMapper.queryAllProjectList();
List<ProcessDefinition> processDefinitions = processDefinitionMapper.queryAll();
if(projects != null){
            Set<Integer> set = new HashSet<>();
for (ProcessDefinition processDefinition : processDefinitions){
set.add(processDefinition.getProjectId());
}
List<Project> tempDeletelist = new ArrayList<Project>();
for (Project project : projects) {
if(!set.contains(project.getId())){
tempDeletelist.add(project);
}
}
projects.removeAll(tempDeletelist);
}
result.put(Constants.DATA_LIST, projects);
putMsg(result,Status.SUCCESS);
return result;
}
}
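The loop above keeps only projects that own at least one process definition. An equivalent stream-based filter, as a sketch (assuming a java.util.stream.Collectors import), not the committed code:

    // collect the project ids that have definitions, then drop the rest
    Set<Integer> idsWithDefinitions = processDefinitions.stream()
            .map(ProcessDefinition::getProjectId)
            .collect(Collectors.toSet());
    projects.removeIf(p -> !idsWithDefinitions.contains(p.getId()));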

6
escheduler-api/src/main/resources/i18n/messages.properties

@@ -113,6 +113,7 @@ QUERY_PROJECT_BY_ID_NOTES=query project info by project id
QUERY_PROJECT_LIST_PAGING_NOTES=QUERY PROJECT LIST PAGING
DELETE_PROJECT_BY_ID_NOTES=delete project by id
QUERY_UNAUTHORIZED_PROJECT_NOTES=query unauthorized project
QUERY_ALL_PROJECT_LIST_NOTES=query all project list
QUERY_AUTHORIZED_PROJECT_NOTES=query authorized project
TASK_RECORD_TAG=task record related operation
QUERY_TASK_RECORD_LIST_PAGING_NOTES=query task record list paging
@@ -152,10 +153,12 @@ VERIFY_PROCCESS_DEFINITION_NAME_NOTES=verify proccess definition name
LOGIN_NOTES=user login
UPDATE_PROCCESS_DEFINITION_NOTES=update proccess definition
PROCESS_DEFINITION_ID=process definition id
PROCESS_DEFINITION_IDS=process definition ids
RELEASE_PROCCESS_DEFINITION_NOTES=release proccess definition
QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=query proccess definition by id
QUERY_PROCCESS_DEFINITION_LIST_NOTES=query proccess definition list
QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=query proccess definition list paging
QUERY_ALL_DEFINITION_LIST_NOTES=query all definition list
PAGE_NO=page no
PROCESS_INSTANCE_ID=process instance id
PROCESS_INSTANCE_JSON=process instance info(json format)
@@ -170,6 +173,9 @@ LIMIT=limit
VIEW_TREE_NOTES=view tree
GET_NODE_LIST_BY_DEFINITION_ID_NOTES=get task node list by process definition id
PROCESS_DEFINITION_ID_LIST=process definition id list
QUERY_PROCCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=query proccess definition all by project id
DELETE_PROCESS_DEFINITION_BY_ID_NOTES=delete process definition by process definition id
BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=batch delete process definition by process definition ids
QUERY_PROCESS_INSTANCE_BY_ID_NOTES=query process instance by process instance id
DELETE_PROCESS_INSTANCE_BY_ID_NOTES=delete process instance by process instance id
TASK_ID=task instance id

6
escheduler-api/src/main/resources/i18n/messages_en_US.properties

@@ -111,6 +111,7 @@ UPDATE_PROJECT_NOTES=update project
PROJECT_ID=project id
QUERY_PROJECT_BY_ID_NOTES=query project info by project id
QUERY_PROJECT_LIST_PAGING_NOTES=QUERY PROJECT LIST PAGING
QUERY_ALL_PROJECT_LIST_NOTES=query all project list
DELETE_PROJECT_BY_ID_NOTES=delete project by id
QUERY_UNAUTHORIZED_PROJECT_NOTES=query unauthorized project
QUERY_AUTHORIZED_PROJECT_NOTES=query authorized project
@@ -152,10 +153,12 @@ VERIFY_PROCCESS_DEFINITION_NAME_NOTES=verify proccess definition name
LOGIN_NOTES=user login
UPDATE_PROCCESS_DEFINITION_NOTES=update proccess definition
PROCESS_DEFINITION_ID=process definition id
PROCESS_DEFINITION_IDS=process definition ids
RELEASE_PROCCESS_DEFINITION_NOTES=release proccess definition
QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=query proccess definition by id
QUERY_PROCCESS_DEFINITION_LIST_NOTES=query proccess definition list
QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=query proccess definition list paging
QUERY_ALL_DEFINITION_LIST_NOTES=query all definition list
PAGE_NO=page no
PROCESS_INSTANCE_ID=process instance id
PROCESS_INSTANCE_JSON=process instance info(json format)
@@ -170,6 +173,9 @@ LIMIT=limit
VIEW_TREE_NOTES=view tree
GET_NODE_LIST_BY_DEFINITION_ID_NOTES=get task node list by process definition id
PROCESS_DEFINITION_ID_LIST=process definition id list
QUERY_PROCCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=query proccess definition all by project id
DELETE_PROCESS_DEFINITION_BY_ID_NOTES=delete process definition by process definition id
BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=batch delete process definition by process definition ids
QUERY_PROCESS_INSTANCE_BY_ID_NOTES=query process instance by process instance id
DELETE_PROCESS_INSTANCE_BY_ID_NOTES=delete process instance by process instance id
TASK_ID=task instance id

6
escheduler-api/src/main/resources/i18n/messages_zh_CN.properties

@@ -110,6 +110,7 @@ UPDATE_PROJECT_NOTES=更新项目
PROJECT_ID=项目ID
QUERY_PROJECT_BY_ID_NOTES=通过项目ID查询项目信息
QUERY_PROJECT_LIST_PAGING_NOTES=分页查询项目列表
QUERY_ALL_PROJECT_LIST_NOTES=查询所有项目
DELETE_PROJECT_BY_ID_NOTES=删除项目通过ID
QUERY_UNAUTHORIZED_PROJECT_NOTES=查询未授权的项目
QUERY_AUTHORIZED_PROJECT_NOTES=查询授权项目
@@ -155,8 +156,10 @@ RELEASE_PROCCESS_DEFINITION_NOTES=发布流程定义
QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=查询流程定义通过流程定义ID
QUERY_PROCCESS_DEFINITION_LIST_NOTES=查询流程定义列表
QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=分页查询流程定义列表
QUERY_ALL_DEFINITION_LIST_NOTES=查询所有流程定义
PAGE_NO=页码号
PROCESS_INSTANCE_ID=流程实例ID
PROCESS_INSTANCE_IDS=流程实例ID集合
PROCESS_INSTANCE_JSON=流程实例信息(json格式)
SCHEDULE_TIME=定时时间
SYNC_DEFINE=更新流程实例的信息是否同步到流程定义
@@ -168,6 +171,9 @@ LIMIT=显示多少条
VIEW_TREE_NOTES=树状图
GET_NODE_LIST_BY_DEFINITION_ID_NOTES=获得任务节点列表通过流程定义ID
PROCESS_DEFINITION_ID_LIST=流程定义id列表
QUERY_PROCCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=查询流程定义通过项目ID
BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=批量删除流程定义通过流程定义ID集合
DELETE_PROCESS_DEFINITION_BY_ID_NOTES=删除流程定义通过流程定义ID
QUERY_PROCESS_INSTANCE_BY_ID_NOTES=查询流程实例通过流程实例ID
DELETE_PROCESS_INSTANCE_BY_ID_NOTES=删除流程实例通过流程实例ID
TASK_ID=任务实例ID

6
escheduler-api/src/test/java/cn/escheduler/api/HttpClientTest.java

@@ -59,7 +59,7 @@ public class HttpClientTest {
        try {
            // execute
            response = httpclient.execute(httpPost);
-           // eponse status code 200
+           // response status code 200
            if (response.getStatusLine().getStatusCode() == 200) {
                String content = EntityUtils.toString(response.getEntity(), "UTF-8");
                logger.info(content);
@@ -96,7 +96,7 @@
        try {
            // execute http get request
            response = httpclient.execute(httpGet);
-           // reponse status code 200
+           // response status code 200
            if (response.getStatusLine().getStatusCode() == 200) {
                String content = EntityUtils.toString(response.getEntity(), "UTF-8");
                logger.info("start--------------->");
@@ -139,7 +139,7 @@
        try {
            // execute http get request
            response = httpclient.execute(httpGet);
-           // reponse status code 200
+           // response status code 200
            if (response.getStatusLine().getStatusCode() == 200) {
                String content = EntityUtils.toString(response.getEntity(), "UTF-8");
                logger.info("start--------------->");

14
escheduler-common/src/main/java/cn/escheduler/common/Constants.java

@@ -906,4 +906,18 @@ public final class Constants {
     * hive conf
     */
    public static final String HIVE_CONF = "hiveconf:";

    // flink task
    public static final String FLINK_YARN_CLUSTER = "yarn-cluster";
    public static final String FLINK_RUN_MODE = "-m";
    public static final String FLINK_YARN_SLOT = "-ys";
    public static final String FLINK_APP_NAME = "-ynm";
    public static final String FLINK_TASK_MANAGE = "-yn";
    public static final String FLINK_JOB_MANAGE_MEM = "-yjm";
    public static final String FLINK_TASK_MANAGE_MEM = "-ytm";
    public static final String FLINK_detach = "-d";
    public static final String FLINK_MAIN_CLASS = "-c";
}
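Taken together, these constants are the flink CLI's YARN submission flags; a command line assembled from them might read as follows (all values hypothetical, for illustration only):

    flink run -m yarn-cluster -ys 2 -ynm MyFlinkApp -yn 3 -yjm 1024 -ytm 2048 -d -c com.example.Main ./app.jar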

3
escheduler-common/src/main/java/cn/escheduler/common/enums/TaskType.java

@@ -29,8 +29,9 @@ public enum TaskType {
     * 5 SPARK
     * 6 PYTHON
     * 7 DEPENDENT
     * 8 FLINK
     */
-   SHELL,SQL, SUB_PROCESS,PROCEDURE,MR,SPARK,PYTHON,DEPENDENT;
+   SHELL,SQL, SUB_PROCESS,PROCEDURE,MR,SPARK,PYTHON,DEPENDENT,FLINK;

    public static boolean typeIsNormalTask(String typeName) {
        TaskType taskType = TaskType.valueOf(typeName);

2
escheduler-common/src/main/java/cn/escheduler/common/shell/AbstractShell.java

@@ -157,7 +157,7 @@ public abstract class AbstractShell {
        BufferedReader inReader =
                new BufferedReader(new InputStreamReader(process
                        .getInputStream()));
-       final StringBuffer errMsg = new StringBuffer();
+       final StringBuilder errMsg = new StringBuilder();

        // read error and input streams as this would free up the buffers
        // free the error stream buffer

219
escheduler-common/src/main/java/cn/escheduler/common/task/flink/FlinkParameters.java

@@ -0,0 +1,219 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.task.flink;
import cn.escheduler.common.enums.ProgramType;
import cn.escheduler.common.process.ResourceInfo;
import cn.escheduler.common.task.AbstractParameters;
import java.util.List;
import java.util.stream.Collectors;
/**
 * flink parameters
 */
public class FlinkParameters extends AbstractParameters {
/**
* major jar
*/
private ResourceInfo mainJar;
/**
* major class
*/
private String mainClass;
/**
* deploy mode yarn-cluster yarn-client yarn-local
*/
private String deployMode;
/**
* arguments
*/
private String mainArgs;
  /**
   * number of slots
   */
  private int slot;

  /**
   * name of the Yarn application
   */
  private String appName;

  /**
   * number of taskManagers
   */
  private int taskManager;

  /**
   * jobManager memory size
   */
  private String jobManagerMemory;

  /**
   * taskManager memory size
   */
  private String taskManagerMemory;
/**
* resource list
*/
private List<ResourceInfo> resourceList;
/**
* The YARN queue to submit to
*/
private String queue;
/**
* other arguments
*/
private String others;
/**
* program type
* 0 JAVA,1 SCALA,2 PYTHON
*/
private ProgramType programType;
public ResourceInfo getMainJar() {
return mainJar;
}
public void setMainJar(ResourceInfo mainJar) {
this.mainJar = mainJar;
}
public String getMainClass() {
return mainClass;
}
public void setMainClass(String mainClass) {
this.mainClass = mainClass;
}
public String getDeployMode() {
return deployMode;
}
public void setDeployMode(String deployMode) {
this.deployMode = deployMode;
}
public String getMainArgs() {
return mainArgs;
}
public void setMainArgs(String mainArgs) {
this.mainArgs = mainArgs;
}
public int getSlot() {
return slot;
}
public void setSlot(int slot) {
this.slot = slot;
}
public String getAppName() {
return appName;
}
public void setAppName(String appName) {
this.appName = appName;
}
public int getTaskManager() {
return taskManager;
}
public void setTaskManager(int taskManager) {
this.taskManager = taskManager;
}
public String getJobManagerMemory() {
return jobManagerMemory;
}
public void setJobManagerMemory(String jobManagerMemory) {
this.jobManagerMemory = jobManagerMemory;
}
public String getTaskManagerMemory() {
return taskManagerMemory;
}
public void setTaskManagerMemory(String taskManagerMemory) {
this.taskManagerMemory = taskManagerMemory;
}
public String getQueue() {
return queue;
}
public void setQueue(String queue) {
this.queue = queue;
}
public List<ResourceInfo> getResourceList() {
return resourceList;
}
public void setResourceList(List<ResourceInfo> resourceList) {
this.resourceList = resourceList;
}
public String getOthers() {
return others;
}
public void setOthers(String others) {
this.others = others;
}
public ProgramType getProgramType() {
return programType;
}
public void setProgramType(ProgramType programType) {
this.programType = programType;
}
@Override
public boolean checkParameters() {
return mainJar != null && programType != null;
}
@Override
public List<String> getResourceFilesList() {
if(resourceList !=null ) {
this.resourceList.add(mainJar);
return resourceList.stream()
.map(p -> p.getRes()).collect(Collectors.toList());
}
return null;
}
}
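One quirk worth noting in getResourceFilesList(): it appends mainJar into the resourceList field itself, so repeated calls accumulate duplicates. A side-effect-free variant, as a sketch only (assumes a java.util.ArrayList import), not the committed code:

    @Override
    public List<String> getResourceFilesList() {
        if (resourceList == null) {
            return null;
        }
        // copy before appending the main jar so the field is never mutated
        List<ResourceInfo> all = new ArrayList<>(resourceList);
        all.add(mainJar);
        return all.stream().map(ResourceInfo::getRes).collect(Collectors.toList());
    }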

2
escheduler-common/src/main/java/cn/escheduler/common/thread/ThreadPoolExecutors.java

@@ -40,7 +40,7 @@ public class ThreadPoolExecutors {
    private static final Logger logger = LoggerFactory.getLogger(ThreadPoolExecutors.class);
    private static Executor executor;
-   private static ThreadPoolExecutors threadPoolExecutors;
+   private static volatile ThreadPoolExecutors threadPoolExecutors;

    private ThreadPoolExecutors(){}
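The volatile qualifier matters because the accessor (not shown in this hunk) presumably uses double-checked locking; without volatile, another thread could observe a partially constructed instance. A typical accessor that the change makes safe looks like this sketch:

    public static ThreadPoolExecutors getInstance() {
        if (threadPoolExecutors == null) {                    // first check, no lock
            synchronized (ThreadPoolExecutors.class) {
                if (threadPoolExecutors == null) {            // second check, under lock
                    threadPoolExecutors = new ThreadPoolExecutors();
                }
            }
        }
        return threadPoolExecutors;  // volatile guarantees a fully constructed instance
    }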

14
escheduler-common/src/main/java/cn/escheduler/common/utils/MysqlUtils.java → escheduler-common/src/main/java/cn/escheduler/common/utils/ConnectionUtils.java

@@ -21,16 +21,16 @@ import org.slf4j.LoggerFactory;
import java.sql.*;

-public class MysqlUtils {
+public class ConnectionUtils {

-   public static final Logger logger = LoggerFactory.getLogger(MysqlUtils.class);
+   public static final Logger logger = LoggerFactory.getLogger(ConnectionUtils.class);

-   private static MysqlUtils instance;
+   private static ConnectionUtils instance;

-   MysqlUtils() {
+   ConnectionUtils() {
    }

-   public static MysqlUtils getInstance() {
+   public static ConnectionUtils getInstance() {
        if (null == instance) {
            syncInit();
        }
@@ -39,7 +39,7 @@
    private static synchronized void syncInit() {
        if (instance == null) {
-           instance = new MysqlUtils();
+           instance = new ConnectionUtils();
        }
    }
@@ -76,7 +76,7 @@
    public static void releaseResource(ResultSet rs, PreparedStatement ps, Connection conn) {
-       MysqlUtils.getInstance().release(rs,ps,conn);
+       ConnectionUtils.getInstance().release(rs,ps,conn);
        if (null != rs) {
            try {
                rs.close();

3
escheduler-common/src/main/java/cn/escheduler/common/utils/TaskParametersUtils.java

@@ -19,6 +19,7 @@ package cn.escheduler.common.utils;
import cn.escheduler.common.enums.TaskType;
import cn.escheduler.common.task.AbstractParameters;
import cn.escheduler.common.task.dependent.DependentParameters;
+import cn.escheduler.common.task.flink.FlinkParameters;
import cn.escheduler.common.task.mr.MapreduceParameters;
import cn.escheduler.common.task.procedure.ProcedureParameters;
import cn.escheduler.common.task.python.PythonParameters;
@@ -63,6 +64,8 @@ public class TaskParametersUtils {
            return JSONUtils.parseObject(parameter, PythonParameters.class);
        case DEPENDENT:
            return JSONUtils.parseObject(parameter, DependentParameters.class);
+       case FLINK:
+           return JSONUtils.parseObject(parameter, FlinkParameters.class);
        default:
            return null;
    }

8
escheduler-dao/src/main/java/cn/escheduler/dao/DaoFactory.java

@@ -48,10 +48,10 @@ public class DaoFactory {
        synchronized (daoMap) {
            if (!daoMap.containsKey(className)) {
                try {
-                   T t = BeanContext.getBean(clazz);
-                   // T t = clazz.getConstructor().newInstance();
-                   // // instance initialization
-                   // t.init();
+                   // T t = BeanContext.getBean(clazz);
+                   T t = clazz.getConstructor().newInstance();
+                   // instance initialization
+                   t.init();
                    daoMap.put(className, t);
                } catch (Exception e) {
                    logger.error(e.getMessage(), e);
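This flips the factory from pulling DAOs out of the Spring context to instantiating them reflectively and calling init() directly, presumably so the standalone upgrade shell classes further down can run without a Spring container.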

2
escheduler-dao/src/main/java/cn/escheduler/dao/ProcessDao.java

@@ -1739,7 +1739,7 @@ public class ProcessDao extends AbstractBaseDao {
     * @param processInstanceId
     * @return
     */
-   public String queryQueueByProcessInstanceId(int processInstanceId){
+   public String queryUserQueueByProcessInstanceId(int processInstanceId){
        return userMapper.queryQueueByProcessInstanceId(processInstanceId);
    }

22
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessDefinitionMapper.java

@@ -255,4 +255,26 @@ public interface ProcessDefinitionMapper {
    int updateReceiversAndCcById(@Param("receivers") String receivers,
                                 @Param("receiversCc") String receiversCc,
                                 @Param("processDefinitionId") int processDefinitionId);
/**
* query all
* @return
*/
@Results(value = {@Result(property = "id", column = "id", id = true, javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "name", column = "name", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "version", column = "version", javaType = Integer.class, jdbcType = JdbcType.TINYINT),
@Result(property = "releaseState", column = "release_state", typeHandler = EnumOrdinalTypeHandler.class, javaType = ReleaseState.class, jdbcType = JdbcType.TINYINT),
@Result(property = "projectId", column = "project_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "userId", column = "user_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "desc", column = "desc", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "createTime", column = "create_time", javaType = Timestamp.class, jdbcType = JdbcType.DATE),
@Result(property = "updateTime", column = "update_time", javaType = Timestamp.class, jdbcType = JdbcType.DATE),
@Result(property = "flag", column = "flag", typeHandler = EnumOrdinalTypeHandler.class, javaType = Flag.class, jdbcType = JdbcType.TINYINT),
@Result(property = "userName", column = "user_name", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "projectName", column = "project_name", javaType = String.class, jdbcType = JdbcType.VARCHAR)
})
@SelectProvider(type = ProcessDefinitionMapperProvider.class, method = "queryAll")
List<ProcessDefinition> queryAll();
}

15
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessDefinitionMapperProvider.java

@@ -297,4 +297,19 @@ public class ProcessDefinitionMapperProvider {
        }
    }.toString();
}

    /**
     * query all
     * @return
     */
    public String queryAll() {
        return new SQL() {{
            SELECT("id,name,version,release_state,project_id,user_id,`desc`,create_time,update_time,flag,global_params,receivers,receivers_cc");
            FROM(TABLE_NAME);
            ORDER_BY("create_time desc ");
        }}.toString();
    }
}
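Worth noting: the @Results mapping on queryAll() in the mapper interface declares columns (user_name, project_name, timeout, tenant_id) that the SELECT above never returns; under MyBatis those properties will simply stay null.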

8
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessInstanceMapperProvider.java

@@ -354,7 +354,7 @@ public class ProcessInstanceMapperProvider {
     * @return
     */
    public String listByStatus(Map<String, Object> parameter) {
-       StringBuffer strStates = new StringBuffer();
+       StringBuilder strStates = new StringBuilder();
        int[] stateArray = (int[]) parameter.get("states");
        for(int i=0;i<stateArray.length;i++){
@@ -387,7 +387,7 @@
     * @return
     */
    public String queryByHostAndStatus(Map<String, Object> parameter) {
-       StringBuffer strStates = new StringBuffer();
+       StringBuilder strStates = new StringBuilder();
        int[] stateArray = (int[]) parameter.get("states");
        for(int i=0;i<stateArray.length;i++){
@@ -425,7 +425,7 @@
     * @return
     */
    public String setFailoverByHostAndStateArray(Map<String, Object> parameter) {
-       StringBuffer strStates = new StringBuffer();
+       StringBuilder strStates = new StringBuilder();
        int[] stateArray = (int[]) parameter.get("states");
        for(int i=0;i<stateArray.length;i++){
@@ -563,7 +563,7 @@
    }

    public String queryLastRunningProcess(Map<String, Object> parameter) {
-       StringBuffer strStates = new StringBuffer();
+       StringBuilder strStates = new StringBuilder();
        int[] stateArray = (int[]) parameter.get("states");
        for(int i=0;i<stateArray.length;i++){
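These builders are method-local, so the unsynchronized StringBuilder is strictly better than StringBuffer here. A minimal sketch of the pattern being converted (hypothetical states array):

    // build a comma-separated state list for an SQL IN clause
    int[] stateArray = {1, 3, 7};
    StringBuilder strStates = new StringBuilder();
    for (int i = 0; i < stateArray.length; i++) {
        strStates.append(stateArray[i]);
        if (i < stateArray.length - 1) {
            strStates.append(",");
        }
    }
    // strStates.toString() -> "1,3,7"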

16
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProjectMapper.java

@@ -192,5 +192,21 @@ public interface ProjectMapper {
    @SelectProvider(type = ProjectMapperProvider.class, method = "queryProjectExceptUserId")
    List<Project> queryProjectExceptUserId(@Param("userId") Integer userId);
/**
* query all project list
* @return
*/
@Results(value = {@Result(property = "id", column = "id", id = true, javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "userId", column = "user_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "name", column = "name", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "userName", column = "user_name", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "desc", column = "desc", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "perm", column = "perm", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "createTime", column = "create_time", javaType = Timestamp.class, jdbcType = JdbcType.DATE),
@Result(property = "updateTime", column = "update_time", javaType = Timestamp.class, jdbcType = JdbcType.DATE),
})
@SelectProvider(type = ProjectMapperProvider.class, method = "queryAllProjectList")
List<Project> queryAllProjectList();
}

13
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProjectMapperProvider.java

@@ -240,4 +240,17 @@ public class ProjectMapperProvider {
    }}.toString();
}
/**
* query all project list
* @return
*/
public String queryAllProjectList() {
return new SQL() {{
SELECT("*");
FROM(TABLE_NAME);
WHERE("flag = 1");
ORDER_BY("create_time desc");
}}.toString();
}
}
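For reference, this builder renders to roughly SELECT * FROM <TABLE_NAME> WHERE (flag = 1) ORDER BY create_time desc, i.e. only non-deleted projects (flag = 1), newest first.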

2
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ScheduleMapperProvider.java

@@ -163,7 +163,7 @@ public class ScheduleMapperProvider {
     */
    public String selectAllByProcessDefineArray(Map<String, Object> parameter) {
-       StringBuffer strIds = new StringBuffer();
+       StringBuilder strIds = new StringBuilder();
        int[] idsArray = (int[]) parameter.get("processDefineIds");
        for(int i=0;i<idsArray.length;i++){
            strIds.append(idsArray[i]);

8
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/TaskInstanceMapperProvider.java

@@ -213,7 +213,7 @@ public class TaskInstanceMapperProvider {
     * @return
     */
    public String queryByHostAndStatus(Map<String, Object> parameter) {
-       StringBuffer strStates = new StringBuffer();
+       StringBuilder strStates = new StringBuilder();
        int[] stateArray = (int[]) parameter.get("states");
        for(int i=0;i<stateArray.length;i++){
            strStates.append(stateArray[i]);
@@ -246,7 +246,7 @@
     * @return
     */
    public String queryLimitNumByHostAndStatus(Map<String, Object> parameter) {
-       StringBuffer strStates = new StringBuffer();
+       StringBuilder strStates = new StringBuilder();
        int[] stateArray = (int[]) parameter.get("states");
        for(int i=0;i<stateArray.length;i++){
            strStates.append(stateArray[i]);
@@ -278,7 +278,7 @@
     * @return
     */
    public String setFailoverByHostAndStateArray(Map<String, Object> parameter) {
-       StringBuffer strStates = new StringBuffer();
+       StringBuilder strStates = new StringBuilder();
        int[] stateArray = (int[]) parameter.get("states");
        int state = ExecutionStatus.NEED_FAULT_TOLERANCE.ordinal();
        for(int i=0;i<stateArray.length;i++){
@@ -419,7 +419,7 @@
     */
    public String countTask(Map<String, Object> parameter){
-       StringBuffer taskIdsStr = new StringBuffer();
+       StringBuilder taskIdsStr = new StringBuilder();
        int[] stateArray = (int[]) parameter.get("taskIds");
        for(int i=0;i<stateArray.length;i++){
            taskIdsStr.append(stateArray[i]);

48
escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/EschedulerManager.java → escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/DolphinSchedulerManager.java

@@ -16,6 +16,7 @@
 */
package cn.escheduler.dao.upgrade;

+import cn.escheduler.common.enums.DbType;
import cn.escheduler.common.utils.SchemaUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -25,30 +26,51 @@ import java.util.List;
/**
 * upgrade manager
 */
-public class EschedulerManager {
-   private static final Logger logger = LoggerFactory.getLogger(EschedulerManager.class);
-   UpgradeDao upgradeDao = UpgradeDao.getInstance();
+public class DolphinSchedulerManager {
+   private static final Logger logger = LoggerFactory.getLogger(DolphinSchedulerManager.class);
+   UpgradeDao upgradeDao;
+
+   private void initUpgradeDao() {
+       DbType dbType = UpgradeDao.getDbType();
+       if (dbType != null) {
+           switch (dbType) {
+               case MYSQL:
+                   upgradeDao = MysqlUpgradeDao.getInstance();
+                   break;
+               case POSTGRESQL:
+                   upgradeDao = PostgresqlUpgradeDao.getInstance();
+                   break;
+               default:
+                   logger.error("not support sql type: {},can't upgrade", dbType);
+                   throw new IllegalArgumentException("not support sql type,can't upgrade");
+           }
+       }
+   }
+
+   public DolphinSchedulerManager() {
+       initUpgradeDao();
+   }

-   public void initEscheduler() {
+   public void initDolphinScheduler() {
        // Determines whether the escheduler table structure has been init
        if(upgradeDao.isExistsTable("t_escheduler_version") || upgradeDao.isExistsTable("t_escheduler_queue")) {
            logger.info("The database has been initialized. Skip the initialization step");
            return;
        }
-       this.initEschedulerSchema();
+       this.initDolphinSchedulerSchema();
    }

-   public void initEschedulerSchema() {
-       logger.info("Start initializing the escheduler manager mysql table structure");
-       upgradeDao.initEschedulerSchema();
+   public void initDolphinSchedulerSchema() {
+       logger.info("Start initializing the DolphinScheduler manager table structure");
+       upgradeDao.initSchema();
    }

    /**
-    * upgrade escheduler
+    * upgrade DolphinScheduler
     */
-   public void upgradeEscheduler() throws Exception{
+   public void upgradeDolphinScheduler() throws Exception{
        // Gets a list of all upgrades
        List<String> schemaList = SchemaUtils.getAllSchemaList();
@@ -76,11 +98,11 @@
        schemaVersion = schemaDir.split("_")[0];
        if(SchemaUtils.isAGreatVersion(schemaVersion , version)) {
-           logger.info("upgrade escheduler metadata version from " + version + " to " + schemaVersion);
-           logger.info("Begin upgrading escheduler's mysql table structure");
-           upgradeDao.upgradeEscheduler(schemaDir);
+           logger.info("upgrade DolphinScheduler metadata version from " + version + " to " + schemaVersion);
+           logger.info("Begin upgrading DolphinScheduler's table structure");
+           upgradeDao.upgradeDolphinScheduler(schemaDir);
            if(SchemaUtils.isAGreatVersion(version,"1.0.1")){
                version = upgradeDao.getCurrentVersion();
            }else {

104
escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/MysqlUpgradeDao.java

@@ -0,0 +1,104 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.dao.upgrade;
import cn.escheduler.common.utils.ConnectionUtils;
import cn.escheduler.dao.datasource.ConnectionFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
public class MysqlUpgradeDao extends UpgradeDao {
public static final Logger logger = LoggerFactory.getLogger(MysqlUpgradeDao.class);
private static final String T_VERSION_NAME = "t_escheduler_version";
private static final String rootDir = System.getProperty("user.dir");
@Override
protected void init() {
}
private static class MysqlUpgradeDaoHolder {
private static final MysqlUpgradeDao INSTANCE = new MysqlUpgradeDao();
}
private MysqlUpgradeDao() {
}
public static final MysqlUpgradeDao getInstance() {
return MysqlUpgradeDaoHolder.INSTANCE;
}
/**
* Determines whether a table exists
* @param tableName
* @return
*/
public boolean isExistsTable(String tableName) {
    Connection conn = null;
    ResultSet rs = null;
    try {
        conn = ConnectionFactory.getDataSource().getConnection();
        rs = conn.getMetaData().getTables(null, null, tableName, null);
        return rs.next();
    } catch (SQLException e) {
        logger.error(e.getMessage(),e);
        throw new RuntimeException(e.getMessage(),e);
    } finally {
        // release the result set as well, not just the connection
        ConnectionUtils.releaseResource(rs, null, conn);
    }
}
/**
* Determines whether a field exists in the specified table
* @param tableName
* @param columnName
* @return
*/
public boolean isExistsColumn(String tableName,String columnName) {
    Connection conn = null;
    ResultSet rs = null;
    try {
        conn = ConnectionFactory.getDataSource().getConnection();
        rs = conn.getMetaData().getColumns(null,null,tableName,columnName);
        return rs.next();
    } catch (SQLException e) {
        logger.error(e.getMessage(),e);
        throw new RuntimeException(e.getMessage(),e);
    } finally {
        // release the result set as well, not just the connection
        ConnectionUtils.releaseResource(rs, null, conn);
    }
}
}

132
escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/PostgresqlUpgradeDao.java

@@ -0,0 +1,132 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.dao.upgrade;
import cn.escheduler.common.utils.ConnectionUtils;
import cn.escheduler.dao.datasource.ConnectionFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
public class PostgresqlUpgradeDao extends UpgradeDao {
public static final Logger logger = LoggerFactory.getLogger(PostgresqlUpgradeDao.class);
private static final String T_VERSION_NAME = "t_escheduler_version";
private static final String rootDir = System.getProperty("user.dir");
private static final String schema = getSchema();
@Override
protected void init() {
}
private static class PostgresqlUpgradeDaoHolder {
private static final PostgresqlUpgradeDao INSTANCE = new PostgresqlUpgradeDao();
}
private PostgresqlUpgradeDao() {
}
public static final PostgresqlUpgradeDao getInstance() {
return PostgresqlUpgradeDaoHolder.INSTANCE;
}
@Override
public void initSchema(String initSqlPath) {
super.initSchema(initSqlPath);
}
private static String getSchema(){
    Connection conn = null;
    PreparedStatement pstmt = null;
    ResultSet resultSet = null;
    try {
        conn = ConnectionFactory.getDataSource().getConnection();
        pstmt = conn.prepareStatement("select current_schema()");
        resultSet = pstmt.executeQuery();
        if (resultSet.next()){
            return resultSet.getString(1);
        }
    } catch (SQLException e) {
        logger.error(e.getMessage(),e);
    } finally {
        // release the result set and statement as well, not just the connection
        ConnectionUtils.releaseResource(resultSet, pstmt, conn);
    }
    return "";
}
/**
* Determines whether a table exists
* @param tableName
* @return
*/
public boolean isExistsTable(String tableName) {
    Connection conn = null;
    ResultSet rs = null;
    try {
        conn = ConnectionFactory.getDataSource().getConnection();
        rs = conn.getMetaData().getTables(null, schema, tableName, null);
        return rs.next();
    } catch (SQLException e) {
        logger.error(e.getMessage(),e);
        throw new RuntimeException(e.getMessage(),e);
    } finally {
        // release the result set as well, not just the connection
        ConnectionUtils.releaseResource(rs, null, conn);
    }
}
/**
* Determines whether a field exists in the specified table
* @param tableName
* @param columnName
* @return
*/
public boolean isExistsColumn(String tableName,String columnName) {
    Connection conn = null;
    ResultSet rs = null;
    try {
        conn = ConnectionFactory.getDataSource().getConnection();
        rs = conn.getMetaData().getColumns(null,schema,tableName,columnName);
        return rs.next();
    } catch (SQLException e) {
        logger.error(e.getMessage(),e);
        throw new RuntimeException(e.getMessage(),e);
    } finally {
        // release the result set as well, not just the connection
        ConnectionUtils.releaseResource(rs, null, conn);
    }
}
}

129
escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/UpgradeDao.java

@ -16,7 +16,8 @@
*/ */
package cn.escheduler.dao.upgrade; package cn.escheduler.dao.upgrade;
import cn.escheduler.common.utils.MysqlUtils; import cn.escheduler.common.enums.DbType;
import cn.escheduler.common.utils.ConnectionUtils;
import cn.escheduler.common.utils.ScriptRunner; import cn.escheduler.common.utils.ScriptRunner;
import cn.escheduler.dao.AbstractBaseDao; import cn.escheduler.dao.AbstractBaseDao;
import cn.escheduler.dao.datasource.ConnectionFactory; import cn.escheduler.dao.datasource.ConnectionFactory;
@ -29,8 +30,9 @@ import java.sql.Connection;
import java.sql.PreparedStatement; import java.sql.PreparedStatement;
import java.sql.ResultSet; import java.sql.ResultSet;
import java.sql.SQLException; import java.sql.SQLException;
import java.text.MessageFormat;
public class UpgradeDao extends AbstractBaseDao { public abstract class UpgradeDao extends AbstractBaseDao {
public static final Logger logger = LoggerFactory.getLogger(UpgradeDao.class); public static final Logger logger = LoggerFactory.getLogger(UpgradeDao.class);
private static final String T_VERSION_NAME = "t_escheduler_version"; private static final String T_VERSION_NAME = "t_escheduler_version";
@ -41,35 +43,59 @@ public class UpgradeDao extends AbstractBaseDao {
} }
private static class UpgradeDaoHolder { /**
private static final UpgradeDao INSTANCE = new UpgradeDao(); * get db type
* @return
*/
public static DbType getDbType(){
try {
Connection conn = ConnectionFactory.getDataSource().getConnection();
String name = conn.getMetaData().getDatabaseProductName().toUpperCase();
return DbType.valueOf(name);
} catch (Exception e) {
logger.error(e.getMessage(),e);
return null;
} }
private UpgradeDao() {
} }
public static final UpgradeDao getInstance() { public void initSchema(){
return UpgradeDaoHolder.INSTANCE; DbType dbType = getDbType();
String initSqlPath = "";
if (dbType != null) {
switch (dbType) {
case MYSQL:
initSqlPath = "/sql/create/release-1.0.0_schema/mysql/";
initSchema(initSqlPath);
break;
case POSTGRESQL:
initSqlPath = "/sql/create/release-1.2.0_schema/postgresql/";
initSchema(initSqlPath);
break;
default:
logger.error("not support sql type: {},can't upgrade", dbType);
throw new IllegalArgumentException("not support sql type,can't upgrade");
}
}
} }
public void initSchema(String initSqlPath) {
public void initEschedulerSchema() {
// Execute the escheduler DDL, it cannot be rolled back // Execute the escheduler DDL, it cannot be rolled back
runInitEschedulerDDL(); runInitDDL(initSqlPath);
// Execute the escheduler DML, it can be rolled back // Execute the escheduler DML, it can be rolled back
runInitEschedulerDML(); runInitDML(initSqlPath);
} }
private void runInitEschedulerDML() { private void runInitDML(String initSqlPath) {
Connection conn = null; Connection conn = null;
if (StringUtils.isEmpty(rootDir)) { if (StringUtils.isEmpty(rootDir)) {
throw new RuntimeException("Environment variable user.dir not found"); throw new RuntimeException("Environment variable user.dir not found");
} }
String mysqlSQLFilePath = rootDir + "/sql/create/release-1.0.0_schema/mysql/escheduler_dml.sql"; //String mysqlSQLFilePath = rootDir + "/sql/create/release-1.0.0_schema/mysql/escheduler_dml.sql";
String mysqlSQLFilePath = rootDir + initSqlPath + "dolphinscheduler_dml.sql";
try { try {
conn = ConnectionFactory.getDataSource().getConnection(); conn = ConnectionFactory.getDataSource().getConnection();
conn.setAutoCommit(false); conn.setAutoCommit(false);
@ -98,18 +124,19 @@ public class UpgradeDao extends AbstractBaseDao {
logger.error(e.getMessage(),e); logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage(),e); throw new RuntimeException(e.getMessage(),e);
} finally { } finally {
MysqlUtils.releaseResource(null, null, conn); ConnectionUtils.releaseResource(null, null, conn);
} }
} }
private void runInitEschedulerDDL() { private void runInitDDL(String initSqlPath) {
Connection conn = null; Connection conn = null;
if (StringUtils.isEmpty(rootDir)) { if (StringUtils.isEmpty(rootDir)) {
throw new RuntimeException("Environment variable user.dir not found"); throw new RuntimeException("Environment variable user.dir not found");
} }
String mysqlSQLFilePath = rootDir + "/sql/create/release-1.0.0_schema/mysql/escheduler_ddl.sql"; //String mysqlSQLFilePath = rootDir + "/sql/create/release-1.0.0_schema/mysql/dolphinscheduler_ddl.sql";
String mysqlSQLFilePath = rootDir + initSqlPath + "dolphinscheduler_ddl.sql";
try { try {
conn = ConnectionFactory.getDataSource().getConnection(); conn = ConnectionFactory.getDataSource().getConnection();
// Execute the escheduler_ddl.sql script to create the table structure of escheduler // Execute the escheduler_ddl.sql script to create the table structure of escheduler
@ -126,7 +153,7 @@ public class UpgradeDao extends AbstractBaseDao {
logger.error(e.getMessage(),e); logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage(),e); throw new RuntimeException(e.getMessage(),e);
} finally { } finally {
MysqlUtils.releaseResource(null, null, conn); ConnectionUtils.releaseResource(null, null, conn);
} }
@ -137,26 +164,7 @@ public class UpgradeDao extends AbstractBaseDao {
* @param tableName * @param tableName
* @return * @return
*/ */
public boolean isExistsTable(String tableName) { public abstract boolean isExistsTable(String tableName);
Connection conn = null;
try {
conn = ConnectionFactory.getDataSource().getConnection();
ResultSet rs = conn.getMetaData().getTables(null, null, tableName, null);
if (rs.next()) {
return true;
} else {
return false;
}
} catch (SQLException e) {
logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage(),e);
} finally {
MysqlUtils.releaseResource(null, null, conn);
}
}
/** /**
* Determines whether a field exists in the specified table * Determines whether a field exists in the specified table
@ -164,26 +172,7 @@ public class UpgradeDao extends AbstractBaseDao {
* @param columnName * @param columnName
* @return * @return
*/ */
public boolean isExistsColumn(String tableName,String columnName) { public abstract boolean isExistsColumn(String tableName,String columnName);
Connection conn = null;
try {
conn = ConnectionFactory.getDataSource().getConnection();
ResultSet rs = conn.getMetaData().getColumns(null,null,tableName,columnName);
if (rs.next()) {
return true;
} else {
return false;
}
} catch (SQLException e) {
logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage(),e);
} finally {
MysqlUtils.releaseResource(null, null, conn);
}
}
public String getCurrentVersion() { public String getCurrentVersion() {
@ -207,26 +196,26 @@ public class UpgradeDao extends AbstractBaseDao {
logger.error(e.getMessage(),e); logger.error(e.getMessage(),e);
throw new RuntimeException("sql: " + sql, e); throw new RuntimeException("sql: " + sql, e);
} finally { } finally {
MysqlUtils.releaseResource(rs, pstmt, conn); ConnectionUtils.releaseResource(rs, pstmt, conn);
} }
} }
public void upgradeEscheduler(String schemaDir) { public void upgradeDolphinScheduler(String schemaDir) {
upgradeEschedulerDDL(schemaDir); upgradeDolphinSchedulerDDL(schemaDir);
upgradeEschedulerDML(schemaDir); upgradeDolphinSchedulerDML(schemaDir);
} }
private void upgradeEschedulerDML(String schemaDir) { private void upgradeDolphinSchedulerDML(String schemaDir) {
String schemaVersion = schemaDir.split("_")[0]; String schemaVersion = schemaDir.split("_")[0];
if (StringUtils.isEmpty(rootDir)) { if (StringUtils.isEmpty(rootDir)) {
throw new RuntimeException("Environment variable user.dir not found"); throw new RuntimeException("Environment variable user.dir not found");
} }
String mysqlSQLFilePath = rootDir + "/sql/upgrade/" + schemaDir + "/mysql/escheduler_dml.sql"; String mysqlSQLFilePath = MessageFormat.format("{0}/sql/upgrade/{1}/{2}/dolphinscheduler_dml.sql",rootDir,schemaDir,getDbType().name().toLowerCase());
logger.info("mysqlSQLFilePath"+mysqlSQLFilePath);
Connection conn = null; Connection conn = null;
PreparedStatement pstmt = null; PreparedStatement pstmt = null;
try { try {
@ -277,16 +266,16 @@ public class UpgradeDao extends AbstractBaseDao {
logger.error(e.getMessage(),e); logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage(),e); throw new RuntimeException(e.getMessage(),e);
} finally { } finally {
MysqlUtils.releaseResource(null, pstmt, conn); ConnectionUtils.releaseResource(null, pstmt, conn);
} }
} }
private void upgradeEschedulerDDL(String schemaDir) { private void upgradeDolphinSchedulerDDL(String schemaDir) {
if (StringUtils.isEmpty(rootDir)) { if (StringUtils.isEmpty(rootDir)) {
throw new RuntimeException("Environment variable user.dir not found"); throw new RuntimeException("Environment variable user.dir not found");
} }
String mysqlSQLFilePath = rootDir + "/sql/upgrade/" + schemaDir + "/mysql/escheduler_ddl.sql"; String mysqlSQLFilePath = MessageFormat.format("{0}/sql/upgrade/{1}/{2}/dolphinscheduler_ddl.sql",rootDir,schemaDir,getDbType().name().toLowerCase());
Connection conn = null; Connection conn = null;
PreparedStatement pstmt = null; PreparedStatement pstmt = null;
try { try {
@ -316,7 +305,7 @@ public class UpgradeDao extends AbstractBaseDao {
logger.error(e.getMessage(),e); logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage(),e); throw new RuntimeException(e.getMessage(),e);
} finally { } finally {
MysqlUtils.releaseResource(null, pstmt, conn); ConnectionUtils.releaseResource(null, pstmt, conn);
} }
} }
@ -338,7 +327,7 @@ public class UpgradeDao extends AbstractBaseDao {
logger.error(e.getMessage(),e); logger.error(e.getMessage(),e);
throw new RuntimeException("sql: " + upgradeSQL, e); throw new RuntimeException("sql: " + upgradeSQL, e);
} finally { } finally {
MysqlUtils.releaseResource(null, pstmt, conn); ConnectionUtils.releaseResource(null, pstmt, conn);
} }
} }
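isExistsTable and isExistsColumn become abstract here, so each supported database ships its own dialect-specific check. A minimal sketch of what a MySQL-flavoured subclass could look like, reusing the JDBC metadata logic removed above (the class name is illustrative, and other abstract members such as getDbType are omitted):
public class MySQLUpgradeDao extends UpgradeDao {
    @Override
    public boolean isExistsTable(String tableName) {
        Connection conn = null;
        ResultSet rs = null;
        try {
            conn = ConnectionFactory.getDataSource().getConnection();
            // JDBC DatabaseMetaData is sufficient for MySQL; other dialects may need an information_schema query
            rs = conn.getMetaData().getTables(null, null, tableName, null);
            return rs.next();
        } catch (SQLException e) {
            throw new RuntimeException(e.getMessage(), e);
        } finally {
            ConnectionUtils.releaseResource(rs, null, conn);
        }
    }
    @Override
    public boolean isExistsColumn(String tableName, String columnName) {
        Connection conn = null;
        ResultSet rs = null;
        try {
            conn = ConnectionFactory.getDataSource().getConnection();
            rs = conn.getMetaData().getColumns(null, null, tableName, columnName);
            return rs.next();
        } catch (SQLException e) {
            throw new RuntimeException(e.getMessage(), e);
        } finally {
            ConnectionUtils.releaseResource(rs, null, conn);
        }
    }
}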

22
escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/CreateEscheduler.java → escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/CreateDolphinScheduler.java

@ -16,29 +16,29 @@
*/ */
package cn.escheduler.dao.upgrade.shell; package cn.escheduler.dao.upgrade.shell;
import cn.escheduler.dao.upgrade.EschedulerManager; import cn.escheduler.dao.upgrade.DolphinSchedulerManager;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
/** /**
* init escheduler * init DolphinScheduler
* *
*/ */
public class CreateEscheduler { public class CreateDolphinScheduler {
private static final Logger logger = LoggerFactory.getLogger(CreateEscheduler.class); private static final Logger logger = LoggerFactory.getLogger(CreateDolphinScheduler.class);
public static void main(String[] args) { public static void main(String[] args) {
EschedulerManager eschedulerManager = new EschedulerManager(); DolphinSchedulerManager dolphinSchedulerManager = new DolphinSchedulerManager();
try { try {
eschedulerManager.initEscheduler(); dolphinSchedulerManager.initDolphinScheduler();
logger.info("init escheduler finished"); logger.info("init DolphinScheduler finished");
eschedulerManager.upgradeEscheduler(); dolphinSchedulerManager.upgradeDolphinScheduler();
logger.info("upgrade escheduler finished"); logger.info("upgrade DolphinScheduler finished");
logger.info("create escheduler success"); logger.info("create DolphinScheduler success");
} catch (Exception e) { } catch (Exception e) {
logger.error("create escheduler failed",e); logger.error("create DolphinScheduler failed",e);
} }
} }

16
escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/InitEscheduler.java → escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/InitDolphinScheduler.java

@ -16,23 +16,23 @@
*/ */
package cn.escheduler.dao.upgrade.shell; package cn.escheduler.dao.upgrade.shell;
import cn.escheduler.dao.upgrade.EschedulerManager; import cn.escheduler.dao.upgrade.DolphinSchedulerManager;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
/** /**
* init escheduler * init DolphinScheduler
* *
*/ */
public class InitEscheduler { public class InitDolphinScheduler {
private static final Logger logger = LoggerFactory.getLogger(InitEscheduler.class); private static final Logger logger = LoggerFactory.getLogger(InitDolphinScheduler.class);
public static void main(String[] args) { public static void main(String[] args) {
Thread.currentThread().setName("manager-InitEscheduler"); Thread.currentThread().setName("manager-InitDolphinScheduler");
EschedulerManager eschedulerManager = new EschedulerManager(); DolphinSchedulerManager dolphinSchedulerManager = new DolphinSchedulerManager();
eschedulerManager.initEscheduler(); dolphinSchedulerManager.initDolphinScheduler();
logger.info("init escheduler finished"); logger.info("init DolphinScheduler finished");
} }
} }

18
escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/UpgradeEscheduler.java → escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/UpgradeDolphinScheduler.java

@ -16,28 +16,26 @@
*/ */
package cn.escheduler.dao.upgrade.shell; package cn.escheduler.dao.upgrade.shell;
import cn.escheduler.dao.upgrade.EschedulerManager; import cn.escheduler.dao.upgrade.DolphinSchedulerManager;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
/** /**
* upgrade escheduler database * upgrade DolphinScheduler database
*/ */
public class UpgradeEscheduler { public class UpgradeDolphinScheduler {
private static final Logger logger = LoggerFactory.getLogger(UpgradeEscheduler.class); private static final Logger logger = LoggerFactory.getLogger(UpgradeDolphinScheduler.class);
public static void main(String[] args) { public static void main(String[] args) {
EschedulerManager eschedulerManager = new EschedulerManager(); DolphinSchedulerManager dolphinSchedulerManager = new DolphinSchedulerManager();
try { try {
eschedulerManager.upgradeEscheduler(); dolphinSchedulerManager.upgradeDolphinScheduler();
logger.info("upgrade escheduler success"); logger.info("upgrade DolphinScheduler success");
} catch (Exception e) { } catch (Exception e) {
logger.error(e.getMessage(),e); logger.error(e.getMessage(),e);
logger.info("Upgrade escheduler failed"); logger.info("Upgrade DolphinScheduler failed");
throw new RuntimeException(e); throw new RuntimeException(e);
} }
} }

6
escheduler-server/src/main/java/cn/escheduler/server/master/MasterServer.java

@ -33,6 +33,7 @@ import org.quartz.SchedulerException;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.WebApplicationType; import org.springframework.boot.WebApplicationType;
import org.springframework.boot.builder.SpringApplicationBuilder; import org.springframework.boot.builder.SpringApplicationBuilder;
import org.springframework.context.annotation.ComponentScan; import org.springframework.context.annotation.ComponentScan;
@ -90,10 +91,9 @@ public class MasterServer extends AbstractServer {
* master server not use web service * master server not use web service
*/ */
public static void main(String[] args) { public static void main(String[] args) {
SpringApplicationBuilder app = new SpringApplicationBuilder(MasterServer.class); SpringApplication app = new SpringApplication(MasterServer.class);
app.web(WebApplicationType.NONE) app.run(args);
.run(args);
} }
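The master now boots as a plain SpringApplication instead of a SpringApplicationBuilder pinned to WebApplicationType.NONE, and the new application_master.properties / application_worker.properties give master and worker distinct embedded-server ports (5566 and 7788). Should the old no-web behaviour ever be needed again, the same effect is available on SpringApplication itself; a minimal sketch, assuming Spring Boot 2.x:
SpringApplication app = new SpringApplication(MasterServer.class);
// equivalent of the removed builder call .web(WebApplicationType.NONE):
// skip starting an embedded web server altogether
app.setWebApplicationType(WebApplicationType.NONE);
app.run(args);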

110
escheduler-server/src/main/java/cn/escheduler/server/utils/FlinkArgsUtils.java

@ -0,0 +1,110 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.server.utils;
import cn.escheduler.common.Constants;
import cn.escheduler.common.enums.ProgramType;
import cn.escheduler.common.task.flink.FlinkParameters;
import org.apache.commons.lang.StringUtils;
import java.util.ArrayList;
import java.util.List;
/**
* flink args utils
*/
public class FlinkArgsUtils {
/**
* build args
* @param param
* @return
*/
public static List<String> buildArgs(FlinkParameters param) {
List<String> args = new ArrayList<>();
args.add(Constants.FLINK_RUN_MODE); //-m
args.add(Constants.FLINK_YARN_CLUSTER); //yarn-cluster
if (param.getSlot() != 0) {
args.add(Constants.FLINK_YARN_SLOT);
args.add(String.format("%d", param.getSlot())); //-ys
}
if (StringUtils.isNotEmpty(param.getAppName())) { //-ynm
args.add(Constants.FLINK_APP_NAME);
args.add(param.getAppName());
}
if (param.getTaskManager() != 0) { //-yn
args.add(Constants.FLINK_TASK_MANAGE);
args.add(String.format("%d", param.getTaskManager()));
}
if (StringUtils.isNotEmpty(param.getJobManagerMemory())) {
args.add(Constants.FLINK_JOB_MANAGE_MEM);
args.add(param.getJobManagerMemory()); //-yjm
}
if (StringUtils.isNotEmpty(param.getTaskManagerMemory())) { // -ytm
args.add(Constants.FLINK_TASK_MANAGE_MEM);
args.add(param.getTaskManagerMemory());
}
args.add(Constants.FLINK_detach); //-d
if(param.getProgramType() !=null ){
if(param.getProgramType()!=ProgramType.PYTHON){
if (StringUtils.isNotEmpty(param.getMainClass())) {
args.add(Constants.FLINK_MAIN_CLASS); //-c
args.add(param.getMainClass()); //main class
}
}
}
if (param.getMainJar() != null) {
args.add(param.getMainJar().getRes());
}
// --files --conf --libjar ...
if (StringUtils.isNotEmpty(param.getOthers())) {
String others = param.getOthers();
if(!others.contains("--queue")){
if (StringUtils.isNotEmpty(param.getQueue())) {
args.add(Constants.SPARK_QUEUE);
args.add(param.getQueue());
}
}
args.add(param.getOthers());
}else if (StringUtils.isNotEmpty(param.getQueue())) {
args.add(Constants.SPARK_QUEUE);
args.add(param.getQueue());
}
if (StringUtils.isNotEmpty(param.getMainArgs())) {
args.add(param.getMainArgs());
}
return args;
}
}
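For reference, a rough sketch of the argument list buildArgs assembles for a typical parameter set; the bean setters are assumed to mirror the getters used above, and FlinkTask#buildCommand (below) prepends the actual "flink run" prefix:
// illustrative only: assumes standard JavaBean setters on FlinkParameters
FlinkParameters param = new FlinkParameters();
param.setSlot(2);                  // -ys 2
param.setAppName("demo");          // -ynm demo
param.setTaskManager(2);           // -yn 2
param.setJobManagerMemory("1G");   // -yjm 1G
param.setTaskManagerMemory("2G");  // -ytm 2G
List<String> args = FlinkArgsUtils.buildArgs(param);
System.out.println(String.join(" ", args));
// prints roughly: -m yarn-cluster -ys 2 -ynm demo -yn 2 -yjm 1G -ytm 2G -d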

6
escheduler-server/src/main/java/cn/escheduler/server/worker/WorkerServer.java

@ -39,6 +39,7 @@ import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.WebApplicationType; import org.springframework.boot.WebApplicationType;
import org.springframework.boot.builder.SpringApplicationBuilder; import org.springframework.boot.builder.SpringApplicationBuilder;
import org.springframework.context.annotation.ComponentScan; import org.springframework.context.annotation.ComponentScan;
@ -121,10 +122,9 @@ public class WorkerServer extends AbstractServer {
*/ */
public static void main(String[] args) { public static void main(String[] args) {
SpringApplicationBuilder app = new SpringApplicationBuilder(WorkerServer.class); SpringApplication app = new SpringApplication(WorkerServer.class);
app.web(WebApplicationType.NONE) app.run(args);
.run(args);
} }

5
escheduler-server/src/main/java/cn/escheduler/server/worker/runner/FetchTaskThread.java

@ -187,8 +187,9 @@ public class FetchTaskThread implements Runnable{
continue; continue;
} }
// set queue for process instance // set queue for process instance, user-specified queue takes precedence over tenant queue
taskInstance.getProcessInstance().setQueue(tenant.getQueue()); String userQueue = processDao.queryUserQueueByProcessInstanceId(taskInstance.getProcessInstanceId());
taskInstance.getProcessInstance().setQueue(StringUtils.isEmpty(userQueue) ? tenant.getQueue() : userQueue);
logger.info("worker fetch taskId : {} from queue ", taskInstId); logger.info("worker fetch taskId : {} from queue ", taskInstId);

3
escheduler-server/src/main/java/cn/escheduler/server/worker/task/AbstractTask.java

@ -22,6 +22,7 @@ import cn.escheduler.common.enums.TaskRecordStatus;
import cn.escheduler.common.enums.TaskType; import cn.escheduler.common.enums.TaskType;
import cn.escheduler.common.process.Property; import cn.escheduler.common.process.Property;
import cn.escheduler.common.task.AbstractParameters; import cn.escheduler.common.task.AbstractParameters;
import cn.escheduler.common.task.flink.FlinkParameters;
import cn.escheduler.common.task.mr.MapreduceParameters; import cn.escheduler.common.task.mr.MapreduceParameters;
import cn.escheduler.common.task.procedure.ProcedureParameters; import cn.escheduler.common.task.procedure.ProcedureParameters;
import cn.escheduler.common.task.python.PythonParameters; import cn.escheduler.common.task.python.PythonParameters;
@ -178,6 +179,8 @@ public abstract class AbstractTask {
case SPARK: case SPARK:
paramsClass = SparkParameters.class; paramsClass = SparkParameters.class;
break; break;
case FLINK:
paramsClass = FlinkParameters.class;
break;
case PYTHON: case PYTHON:
paramsClass = PythonParameters.class; paramsClass = PythonParameters.class;
break; break;

3
escheduler-server/src/main/java/cn/escheduler/server/worker/task/TaskManager.java

@ -19,6 +19,7 @@ package cn.escheduler.server.worker.task;
import cn.escheduler.common.enums.TaskType; import cn.escheduler.common.enums.TaskType;
import cn.escheduler.server.worker.task.dependent.DependentTask; import cn.escheduler.server.worker.task.dependent.DependentTask;
import cn.escheduler.server.worker.task.flink.FlinkTask;
import cn.escheduler.server.worker.task.mr.MapReduceTask; import cn.escheduler.server.worker.task.mr.MapReduceTask;
import cn.escheduler.server.worker.task.processdure.ProcedureTask; import cn.escheduler.server.worker.task.processdure.ProcedureTask;
import cn.escheduler.server.worker.task.python.PythonTask; import cn.escheduler.server.worker.task.python.PythonTask;
@ -55,6 +56,8 @@ public class TaskManager {
return new MapReduceTask(props, logger); return new MapReduceTask(props, logger);
case SPARK: case SPARK:
return new SparkTask(props, logger); return new SparkTask(props, logger);
case FLINK:
return new FlinkTask(props, logger);
case PYTHON: case PYTHON:
return new PythonTask(props, logger); return new PythonTask(props, logger);
case DEPENDENT: case DEPENDENT:

118
escheduler-server/src/main/java/cn/escheduler/server/worker/task/flink/FlinkTask.java

@ -0,0 +1,118 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.server.worker.task.flink;
import cn.escheduler.common.process.Property;
import cn.escheduler.common.task.AbstractParameters;
import cn.escheduler.common.task.flink.FlinkParameters;
import cn.escheduler.common.utils.JSONUtils;
import cn.escheduler.common.utils.ParameterUtils;
import cn.escheduler.dao.model.ProcessInstance;
import cn.escheduler.server.utils.FlinkArgsUtils;
import cn.escheduler.server.utils.ParamUtils;
import cn.escheduler.server.worker.task.AbstractYarnTask;
import cn.escheduler.server.worker.task.TaskProps;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
* flink task
*/
public class FlinkTask extends AbstractYarnTask {
/**
* flink command
*/
private static final String FLINK_COMMAND = "flink";
private static final String FLINK_RUN = "run";
/**
* flink parameters
*/
private FlinkParameters flinkParameters;
public FlinkTask(TaskProps props, Logger logger) {
super(props, logger);
}
@Override
public void init() {
logger.info("flink task params {}", taskProps.getTaskParams());
flinkParameters = JSONUtils.parseObject(taskProps.getTaskParams(), FlinkParameters.class);
if (!flinkParameters.checkParameters()) {
throw new RuntimeException("flink task params is not valid");
}
flinkParameters.setQueue(taskProps.getQueue());
if (StringUtils.isNotEmpty(flinkParameters.getMainArgs())) {
String args = flinkParameters.getMainArgs();
// get process instance by task instance id
ProcessInstance processInstance = processDao.findProcessInstanceByTaskId(taskProps.getTaskInstId());
/**
* combining local and global parameters
*/
Map<String, Property> paramsMap = ParamUtils.convert(taskProps.getUserDefParamsMap(),
taskProps.getDefinedParams(),
flinkParameters.getLocalParametersMap(),
processInstance.getCmdTypeIfComplement(),
processInstance.getScheduleTime());
logger.info("param Map : {}", paramsMap);
if (paramsMap != null ){
args = ParameterUtils.convertParameterPlaceholders(args, ParamUtils.convert(paramsMap));
logger.info("param args : {}", args);
}
flinkParameters.setMainArgs(args);
}
}
/**
* create command
* @return
*/
@Override
protected String buildCommand() {
List<String> args = new ArrayList<>();
args.add(FLINK_COMMAND);
args.add(FLINK_RUN);
logger.info("flink task args : {}", args);
// other parameters
args.addAll(FlinkArgsUtils.buildArgs(flinkParameters));
String command = ParameterUtils
.convertParameterPlaceholders(String.join(" ", args), taskProps.getDefinedParams());
logger.info("flink task command : {}", command);
return command;
}
@Override
public AbstractParameters getParameters() {
return flinkParameters;
}
}

3
escheduler-server/src/main/resources/application_master.properties

@ -1 +1,4 @@
logging.config=classpath:master_logback.xml logging.config=classpath:master_logback.xml
# server port
server.port=5566

4
escheduler-server/src/main/resources/application_worker.properties

@ -0,0 +1,4 @@
logging.config=classpath:worker_logback.xml
# server port
server.port=7788

4
escheduler-ui/build/webpack.config.prod.js

@ -100,8 +100,10 @@ const config = merge.smart(baseConfig, {
sourceMap: true, sourceMap: true,
uglifyOptions: { uglifyOptions: {
compress: { compress: {
warnings: false,
drop_debugger: true,
drop_console: true, drop_console: true,
drop_debugger: true pure_funcs: ['console.log'] // remove console.log calls
}, },
comments: function (n, c) { comments: function (n, c) {
/*! IMPORTANT: Please preserve 3rd-party library license info, inspired from @allex/amd-build-worker/config/jsplumb.js */ /*! IMPORTANT: Please preserve 3rd-party library license info, inspired from @allex/amd-build-worker/config/jsplumb.js */

80
escheduler-ui/install-escheduler-ui.sh

@ -1,21 +1,21 @@
#!/bin/bash #!/bin/bash
# 当前路径 # current path
esc_basepath=$(cd `dirname $0`; pwd) esc_basepath=$(cd `dirname $0`; pwd)
menu(){ menu(){
cat <<END cat <<END
================================================= =================================================
1.CentOS6安装 1.CentOS6 Installation
2.CentOS7安装 2.CentOS7 Installation
3.Ubuntu安装 3.Ubuntu Installation
4.退出 4.Exit
================================================= =================================================
END END
} }
# 创建文件并配置nginx # create a file and configure nginx
eschedulerConf(){ dolphinschedulerConf(){
E_host='$host' E_host='$host'
E_remote_addr='$remote_addr' E_remote_addr='$remote_addr'
@ -23,16 +23,16 @@ eschedulerConf(){
E_http_upgrade='$http_upgrade' E_http_upgrade='$http_upgrade'
echo " echo "
server { server {
listen $1;# 访问端口 listen $1;# access port
server_name localhost; server_name localhost;
#charset koi8-r; #charset koi8-r;
#access_log /var/log/nginx/host.access.log main; #access_log /var/log/nginx/host.access.log main;
location / { location / {
root ${esc_basepath}/dist; # 静态文件目录 root ${esc_basepath}/dist; # static file directory
index index.html index.html; index index.html index.html;
} }
location /escheduler { location /escheduler {
proxy_pass $2; # 接口地址 proxy_pass $2; # interface address
proxy_set_header Host $E_host; proxy_set_header Host $E_host;
proxy_set_header X-Real-IP $E_remote_addr; proxy_set_header X-Real-IP $E_remote_addr;
proxy_set_header x_real_ipP $E_remote_addr; proxy_set_header x_real_ipP $E_remote_addr;
@ -53,21 +53,21 @@ eschedulerConf(){
root /usr/share/nginx/html; root /usr/share/nginx/html;
} }
} }
" >> /etc/nginx/conf.d/escheduler.conf " >> /etc/nginx/conf.d/dolphinscheduler.conf
} }
ubuntu(){ ubuntu(){
#更新源 # update source
apt-get update apt-get update
#安装nginx # install nginx
apt-get install -y nginx apt-get install -y nginx
# 配置nginx # config nginx
eschedulerConf $1 $2 dolphinschedulerConf $1 $2
# 启动nginx # startup nginx
/etc/init.d/nginx start /etc/init.d/nginx start
sleep 1 sleep 1
if [ $? -ne 0 ];then if [ $? -ne 0 ];then
@ -81,17 +81,17 @@ centos7(){
rpm -Uvh http://nginx.org/packages/centos/7/noarch/RPMS/nginx-release-centos-7-0.el7.ngx.noarch.rpm rpm -Uvh http://nginx.org/packages/centos/7/noarch/RPMS/nginx-release-centos-7-0.el7.ngx.noarch.rpm
yum install -y nginx yum install -y nginx
# 配置nginx # config nginx
eschedulerConf $1 $2 dolphinschedulerConf $1 $2
# 解决 0.0.0.0:8888 问题 # solve 0.0.0.0:8888 problem
yum -y install policycoreutils-python yum -y install policycoreutils-python
semanage port -a -t http_port_t -p tcp $esc_proxy semanage port -a -t http_port_t -p tcp $esc_proxy
# 开放前端访问端口 # open front access port
firewall-cmd --zone=public --add-port=$esc_proxy/tcp --permanent firewall-cmd --zone=public --add-port=$esc_proxy/tcp --permanent
# 启动nginx # startup nginx
systemctl start nginx systemctl start nginx
sleep 1 sleep 1
if [ $? -ne 0 ];then if [ $? -ne 0 ];then
@ -99,9 +99,9 @@ centos7(){
fi fi
nginx -s reload nginx -s reload
# 调整SELinux的参数 # set SELinux parameters
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
# 临时生效 # temporary effect
setenforce 0 setenforce 0
} }
@ -114,10 +114,10 @@ centos6(){
# install nginx # install nginx
yum install nginx -y yum install nginx -y
# 配置nginx # config nginx
eschedulerConf $1 $2 dolphinschedulerConf $1 $2
# 启动nginx # startup nginx
/etc/init.d/nginx start /etc/init.d/nginx start
sleep 1 sleep 1
if [ $? -ne 0 ];then if [ $? -ne 0 ];then
@ -125,17 +125,17 @@ centos6(){
fi fi
nginx -s reload nginx -s reload
# 调整SELinux的参数 # set SELinux parameters
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
# 临时生效 # temporary effect
setenforce 0 setenforce 0
} }
function main(){ function main(){
echo "欢迎使用easy scheduler前端部署脚本,目前前端部署脚本仅支持CentOS,Ubuntu" echo "Welcome to thedolphinscheduler front-end deployment script, which is currently only supported by front-end deployment scripts : CentOS and Ubuntu"
echo "请在 escheduler-ui 目录下执行" echo "Please execute in the dolphinscheduler-ui directory"
#To be compatible with MacOS and Linux #To be compatible with MacOS and Linux
if [[ "$OSTYPE" == "darwin"* ]]; then if [[ "$OSTYPE" == "darwin"* ]]; then
@ -166,33 +166,33 @@ function main(){
fi fi
# 配置前端访问端口 # config front-end access ports
read -p "请输入nginx代理端口,不输入,则默认8888 :" esc_proxy_port read -p "Please enter the nginx proxy port, do not enter, the default is 8888 :" esc_proxy_port
if [ -z "${esc_proxy_port}" ];then if [ -z "${esc_proxy_port}" ];then
esc_proxy_port="8888" esc_proxy_port="8888"
fi fi
read -p "请输入api server代理ip,必须输入,例如:192.168.xx.xx :" esc_api_server_ip read -p "Please enter the api server proxy ip, you must enter, for example: 192.168.xx.xx :" esc_api_server_ip
if [ -z "${esc_api_server_ip}" ];then if [ -z "${esc_api_server_ip}" ];then
echo "api server代理ip不能为空." echo "api server proxy ip can not be empty."
exit 1 exit 1
fi fi
read -p "请输入api server代理端口,不输入,则默认12345 :" esc_api_server_port read -p "Please enter the api server proxy port, do not enter, the default is 12345:" esc_api_server_port
if [ -z "${esc_api_server_port}" ];then if [ -z "${esc_api_server_port}" ];then
esc_api_server_port="12345" esc_api_server_port="12345"
fi fi
# api server后端地址 # api server backend address
esc_api_server="http://$esc_api_server_ip:$esc_api_server_port" esc_api_server="http://$esc_api_server_ip:$esc_api_server_port"
# 本机ip地址 # local ip address
esc_ipaddr=$(ip a | grep inet | grep -v inet6 | grep -v 127 | sed 's/^[ \t]*//g' | cut -d ' ' -f2 | head -n 1 | awk -F '/' '{print $1}') esc_ipaddr=$(ip a | grep inet | grep -v inet6 | grep -v 127 | sed 's/^[ \t]*//g' | cut -d ' ' -f2 | head -n 1 | awk -F '/' '{print $1}')
# 提示信息 # Prompt message
menu menu
read -p "请输入安装编号(1|2|3|4):" num read -p "Please enter the installation number(1|2|3|4):" num
case $num in case $num in
1) 1)
@ -212,7 +212,7 @@ function main(){
echo $"Usage :sh $0" echo $"Usage :sh $0"
exit 1 exit 1
esac esac
echo "请浏览器访问:http://${esc_ipaddr}:${esc_proxy_port}" echo "Please visit the browser:http://${esc_ipaddr}:${esc_proxy_port}"
} }

12
escheduler-ui/package.json

@ -1,5 +1,5 @@
{ {
"name": "escheduler", "name": "dolphinscheduler",
"version": "1.0.0", "version": "1.0.0",
"description": "调度平台前端项目", "description": "调度平台前端项目",
"author": "gongzijian <gongzijian@analysys.com.cn>", "author": "gongzijian <gongzijian@analysys.com.cn>",
@ -27,7 +27,6 @@
"babel-plugin-transform-runtime": "^6.23.0", "babel-plugin-transform-runtime": "^6.23.0",
"babel-plugin-transform-vue-jsx": "^3.5.0", "babel-plugin-transform-vue-jsx": "^3.5.0",
"babel-preset-env": "^1.6.1", "babel-preset-env": "^1.6.1",
"babel-runtime": "^6.26.0",
"bootstrap": "3.3.7", "bootstrap": "3.3.7",
"canvg": "1.5", "canvg": "1.5",
"clipboard": "^2.0.1", "clipboard": "^2.0.1",
@ -77,16 +76,7 @@
] ]
}, },
"devDependencies": { "devDependencies": {
"jasmine-core": "^3.2.1",
"jquery": "1.12.4", "jquery": "1.12.4",
"karma": "^3.0.0",
"karma-browserstack-launcher": "^1.3.0",
"karma-chrome-launcher": "^2.2.0",
"karma-coverage": "^1.1.2",
"karma-jasmine": "^1.1.2",
"karma-sourcemap-loader": "^0.3.7",
"karma-spec-reporter": "^0.0.32",
"karma-webpack": "^3.0.0",
"vue": "^2.5.17", "vue": "^2.5.17",
"vue-router": "2.7.0", "vue-router": "2.7.0",
"vuex": "^3.0.0" "vuex": "^3.0.0"

4
escheduler-ui/src/js/conf/home/pages/dag/_source/config.js

@ -260,6 +260,10 @@ let tasksType = {
desc: 'SPARK', desc: 'SPARK',
color: '#E46F13' color: '#E46F13'
}, },
'FLINK': {
desc: 'FLINK',
color: '#E46F13'
},
'MR': { 'MR': {
desc: 'MapReduce', desc: 'MapReduce',
color: '#A0A5CC' color: '#A0A5CC'

3
escheduler-ui/src/js/conf/home/pages/dag/_source/dag.scss

@ -70,6 +70,9 @@
.icos-SPARK { .icos-SPARK {
background: url("../img/toolbar_SPARK.png") no-repeat 50% 50%; background: url("../img/toolbar_SPARK.png") no-repeat 50% 50%;
} }
.icos-FLINK {
background: url("../img/toobar_flink.svg") no-repeat 50% 50%;
}
.icos-MR { .icos-MR {
background: url("../img/toolbar_MR.png") no-repeat 50% 50%; background: url("../img/toolbar_MR.png") no-repeat 50% 50%;
} }

8
escheduler-ui/src/js/conf/home/pages/dag/_source/formModel/formModel.vue

@ -138,6 +138,12 @@
ref="SPARK" ref="SPARK"
:backfill-item="backfillItem"> :backfill-item="backfillItem">
</m-spark> </m-spark>
<m-flink
v-if="taskType === 'FLINK'"
@on-params="_onParams"
ref="FLINK"
:backfill-item="backfillItem">
</m-flink>
<!-- mr node --> <!-- mr node -->
<m-mr <m-mr
v-if="taskType === 'MR'" v-if="taskType === 'MR'"
@ -178,6 +184,7 @@
import i18n from '@/module/i18n' import i18n from '@/module/i18n'
import mShell from './tasks/shell' import mShell from './tasks/shell'
import mSpark from './tasks/spark' import mSpark from './tasks/spark'
import mFlink from './tasks/flink'
import mPython from './tasks/python' import mPython from './tasks/python'
import JSP from './../plugIn/jsPlumbHandle' import JSP from './../plugIn/jsPlumbHandle'
import mProcedure from './tasks/procedure' import mProcedure from './tasks/procedure'
@ -455,6 +462,7 @@
mSql, mSql,
mLog, mLog,
mSpark, mSpark,
mFlink,
mPython, mPython,
mDependent, mDependent,
mSelectInput, mSelectInput,

92
escheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/_source/dependItemList.vue

@ -1,15 +1,19 @@
<template> <template>
<div class="dep-list-model"> <div class="dep-list-model">
<div v-for="(el,$index) in dependItemList" class="list" @click="itemIndex = $index"> <div v-for="(el,$index) in dependItemList" :key='$index' class="list" @click="itemIndex = $index">
<x-select filterable :style="{width:isInstance ? '140px' : '162px'}" :disabled="isDetails" v-model="el.definitionId" @on-change="_onChangeDefinitionId"> <x-select filterable :style="{width:isInstance ? '450px' : '450px'}" :disabled="isDetails" v-model="el.projectId" @on-change="_onChangeProjectId">
<x-option v-for="item in projectList" :key="item.value" :value="item.value" :label="item.label">
</x-option>
</x-select>
<x-select filterable :style="{width:isInstance ? '450px' : '450px'}" :disabled="isDetails" v-model="el.definitionId" @on-change="_onChangeDefinitionId">
<x-option v-for="item in definitionList" :key="item.value" :value="item.value" :label="item.label"> <x-option v-for="item in definitionList" :key="item.value" :value="item.value" :label="item.label">
</x-option> </x-option>
</x-select> </x-select>
<x-select filterable :style="{width:isInstance ? '144px' : '156px'}" :disabled="isDetails" v-model="el.depTasks"> <x-select filterable :style="{width:isInstance ? '450px' : '450px'}" :disabled="isDetails" v-model="el.depTasks">
<x-option v-for="item in el.depTasksList || []" :key="item" :value="item" :label="item"> <x-option v-for="item in el.depTasksList || []" :key="item" :value="item" :label="item">
</x-option> </x-option>
</x-select> </x-select>
<x-select style="width: 80px;" v-model="el.cycle" :disabled="isDetails" @on-change="_onChangeCycle"> <x-select style="width: 150px;" v-model="el.cycle" :disabled="isDetails" @on-change="_onChangeCycle">
<x-option v-for="item in cycleList" :key="item.value" :value="item.value" :label="item.label"> <x-option v-for="item in cycleList" :key="item.value" :value="item.value" :label="item.label">
</x-option> </x-option>
</x-select> </x-select>
@ -45,6 +49,7 @@
return { return {
list: [], list: [],
definitionList: [], definitionList: [],
projectList: [],
cycleList: cycleList, cycleList: cycleList,
isInstance: false, isInstance: false,
itemIndex: null itemIndex: null
@ -53,7 +58,8 @@
mixins: [disabledState], mixins: [disabledState],
props: { props: {
dependItemList: Array, dependItemList: Array,
index: Number index: Number,
dependTaskList:Array
}, },
model: { model: {
prop: 'dependItemList', prop: 'dependItemList',
@ -72,9 +78,10 @@
let value = noArr[0] && noArr[0].value || null let value = noArr[0] && noArr[0].value || null
let val = value || this.definitionList[0].value let val = value || this.definitionList[0].value
// add task list // add task list
let projectId = this.projectList[0].value
this._getDependItemList(val).then(depTasksList => { this._getDependItemList(val).then(depTasksList => {
this.$nextTick(() => { this.$nextTick(() => {
this.$emit('dependItemListEvent', _.concat(this.dependItemList, this._rtNewParams(val, depTasksList))) this.$emit('dependItemListEvent', _.concat(this.dependItemList, this._rtNewParams(val, depTasksList,projectId)))
}) })
}) })
// remove tooltip // remove tooltip
@ -84,15 +91,25 @@
* remove task * remove task
*/ */
_remove (i) { _remove (i) {
this.dependItemList.splice(i, 1) this.dependTaskList[this.index].dependItemList.splice(i,1)
this._removeTip() this._removeTip()
if (!this.dependItemList.length || this.dependItemList.length === 0) {
if (!this.dependItemList.length) {
this.$emit('on-delete-all', { this.$emit('on-delete-all', {
index: this.index index: this.index
}) })
} }
}, },
_getProjectList () {
return new Promise((resolve, reject) => {
this.projectList = _.map(_.cloneDeep(this.store.state.dag.projectListS), v => {
return {
value: v.id,
label: v.name
}
})
resolve()
})
},
/** /**
* get processlist * get processlist
*/ */
@ -107,6 +124,19 @@
resolve() resolve()
}) })
}, },
_getProcessByProjectId (id) {
return new Promise((resolve, reject) => {
this.store.dispatch('dag/getProcessByProjectId', { projectId: id }).then(res => {
this.definitionList = _.map(_.cloneDeep(res), v => {
return {
value: v.id,
label: v.name
}
})
resolve(res)
})
})
},
/** /**
* get dependItemList * get dependItemList
*/ */
@ -126,6 +156,19 @@
/** /**
* change process get dependItemList * change process get dependItemList
*/ */
_onChangeProjectId ({ value }) {
this._getProcessByProjectId(value).then(definitionList => {
/*this.$set(this.dependItemList, this.itemIndex, this._dlOldParams(value, definitionList, item))*/
let definitionId = definitionList[0].id
this._getDependItemList(definitionId).then(depTasksList => {
let item = this.dependItemList[this.itemIndex]
// init set depTasks All
item.depTasks = 'ALL'
// set dependItemList item data
this.$set(this.dependItemList, this.itemIndex, this._cpOldParams(value,definitionId, definitionList,depTasksList, item))
})
})
},
_onChangeDefinitionId ({ value }) { _onChangeDefinitionId ({ value }) {
// get depItem list data // get depItem list data
this._getDependItemList(value).then(depTasksList => { this._getDependItemList(value).then(depTasksList => {
@ -141,19 +184,21 @@
this.$set(this.dependItemList[this.itemIndex], 'dateValue', list[0].value) this.$set(this.dependItemList[this.itemIndex], 'dateValue', list[0].value)
this.$set(this.dependItemList[this.itemIndex], 'dateValueList', list) this.$set(this.dependItemList[this.itemIndex], 'dateValueList', list)
}, },
_rtNewParams (value, depTasksList) { _rtNewParams (value, depTasksList,projectId) {
return { return {
projectId: projectId,
definitionId: value, definitionId: value,
depTasks: 'ALL', depTasks: 'ALL',
depTasksList: depTasksList, depTasksList: depTasksList,
cycle: 'day', cycle: 'day',
dateValue: 'last1Days', dateValue: 'today',
dateValueList: _.cloneDeep(dateValueList['day']), dateValueList: _.cloneDeep(dateValueList['day']),
state: '' state: ''
} }
}, },
_rtOldParams (value, depTasksList, item) { _rtOldParams (value,depTasksList, item) {
return { return {
projectId: item.projectId,
definitionId: value, definitionId: value,
depTasks: item.depTasks || 'ALL', depTasks: item.depTasks || 'ALL',
depTasksList: depTasksList, depTasksList: depTasksList,
@ -163,6 +208,20 @@
state: item.state state: item.state
} }
}, },
_cpOldParams (value,definitionId, definitionList,depTasksList, item) {
return {
projectId: value,
definitionList: definitionList,
definitionId: definitionId,
depTasks: item.depTasks || 'ALL',
depTasksList: depTasksList,
cycle: item.cycle,
dateValue: item.dateValue,
dateValueList: _.cloneDeep(dateValueList[item.cycle]),
state: item.state
}
},
/** /**
* remove tip * remove tip
*/ */
@ -178,11 +237,14 @@
// is type projects-instance-details // is type projects-instance-details
this.isInstance = this.router.history.current.name === 'projects-instance-details' this.isInstance = this.router.history.current.name === 'projects-instance-details'
// get processlist // get processlist
this._getProcessList().then(() => { this._getProjectList().then(() => {
let projectId = this.projectList[0].value
if (!this.dependItemList.length) { if (!this.dependItemList.length) {
this._getProcessByProjectId(projectId).then(definitionList => {
let value = this.definitionList[0].value let value = this.definitionList[0].value
this._getDependItemList(value).then(depTasksList => { this._getDependItemList(value).then(depTasksList => {
this.$emit('dependItemListEvent', _.concat(this.dependItemList, this._rtNewParams(value, depTasksList))) this.$emit('dependItemListEvent', _.concat(this.dependItemList, this._rtNewParams(value, depTasksList,projectId)))
})
}) })
} else { } else {
// get definitionId ids // get definitionId ids
@ -190,9 +252,11 @@
// get item list // get item list
this._getDependItemList(ids, false).then(res => { this._getDependItemList(ids, false).then(res => {
_.map(this.dependItemList, (v, i) => { _.map(this.dependItemList, (v, i) => {
this._getProcessByProjectId(v.projectId).then(definitionList => {
this.$set(this.dependItemList, i, this._rtOldParams(v.definitionId, ['ALL'].concat(_.map(res[v.definitionId] || [], v => v.name)), v)) this.$set(this.dependItemList, i, this._rtOldParams(v.definitionId, ['ALL'].concat(_.map(res[v.definitionId] || [], v => v.name)), v))
}) })
}) })
})
} }
}) })
}, },

13
escheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/dependent.vue

@ -22,7 +22,7 @@
v-if="dependTaskList.length"> v-if="dependTaskList.length">
{{relation === 'AND' ? $t('and') : $t('or')}} {{relation === 'AND' ? $t('and') : $t('or')}}
</span> </span>
<div class="dep-list" v-for="(el,$index) in dependTaskList"> <div class="dep-list" v-for="(el,$index) in dependTaskList" :key='$index'>
<span class="dep-line-pie" <span class="dep-line-pie"
v-if="el.dependItemList.length" v-if="el.dependItemList.length"
@click="!isDetails && _setRelation($index)"> @click="!isDetails && _setRelation($index)">
@ -37,8 +37,10 @@
&#xe611; &#xe611;
</i> </i>
<m-depend-item-list <m-depend-item-list
:dependTaskList='dependTaskList'
v-model="el.dependItemList" v-model="el.dependItemList"
@on-delete-all="_onDeleteAll" @on-delete-all="_onDeleteAll"
@getDependTaskList="getDependTaskList"
:index="$index"> :index="$index">
</m-depend-item-list> </m-depend-item-list>
</div> </div>
@ -84,11 +86,18 @@
$('body').find('.tooltip.fade.top.in').remove() $('body').find('.tooltip.fade.top.in').remove()
}, },
_onDeleteAll (i) { _onDeleteAll (i) {
this._deleteDep(i) this.dependTaskList.map((item,i)=>{
if(item.dependItemList.length === 0){
this.dependTaskList.splice(i,1)
}
})
}, },
_setGlobalRelation () { _setGlobalRelation () {
this.relation = this.relation === 'AND' ? 'OR' : 'AND' this.relation = this.relation === 'AND' ? 'OR' : 'AND'
}, },
getDependTaskList(i){
// console.log('getDependTaskList',i)
},
_setRelation (i) { _setRelation (i) {
this.dependTaskList[i].relation = this.dependTaskList[i].relation === 'AND' ? 'OR' : 'AND' this.dependTaskList[i].relation = this.dependTaskList[i].relation === 'AND' ? 'OR' : 'AND'
}, },

388
escheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/flink.vue

@ -0,0 +1,388 @@
<template>
<div class="flink-model">
<m-list-box>
<div slot="text">{{$t('Program Type')}}</div>
<div slot="content">
<x-select
style="width: 130px;"
v-model="programType"
:disabled="isDetails">
<x-option
v-for="city in programTypeList"
:key="city.code"
:value="city.code"
:label="city.code">
</x-option>
</x-select>
</div>
</m-list-box>
<m-list-box v-if="programType !== 'PYTHON'">
<div slot="text">{{$t('Main class')}}</div>
<div slot="content">
<x-input
:disabled="isDetails"
type="input"
v-model="mainClass"
:placeholder="$t('Please enter main class')"
autocomplete="off">
</x-input>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Main jar package')}}</div>
<div slot="content">
<x-select
style="width: 100%;"
:placeholder="$t('Please enter main jar package')"
v-model="mainJar"
filterable
:disabled="isDetails">
<x-option
v-for="city in mainJarList"
:key="city.code"
:value="city.code"
:label="city.code">
</x-option>
</x-select>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Deploy Mode')}}</div>
<div slot="content">
<x-radio-group v-model="deployMode">
<x-radio :label="'cluster'" :disabled="isDetails"></x-radio>
</x-radio-group>
</div>
</m-list-box>
<div class="list-box-4p">
<div class="clearfix list">
<span class="sp1">{{$t('slot')}}</span>
<span class="sp2">
<x-input
:disabled="isDetails"
type="input"
v-model="slot"
:placeholder="$t('Please enter driver core number')"
style="width: 200px;"
autocomplete="off">
</x-input>
</span>
<span class="sp1 sp3">{{$t('taskManager')}}</span>
<span class="sp2">
<x-input
:disabled="isDetails"
type="input"
v-model="taskManager"
:placeholder="$t('Please enter driver memory use')"
style="width: 186px;"
autocomplete="off">
</x-input>
</span>
</div>
<div class="clearfix list">
<span class="sp1">{{$t('jobManagerMemory')}}</span>
<span class="sp2">
<x-input
:disabled="isDetails"
type="input"
v-model="jobManagerMemory"
:placeholder="$t('Please enter the number of Executor')"
style="width: 200px;"
autocomplete="off">
</x-input>
</span>
<span class="sp1 sp3">{{$t('taskManagerMemory')}}</span>
<span class="sp2">
<x-input
:disabled="isDetails"
type="input"
v-model="taskManagerMemory"
:placeholder="$t('Please enter the Executor memory')"
style="width: 186px;"
autocomplete="off">
</x-input>
</span>
</div>
</div>
<m-list-box>
<div slot="text">{{$t('Command-line parameters')}}</div>
<div slot="content">
<x-input
:autosize="{minRows:2}"
:disabled="isDetails"
type="textarea"
v-model="mainArgs"
:placeholder="$t('Please enter Command-line parameters')"
autocomplete="off">
</x-input>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Other parameters')}}</div>
<div slot="content">
<x-input
:disabled="isDetails"
:autosize="{minRows:2}"
type="textarea"
v-model="others"
:placeholder="$t('Please enter other parameters')">
</x-input>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Resources')}}</div>
<div slot="content">
<m-resources
ref="refResources"
@on-resourcesData="_onResourcesData"
:resource-list="resourceList">
</m-resources>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Custom Parameters')}}</div>
<div slot="content">
<m-local-params
ref="refLocalParams"
@on-local-params="_onLocalParams"
:udp-list="localParams"
:hide="false">
</m-local-params>
</div>
</m-list-box>
</div>
</template>
<script>
import _ from 'lodash'
import i18n from '@/module/i18n'
import mLocalParams from './_source/localParams'
import mListBox from './_source/listBox'
import mResources from './_source/resources'
import disabledState from '@/module/mixin/disabledState'
export default {
name: 'flink',
data () {
return {
// Main function class
mainClass: '',
// Master jar package
mainJar: null,
// Master jar package(List)
mainJarList: [],
// Deployment method
deployMode: 'cluster',
// Resource(list)
resourceList: [],
// Custom function
localParams: [],
// number of slots per TaskManager (-ys)
slot: 1,
// number of TaskManagers (-yn)
taskManager: '2',
// JobManager memory (-yjm)
jobManagerMemory: '1G',
// TaskManager memory (-ytm)
taskManagerMemory: '2G',
// Executor Number of cores
executorCores: 2,
// Command line argument
mainArgs: '',
// Other parameters
others: '',
// Program type
programType: 'SCALA',
// Program type(List)
programTypeList: [{ code: 'JAVA' }, { code: 'SCALA' }, { code: 'PYTHON' }]
}
},
props: {
backfillItem: Object
},
mixins: [disabledState],
methods: {
/**
* return localParams
*/
_onLocalParams (a) {
this.localParams = a
},
/**
* return resourceList
*/
_onResourcesData (a) {
this.resourceList = a
},
/**
* verification
*/
_verification () {
if (this.programType !== 'PYTHON' && !this.mainClass) {
this.$message.warning(`${i18n.$t('Please enter main class')}`)
return false
}
if (!this.mainJar) {
this.$message.warning(`${i18n.$t('Please enter main jar package')}`)
return false
}
if (!this.jobManagerMemory) {
this.$message.warning(`${i18n.$t('Please enter the number of Executor')}`)
return false
}
if (!Number.isInteger(parseInt(this.jobManagerMemory))) {
this.$message.warning(`${i18n.$t('The number of Executors should be a positive integer')}`)
return false
}
if (!this.taskManagerMemory) {
this.$message.warning(`${i18n.$t('Please enter the Executor memory')}`)
return false
}
if (!Number.isInteger(parseInt(this.taskManagerMemory))) {
this.$message.warning(`${i18n.$t('Memory should be a positive integer')}`)
return false
}
if (!this.executorCores) {
this.$message.warning(`${i18n.$t('Please enter Executor core number')}`)
return false
}
if (!Number.isInteger(parseInt(this.executorCores))) {
this.$message.warning(`${i18n.$t('Core number should be positive integer')}`)
return false
}
if (!this.$refs.refResources._verifResources()) {
return false
}
// localParams Subcomponent verification
if (!this.$refs.refLocalParams._verifProp()) {
return false
}
// storage
this.$emit('on-params', {
mainClass: this.mainClass,
mainJar: {
res: this.mainJar
},
deployMode: this.deployMode,
resourceList: this.resourceList,
localParams: this.localParams,
slot: this.slot,
taskManager: this.taskManager,
jobManagerMemory: this.jobManagerMemory,
taskManagerMemory: this.taskManagerMemory,
executorCores: this.executorCores,
mainArgs: this.mainArgs,
others: this.others,
programType: this.programType
})
return true
},
/**
* get resources list
*/
_getResourcesList () {
return new Promise((resolve, reject) => {
let isJar = (alias) => {
return alias.substring(alias.lastIndexOf('.') + 1, alias.length) !== 'jar'
}
this.mainJarList = _.map(_.cloneDeep(this.store.state.dag.resourcesListS), v => {
return {
id: v.id,
code: v.alias,
disabled: isJar(v.alias)
}
})
resolve()
})
}
},
watch: {
// Listening type
programType (type) {
if (type === 'PYTHON') {
this.mainClass = ''
}
}
},
created () {
this._getResourcesList().then(() => {
let o = this.backfillItem
// Non-null objects represent backfill
if (!_.isEmpty(o)) {
this.mainClass = o.params.mainClass || ''
this.mainJar = o.params.mainJar.res || ''
this.deployMode = o.params.deployMode || ''
this.slot = o.params.slot || 1
this.taskManager = o.params.taskManager || '2'
this.jobManagerMemory = o.params.jobManagerMemory || '1G'
this.taskManagerMemory = o.params.taskManagerMemory || '2G'
this.mainArgs = o.params.mainArgs || ''
this.others = o.params.others
this.programType = o.params.programType || 'SCALA'
// backfill resourceList
let resourceList = o.params.resourceList || []
if (resourceList.length) {
this.resourceList = resourceList
}
// backfill localParams
let localParams = o.params.localParams || []
if (localParams.length) {
this.localParams = localParams
}
}
})
},
mounted () {
},
components: { mLocalParams, mListBox, mResources }
}
</script>
<style lang="scss" rel="stylesheet/scss">
.flink-model {
.list-box-4p {
.list {
margin-bottom: 14px;
.sp1 {
float: left;
width: 112px;
text-align: right;
margin-right: 10px;
font-size: 14px;
color: #777;
display: inline-block;
padding-top: 6px;
}
.sp2 {
float: left;
margin-right: 4px;
}
.sp3 {
width: 176px;
}
}
}
}
</style>

2
escheduler-ui/src/js/conf/home/pages/dag/_source/plugIn/jsPlumbHandle.js

@ -645,7 +645,6 @@ JSP.prototype.saveStore = function () {
}) })
}) })
console.log(tasksAll())
_.map(tasksAll(), v => { _.map(tasksAll(), v => {
locations[v.id] = { locations[v.id] = {
@ -656,7 +655,6 @@ JSP.prototype.saveStore = function () {
} }
}) })
console.log(locations)
// Storage node // Storage node
store.commit('dag/setTasks', tasks) store.commit('dag/setTasks', tasks)

4
escheduler-ui/src/js/conf/home/pages/dag/definitionDetails.vue

@ -25,7 +25,7 @@
props: {}, props: {},
methods: { methods: {
...mapMutations('dag', ['resetParams', 'setIsDetails']), ...mapMutations('dag', ['resetParams', 'setIsDetails']),
...mapActions('dag', ['getProcessList', 'getResourcesList', 'getProcessDetails']), ...mapActions('dag', ['getProcessList','getProjectList', 'getResourcesList', 'getProcessDetails']),
...mapActions('security', ['getTenantList','getWorkerGroupsAll']), ...mapActions('security', ['getTenantList','getWorkerGroupsAll']),
/** /**
* init * init
@ -40,6 +40,8 @@
this.getProcessDetails(this.$route.params.id), this.getProcessDetails(this.$route.params.id),
// get process definition // get process definition
this.getProcessList(), this.getProcessList(),
// get project
this.getProjectList(),
// get resource // get resource
this.getResourcesList(), this.getResourcesList(),
// get worker group list // get worker group list

211
escheduler-ui/src/js/conf/home/pages/dag/img/toobar_flink.svg

File diff suppressed because one or more lines are too long

(new image: SVG, 79 KiB)

4
escheduler-ui/src/js/conf/home/pages/dag/index.vue

@ -24,7 +24,7 @@
props: {}, props: {},
methods: { methods: {
...mapMutations('dag', ['resetParams']), ...mapMutations('dag', ['resetParams']),
...mapActions('dag', ['getProcessList', 'getResourcesList']), ...mapActions('dag', ['getProcessList','getProjectList', 'getResourcesList']),
...mapActions('security', ['getTenantList','getWorkerGroupsAll']), ...mapActions('security', ['getTenantList','getWorkerGroupsAll']),
/** /**
* init * init
@ -37,6 +37,8 @@
Promise.all([ Promise.all([
// get process definition // get process definition
this.getProcessList(), this.getProcessList(),
// get project
this.getProjectList(),
// get resource // get resource
this.getResourcesList(), this.getResourcesList(),
// get worker group list // get worker group list

4
escheduler-ui/src/js/conf/home/pages/dag/instanceDetails.vue

@ -27,7 +27,7 @@
props: {}, props: {},
methods: { methods: {
...mapMutations('dag', ['setIsDetails', 'resetParams']), ...mapMutations('dag', ['setIsDetails', 'resetParams']),
...mapActions('dag', ['getProcessList', 'getResourcesList', 'getInstancedetail']), ...mapActions('dag', ['getProcessList','getProjectList', 'getResourcesList', 'getInstancedetail']),
...mapActions('security', ['getTenantList','getWorkerGroupsAll']), ...mapActions('security', ['getTenantList','getWorkerGroupsAll']),
/** /**
* init * init
@ -42,6 +42,8 @@
this.getInstancedetail(this.$route.params.id), this.getInstancedetail(this.$route.params.id),
// get process definition // get process definition
this.getProcessList(), this.getProcessList(),
// get project
this.getProjectList(),
// get resources // get resources
this.getResourcesList(), this.getResourcesList(),
// get worker group list // get worker group list

2
escheduler-ui/src/js/conf/home/pages/projects/pages/definition/pages/list/_source/email.vue

@ -271,13 +271,11 @@
$(this).prop('comStart', true) $(this).prop('comStart', true)
// Check mailbox index initialization // Check mailbox index initialization
this.activeIndex = null this.activeIndex = null
// console.log('');
this.isCn = true this.isCn = true
}).on('compositionend', () => { }).on('compositionend', () => {
$(this).prop('comStart', false) $(this).prop('comStart', false)
// Check mailbox index initialization // Check mailbox index initialization
this.activeIndex = null this.activeIndex = null
// console.log('');
this.isCn = false this.isCn = false
}) })
} }

5
escheduler-ui/src/js/conf/home/pages/projects/pages/definition/pages/list/_source/timing.vue

@ -48,7 +48,7 @@
<div class="clearfix list"> <div class="clearfix list">
<div style = "padding-left: 150px;">{{$t('Next five execution times')}}</div> <div style = "padding-left: 150px;">{{$t('Next five execution times')}}</div>
<ul style = "padding-left: 150px;"> <ul style = "padding-left: 150px;">
<li v-for="time in previewTimes">{{time}}</li> <li v-for="(time,i) in previewTimes" :key='i'>{{time}}</li>
</ul> </ul>
</div> </div>
@ -289,6 +289,9 @@
watch: { watch: {
}, },
created () { created () {
if(this.item.crontab !== null){
this.crontab = this.item.crontab
}
this.receivers = _.cloneDeep(this.receiversD) this.receivers = _.cloneDeep(this.receiversD)
this.receiversCc = _.cloneDeep(this.receiversCcD) this.receiversCc = _.cloneDeep(this.receiversCcD)
}, },

1
escheduler-ui/src/js/conf/home/pages/projects/pages/instance/pages/list/_source/list.vue

@ -123,6 +123,7 @@
shape="circle" shape="circle"
size="xsmall" size="xsmall"
data-toggle="tooltip" data-toggle="tooltip"
:disabled="item.state === 'RUNNING_EXEUTION'"
:title="$t('delete')"> :title="$t('delete')">
</x-button> </x-button>
</template> </template>

1
escheduler-ui/src/js/conf/home/pages/security/pages/users/_source/createUser.vue

@ -203,7 +203,6 @@
}, },
_submit () { _submit () {
this.$refs['popup'].spinnerLoading = true this.$refs['popup'].spinnerLoading = true
console.log(this.tenantId.id)
let param = { let param = {
userName: this.userName, userName: this.userName,
userPassword: this.userPassword, userPassword: this.userPassword,

29
escheduler-ui/src/js/conf/home/store/dag/actions.js

@ -261,6 +261,35 @@ export default {
}) })
}) })
}, },
/**
* Get a list of project
*/
getProjectList ({ state }, payload) {
return new Promise((resolve, reject) => {
if (state.projectListS.length) {
resolve()
return
}
io.get(`projects/queryAllProjectList`, payload, res => {
state.projectListS = res.data
resolve(res.data)
}).catch(res => {
reject(res)
})
})
},
/**
* Get a list of process definitions by project id
*/
getProcessByProjectId ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get(`projects/${state.projectName}/process/queryProccessDefinitionAllByProjectId`, payload, res => {
resolve(res.data)
}).catch(res => {
reject(res)
})
})
},
/** /**
* get datasource * get datasource
*/ */

1
escheduler-ui/src/js/conf/home/store/dag/mutations.js

@ -109,6 +109,7 @@ export default {
state.tenantId = payload && payload.tenantId || -1 state.tenantId = payload && payload.tenantId || -1
state.processListS = payload && payload.processListS || [] state.processListS = payload && payload.processListS || []
state.resourcesListS = payload && payload.resourcesListS || [] state.resourcesListS = payload && payload.resourcesListS || []
state.projectListS = payload && payload.projectListS || []
state.isDetails = payload && payload.isDetails || false state.isDetails = payload && payload.isDetails || false
state.runFlag = payload && payload.runFlag || '' state.runFlag = payload && payload.runFlag || ''
state.locations = payload && payload.locations || {} state.locations = payload && payload.locations || {}

2
escheduler-ui/src/js/conf/home/store/dag/state.js

@ -47,6 +47,8 @@ export default {
syncDefine: true, syncDefine: true,
// tasks processList // tasks processList
processListS: [], processListS: [],
// projectList
projectListS: [],
// tasks resourcesList // tasks resourcesList
resourcesListS: [], resourcesListS: [],
// tasks datasource Type // tasks datasource Type

6
escheduler-ui/src/js/module/i18n/locale/zh_CN.js

@ -476,5 +476,9 @@ export default {
'warning of timeout': '超时告警', 'warning of timeout': '超时告警',
'Next five execution times': '接下来五次执行时间', 'Next five execution times': '接下来五次执行时间',
'Execute time': '执行时间', 'Execute time': '执行时间',
'Complement range': '补数范围' 'Complement range': '补数范围',
'slot':'slot数量',
'taskManager':'taskManage数量',
'jobManagerMemory':'jobManager内存数',
'taskManagerMemory':'taskManager内存数'
} }

1
escheduler-ui/src/lib/@analysys/ans-ui/package.json

@ -53,7 +53,6 @@
"babel-plugin-transform-runtime": "^6.23.0", "babel-plugin-transform-runtime": "^6.23.0",
"babel-plugin-transform-vue-jsx": "^3.7.0", "babel-plugin-transform-vue-jsx": "^3.7.0",
"babel-preset-env": "^1.5.2", "babel-preset-env": "^1.5.2",
"babel-runtime": "^6.26.0",
"cross-env": "^5.2.0", "cross-env": "^5.2.0",
"css-loader": "0.28.8", "css-loader": "0.28.8",
"cssnano": "^4.0.3", "cssnano": "^4.0.3",

1
escheduler-ui/src/lib/@fedor/io/package.json

@ -23,7 +23,6 @@
"babel-plugin-transform-class-properties": "^6.24.1", "babel-plugin-transform-class-properties": "^6.24.1",
"babel-plugin-transform-runtime": "^6.23.0", "babel-plugin-transform-runtime": "^6.23.0",
"babel-preset-env": "^1.5.2", "babel-preset-env": "^1.5.2",
"babel-runtime": "^6.26.0",
"body-parser": "^1.17.2", "body-parser": "^1.17.2",
"chai": "^4.1.1", "chai": "^4.1.1",
"cors": "^2.8.4", "cors": "^2.8.4",

1
escheduler-ui/src/lib/@vue/crontab/example/app.vue

@ -36,7 +36,6 @@
_lang (type) { _lang (type) {
this.is = false this.is = false
this.lang = type this.lang = type
console.log(this.lang)
setTimeout(() => { setTimeout(() => {
this.is = true this.is = true
}, 1) }, 1)

BIN
escheduler-ui/src/view/docs/zh_CN/_book/images/flink_edit.png

Binary file not shown. (new image: PNG, 121 KiB)

254
install.sh

@ -34,252 +34,257 @@ fi
source ${workDir}/conf/config/run_config.conf source ${workDir}/conf/config/run_config.conf
source ${workDir}/conf/config/install_config.conf source ${workDir}/conf/config/install_config.conf
# mysql配置 # mysql config
# mysql 地址,端口 # mysql address and port
mysqlHost="192.168.xx.xx:3306" mysqlHost="192.168.xx.xx:3306"
# mysql 数据库名称 # mysql database
mysqlDb="escheduler" mysqlDb="escheduler"
# mysql 用户名 # mysql username
mysqlUserName="xx" mysqlUserName="xx"
# mysql 密码 # mysql password
# 注意:如果有特殊字符,请用 \ 转移符进行转移 # Note: if the password contains special characters, escape them with the \ escape character
mysqlPassword="xx" mysqlPassword="xx"
# conf/config/install_config.conf配置 # conf/config/install_config.conf config
# 注意:安装路径,不要当前路径(pwd)一样 # Note: the installation path must not be the same as the current path (pwd)
installPath="/data1_1T/escheduler" installPath="/data1_1T/escheduler"
# 部署用户 # deployment user
# 注意:部署用户需要有sudo权限及操作hdfs的权限,如果开启hdfs,根目录需要自行创建 # Note: the deployment user needs sudo privileges and permission to operate hdfs. If hdfs is enabled, you need to create the root directory yourself
deployUser="escheduler" deployUser="escheduler"
# zk集群 # zk cluster
zkQuorum="192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181" zkQuorum="192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181"
# 安装hosts # install hosts
# 注意:安装调度的机器hostname列表,如果是伪分布式,则只需写一个伪分布式hostname即可 # Note: hostname list of the machines where the scheduler is installed; for pseudo-distributed mode, a single hostname is enough
ips="ark0,ark1,ark2,ark3,ark4" ips="ark0,ark1,ark2,ark3,ark4"
# conf/config/run_config.conf配置 # conf/config/run_config.conf config
# 运行Master的机器 # run master machine
# 注意:部署master的机器hostname列表 # Note: hostname list of the machines deploying master
masters="ark0,ark1" masters="ark0,ark1"
# 运行Worker的机器 # run worker machine
# 注意:部署worker的机器hostname列表 # note: list of machine hostnames for deploying workers
workers="ark2,ark3,ark4" workers="ark2,ark3,ark4"
# 运行Alert的机器 # run alert machine
# 注意:部署alert server的机器hostname列表 # note: list of machine hostnames for deploying alert server
alertServer="ark3" alertServer="ark3"
# 运行Api的机器 # run api machine
# 注意:部署api server的机器hostname列表 # note: list of machine hostnames for deploying api server
apiServers="ark1" apiServers="ark1"
# alert配置 # alert config
# 邮件协议 # mail protocol
mailProtocol="SMTP" mailProtocol="SMTP"
# 邮件服务host # mail server host
mailServerHost="smtp.exmail.qq.com" mailServerHost="smtp.exmail.qq.com"
# 邮件服务端口 # mail server port
mailServerPort="25" mailServerPort="25"
# 发送人 # sender
mailSender="xxxxxxxxxx" mailSender="xxxxxxxxxx"
# 发送人密码 # sender password
mailPassword="xxxxxxxxxx" mailPassword="xxxxxxxxxx"
# TLS邮件协议支持 # TLS mail protocol support
starttlsEnable="false" starttlsEnable="false"
# SSL邮件协议支持 # SSL mail protocol support
# 注意:默认开启的是SSL协议,TLS和SSL只能有一个处于true状态 # note: The SSL protocol is enabled by default.
# only one of TLS and SSL can be in the true state.
sslEnable="true" sslEnable="true"
# 下载Excel路径 # download excel path
xlsFilePath="/tmp/xls" xlsFilePath="/tmp/xls"
# 企业微信企业ID配置 # Enterprise WeChat Enterprise ID Configuration
enterpriseWechatCorpId="xxxxxxxxxx" enterpriseWechatCorpId="xxxxxxxxxx"
# 企业微信应用Secret配置 # Enterprise WeChat application Secret configuration
enterpriseWechatSecret="xxxxxxxxxx" enterpriseWechatSecret="xxxxxxxxxx"
# 企业微信应用AgentId配置 # Enterprise WeChat Application AgentId Configuration
enterpriseWechatAgentId="xxxxxxxxxx" enterpriseWechatAgentId="xxxxxxxxxx"
# 企业微信用户配置,多个用户以,分割 # Enterprise WeChat user configuration; separate multiple users with commas
enterpriseWechatUsers="xxxxx,xxxxx" enterpriseWechatUsers="xxxxx,xxxxx"
#是否启动监控自启动脚本 # whether to start the monitor auto-restart script
monitorServerState="false" monitorServerState="false"
# 资源中心上传选择存储方式:HDFS,S3,NONE # resource center storage type for uploads: HDFS, S3, NONE
resUploadStartupType="NONE" resUploadStartupType="NONE"
# 如果resUploadStartupType为HDFS,defaultFS写namenode地址,支持HA,需要将core-site.xml和hdfs-site.xml放到conf目录下 # if resUploadStartupType is HDFS, set defaultFS to the namenode address; for HA, put core-site.xml and hdfs-site.xml in the conf directory.
# 如果是S3,则写S3地址,比如说:s3a://escheduler,注意,一定要创建根目录/escheduler # if S3, write the S3 address, for example: s3a://escheduler;
# Note: for s3, be sure to create the root directory /escheduler
defaultFS="hdfs://mycluster:8020" defaultFS="hdfs://mycluster:8020"
# 如果配置了S3,则需要有以下配置 # if S3 is configured, the following configuration is required.
s3Endpoint="http://192.168.xx.xx:9010" s3Endpoint="http://192.168.xx.xx:9010"
s3AccessKey="xxxxxxxxxx" s3AccessKey="xxxxxxxxxx"
s3SecretKey="xxxxxxxxxx" s3SecretKey="xxxxxxxxxx"
# resourcemanager HA配置,如果是单resourcemanager,这里为yarnHaIps="" # resourcemanager HA configuration, if it is a single resourcemanager, here is yarnHaIps=""
yarnHaIps="192.168.xx.xx,192.168.xx.xx" yarnHaIps="192.168.xx.xx,192.168.xx.xx"
# 如果是单 resourcemanager,只需要配置一个主机名称,如果是resourcemanager HA,则默认配置就好 # if it is a single resourcemanager, you only need to configure one host name. If it is resourcemanager HA, the default configuration is fine.
singleYarnIp="ark1" singleYarnIp="ark1"
# hdfs根路径,根路径的owner必须是部署用户。1.1.0之前版本不会自动创建hdfs根目录,需要自行创建 # hdfs root path, the owner of the root path must be the deployment user.
# versions prior to 1.1.0 do not automatically create the hdfs root directory, you need to create it yourself.
hdfsPath="/escheduler" hdfsPath="/escheduler"
# 拥有在hdfs根路径/下创建目录权限的用户 # user with permission to create directories under the hdfs root path /
# 注意:如果开启了kerberos,则直接hdfsRootUser="",就可以 # Note: if kerberos is enabled, hdfsRootUser="" can be used directly.
hdfsRootUser="hdfs" hdfsRootUser="hdfs"
# common 配置 # common config
# 程序路径 # Program root path
programPath="/tmp/escheduler" programPath="/tmp/escheduler"
#下载路径 # download path
downloadPath="/tmp/escheduler/download" downloadPath="/tmp/escheduler/download"
# 任务执行路径 # task execute path
execPath="/tmp/escheduler/exec" execPath="/tmp/escheduler/exec"
# SHELL环境变量路径 # SHELL environmental variable path
shellEnvPath="$installPath/conf/env/.escheduler_env.sh" shellEnvPath="$installPath/conf/env/.escheduler_env.sh"
# 资源文件的后缀 # suffix of the resource file
resSuffixs="txt,log,sh,conf,cfg,py,java,sql,hql,xml" resSuffixs="txt,log,sh,conf,cfg,py,java,sql,hql,xml"
# 开发状态,如果是true,对于SHELL脚本可以在execPath目录下查看封装后的SHELL脚本,如果是false则执行完成直接删除 # development status; if true, the wrapped SHELL script can be inspected in the execPath directory.
# If false, it is deleted as soon as execution finishes
devState="true" devState="true"
# kerberos 配置 # kerberos config
# kerberos 是否启动 # whether kerberos is enabled
kerberosStartUp="false" kerberosStartUp="false"
# kdc krb5 配置文件路径 # kdc krb5 config file path
krb5ConfPath="$installPath/conf/krb5.conf" krb5ConfPath="$installPath/conf/krb5.conf"
# keytab 用户名 # keytab username
keytabUserName="hdfs-mycluster@ESZ.COM" keytabUserName="hdfs-mycluster@ESZ.COM"
# 用户 keytab路径 # username keytab path
keytabPath="$installPath/conf/hdfs.headless.keytab" keytabPath="$installPath/conf/hdfs.headless.keytab"
# zk 配置 # zk config
# zk根目录 # zk root directory
zkRoot="/escheduler" zkRoot="/escheduler"
# 用来记录挂掉机器的zk目录 # zk directory used to record dead servers
zkDeadServers="/escheduler/dead-servers" zkDeadServers="/escheduler/dead-servers"
# masters目录 # masters directory
zkMasters="/escheduler/masters" zkMasters="$zkRoot/masters"
# workers目录 # workers directory
zkWorkers="/escheduler/workers" zkWorkers="$zkRoot/workers"
# zk master分布式锁 # zk master distributed lock
mastersLock="/escheduler/lock/masters" mastersLock="$zkRoot/lock/masters"
# zk worker分布式锁 # zk worker distributed lock
workersLock="/escheduler/lock/workers" workersLock="$zkRoot/lock/workers"
# zk master容错分布式锁 # zk master fault-tolerant distributed lock
mastersFailover="/escheduler/lock/failover/masters" mastersFailover="$zkRoot/lock/failover/masters"
# zk worker容错分布式锁 # zk worker fault-tolerant distributed lock
workersFailover="/escheduler/lock/failover/workers" workersFailover="$zkRoot/lock/failover/workers"
# zk master启动容错分布式锁 # zk master start fault tolerant distributed lock
mastersStartupFailover="/escheduler/lock/failover/startup-masters" mastersStartupFailover="$zkRoot/lock/failover/startup-masters"
# zk session 超时 # zk session timeout
zkSessionTimeout="300" zkSessionTimeout="300"
# zk 连接超时 # zk connection timeout
zkConnectionTimeout="300" zkConnectionTimeout="300"
# zk 重试间隔 # zk retry interval
zkRetrySleep="100" zkRetrySleep="100"
# zk重试最大次数 # zk retry maximum number of times
zkRetryMaxtime="5" zkRetryMaxtime="5"
# master 配置 # master config
# master执行线程最大数,流程实例的最大并行度 # master execution thread maximum number, maximum parallelism of process instance
masterExecThreads="100" masterExecThreads="100"
# master任务执行线程最大数,每一个流程实例的最大并行度 # the maximum number of master task execution threads, the maximum degree of parallelism for each process instance
masterExecTaskNum="20" masterExecTaskNum="20"
# master心跳间隔 # master heartbeat interval
masterHeartbeatInterval="10" masterHeartbeatInterval="10"
# master任务提交重试次数 # master task submission retries
masterTaskCommitRetryTimes="5" masterTaskCommitRetryTimes="5"
# master任务提交重试时间间隔 # master task submission retry interval
masterTaskCommitInterval="100" masterTaskCommitInterval="100"
# master最大cpu平均负载,用来判断master是否还有执行能力 # master maximum cpu average load, used to determine whether the master has execution capability
masterMaxCpuLoadAvg="10" masterMaxCpuLoadAvg="10"
# master预留内存,用来判断master是否还有执行能力 # master reserved memory, used to determine whether the master still has execution capability
masterReservedMemory="1" masterReservedMemory="1"
# worker 配置 # worker config
# worker执行线程 # worker execution thread
workerExecThreads="100" workerExecThreads="100"
# worker心跳间隔 # worker heartbeat interval
workerHeartbeatInterval="10" workerHeartbeatInterval="10"
# worker一次抓取任务数 # worker number of fetch tasks
workerFetchTaskNum="3" workerFetchTaskNum="3"
# worker最大cpu平均负载,用来判断worker是否还有执行能力,保持系统默认,默认为cpu核数的2倍,当负载达到2倍时, # worker maximum cpu average load, used to determine whether the worker still has execution capability;
# keep the system default, which is twice the number of cpu cores
#workerMaxCupLoadAvg="10" #workerMaxCupLoadAvg="10"
# worker预留内存,用来判断master是否还有执行能力 # worker reserved memory, used to determine whether the worker still has execution capability
workerReservedMemory="1" workerReservedMemory="1"
# api 配置 # api config
# api 服务端口 # api server port
apiServerPort="12345" apiServerPort="12345"
# api session 超时 # api session timeout
apiServerSessionTimeout="7200" apiServerSessionTimeout="7200"
# api 上下文路径 # api server context path
apiServerContextPath="/escheduler/" apiServerContextPath="/escheduler/"
# spring 最大文件大小 # spring max file size
springMaxFileSize="1024MB" springMaxFileSize="1024MB"
# spring 最大请求文件大小 # spring max request size
springMaxRequestSize="1024MB" springMaxRequestSize="1024MB"
# api 最大post请求大小 # api max http post size
apiMaxHttpPostSize="5000000" apiMaxHttpPostSize="5000000"
# 1,替换文件 # 1,replace file
echo "1,替换文件" echo "1,replace file"
sed -i ${txt} "s#spring.datasource.url.*#spring.datasource.url=jdbc:mysql://${mysqlHost}/${mysqlDb}?characterEncoding=UTF-8#g" conf/dao/data_source.properties sed -i ${txt} "s#spring.datasource.url.*#spring.datasource.url=jdbc:mysql://${mysqlHost}/${mysqlDb}?characterEncoding=UTF-8#g" conf/dao/data_source.properties
sed -i ${txt} "s#spring.datasource.username.*#spring.datasource.username=${mysqlUserName}#g" conf/dao/data_source.properties sed -i ${txt} "s#spring.datasource.username.*#spring.datasource.username=${mysqlUserName}#g" conf/dao/data_source.properties
sed -i ${txt} "s#spring.datasource.password.*#spring.datasource.password=${mysqlPassword}#g" conf/dao/data_source.properties sed -i ${txt} "s#spring.datasource.password.*#spring.datasource.password=${mysqlPassword}#g" conf/dao/data_source.properties
@ -375,8 +380,8 @@ sed -i ${txt} "s#alertServer.*#alertServer=${alertServer}#g" conf/config/run_con
sed -i ${txt} "s#apiServers.*#apiServers=${apiServers}#g" conf/config/run_config.conf sed -i ${txt} "s#apiServers.*#apiServers=${apiServers}#g" conf/config/run_config.conf
# 2,创建目录 # 2,create directory
echo "2,创建目录" echo "2,create directory"
if [ ! -d $installPath ];then if [ ! -d $installPath ];then
sudo mkdir -p $installPath sudo mkdir -p $installPath
@ -387,22 +392,22 @@ hostsArr=(${ips//,/ })
for host in ${hostsArr[@]} for host in ${hostsArr[@]}
do do
# 如果programPath不存在,则创建 # create if programPath does not exist
if ! ssh $host test -e $programPath; then if ! ssh $host test -e $programPath; then
ssh $host "sudo mkdir -p $programPath;sudo chown -R $deployUser:$deployUser $programPath" ssh $host "sudo mkdir -p $programPath;sudo chown -R $deployUser:$deployUser $programPath"
fi fi
# 如果downloadPath不存在,则创建 # create if downloadPath does not exist
if ! ssh $host test -e $downloadPath; then if ! ssh $host test -e $downloadPath; then
ssh $host "sudo mkdir -p $downloadPath;sudo chown -R $deployUser:$deployUser $downloadPath" ssh $host "sudo mkdir -p $downloadPath;sudo chown -R $deployUser:$deployUser $downloadPath"
fi fi
# 如果$execPath不存在,则创建 # create if execPath does not exist
if ! ssh $host test -e $execPath; then if ! ssh $host test -e $execPath; then
ssh $host "sudo mkdir -p $execPath; sudo chown -R $deployUser:$deployUser $execPath" ssh $host "sudo mkdir -p $execPath; sudo chown -R $deployUser:$deployUser $execPath"
fi fi
# 如果$xlsFilePath不存在,则创建 # create if xlsFilePath does not exist
if ! ssh $host test -e $xlsFilePath; then if ! ssh $host test -e $xlsFilePath; then
ssh $host "sudo mkdir -p $xlsFilePath; sudo chown -R $deployUser:$deployUser $xlsFilePath" ssh $host "sudo mkdir -p $xlsFilePath; sudo chown -R $deployUser:$deployUser $xlsFilePath"
fi fi
@ -410,31 +415,31 @@ fi
done done
# 3,停止服务 # 3,stop server
echo "3,停止服务" echo "3,stop server"
sh ${workDir}/script/stop_all.sh sh ${workDir}/script/stop-all.sh
# 4,删除zk节点 # 4,delete zk node
echo "4,删除zk节点" echo "4,delete zk node"
sleep 1 sleep 1
python ${workDir}/script/del_zk_node.py $zkQuorum $zkRoot python ${workDir}/script/del-zk-node.py $zkQuorum $zkRoot
# 5,scp资源 # 5,scp resources
echo "5,scp资源" echo "5,scp resources"
sh ${workDir}/script/scp_hosts.sh sh ${workDir}/script/scp-hosts.sh
if [ $? -eq 0 ] if [ $? -eq 0 ]
then then
echo 'scp拷贝完成' echo 'scp copy completed'
else else
echo 'sc 拷贝失败退出' echo 'scp copy failed, exiting'
exit -1 exit -1
fi fi
# 6,启动 # 6,startup
echo "6,启动" echo "6,startup"
sh ${workDir}/script/start_all.sh sh ${workDir}/script/start-all.sh
# 7,启动监控自启动脚本 # 7,start the monitor auto-restart script
monitor_pid=${workDir}/monitor_server.pid monitor_pid=${workDir}/monitor_server.pid
if [ "true" = $monitorServerState ];then if [ "true" = $monitorServerState ];then
if [ -f $monitor_pid ]; then if [ -f $monitor_pid ]; then
@ -453,9 +458,8 @@ if [ "true" = $monitorServerState ];then
echo "monitor server running as process ${TARGET_PID}.Stopped success" echo "monitor server running as process ${TARGET_PID}.Stopped success"
rm -f $monitor_pid rm -f $monitor_pid
fi fi
nohup python -u ${workDir}/script/monitor_server.py $installPath $zkQuorum $zkMasters $zkWorkers > ${workDir}/monitor_server.log 2>&1 & nohup python -u ${workDir}/script/monitor-server.py $installPath $zkQuorum $zkMasters $zkWorkers > ${workDir}/monitor-server.log 2>&1 &
echo $! > $monitor_pid echo $! > $monitor_pid
echo "start monitor server success as process `cat $monitor_pid`" echo "start monitor server success as process `cat $monitor_pid`"
fi fi
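
Step 4 above clears the scheduler's ZooKeeper state with script/del-zk-node.py (the rename from del_zk_node.py appears later in this diff). For readers who want the gist of that step, here is a minimal sketch using kazoo; the argument order follows the install.sh invocation, and the recursive delete of zkRoot is an assumption for illustration, not the script's verbatim contents:

#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Sketch of the del-zk-node step: python del-zk-node.py <zkQuorum> <zkRoot>
import sys
from kazoo.client import KazooClient

if __name__ == '__main__':
    zookeepers, zk_root = sys.argv[1], sys.argv[2]
    zk = KazooClient(hosts=zookeepers)
    zk.start()
    # remove the scheduler root (e.g. /escheduler) and all children (assumed behavior)
    if zk.exists(zk_root):
        zk.delete(zk_root, recursive=True)
    zk.stop()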

4
package.xml

@ -70,8 +70,8 @@
<fileSet> <fileSet>
<directory>script</directory> <directory>script</directory>
<includes> <includes>
<include>start_all.sh</include> <include>start-all.sh</include>
<include>stop_all.sh</include> <include>stop-all.sh</include>
<include>escheduler-daemon.sh</include> <include>escheduler-daemon.sh</include>
</includes> </includes>
<outputDirectory>./bin</outputDirectory> <outputDirectory>./bin</outputDirectory>

103
pom.xml

@ -20,6 +20,39 @@
<hadoop.version>2.7.3</hadoop.version> <hadoop.version>2.7.3</hadoop.version>
<quartz.version>2.2.3</quartz.version> <quartz.version>2.2.3</quartz.version>
<jackson.version>2.9.8</jackson.version> <jackson.version>2.9.8</jackson.version>
<mybatis.version>3.5.1</mybatis.version>
<mybatis.spring.version>2.0.1</mybatis.spring.version>
<cron.utils.version>5.0.5</cron.utils.version>
<fastjson.version>1.2.61</fastjson.version>
<druid.version>1.1.14</druid.version>
<h2.version>1.3.163</h2.version>
<commons.codec.version>1.6</commons.codec.version>
<commons.logging.version>1.1.1</commons.logging.version>
<httpclient.version>4.4.1</httpclient.version>
<httpcore.version>4.4.1</httpcore.version>
<junit.version>4.12</junit.version>
<mysql.connector.version>5.1.34</mysql.connector.version>
<slf4j.api.version>1.7.5</slf4j.api.version>
<slf4j.log4j12.version>1.7.5</slf4j.log4j12.version>
<commons.collections.version>3.2.2</commons.collections.version>
<commons.lang.version>2.3</commons.lang.version>
<commons.lang3.version>3.5</commons.lang3.version>
<commons.httpclient>3.0.1</commons.httpclient>
<commons.beanutils.version>1.7.0</commons.beanutils.version>
<commons.configuration.version>1.10</commons.configuration.version>
<commons.email.version>1.5</commons.email.version>
<poi.version>3.17</poi.version>
<freemarker.version>2.3.21</freemarker.version>
<javax.servlet.api.version>3.1.0</javax.servlet.api.version>
<commons.collections4.version>4.1</commons.collections4.version>
<guava.version>20.0</guava.version>
<postgresql.version>42.1.4</postgresql.version>
<hive.jdbc.version>2.1.0</hive.jdbc.version>
<commons.io.version>2.4</commons.io.version>
<oshi.core.version>3.5.0</oshi.core.version>
<clickhouse.jdbc.version>0.1.52</clickhouse.jdbc.version>
<mssql.jdbc.version>6.1.0.jre8</mssql.jdbc.version>
<jsp.version>6.1.14</jsp.version>
</properties> </properties>
<dependencyManagement> <dependencyManagement>
@ -28,22 +61,22 @@
<dependency> <dependency>
<groupId>org.mybatis</groupId> <groupId>org.mybatis</groupId>
<artifactId>mybatis</artifactId> <artifactId>mybatis</artifactId>
<version>3.5.1</version> <version>${mybatis.version}</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>org.mybatis</groupId> <groupId>org.mybatis</groupId>
<artifactId>mybatis-spring</artifactId> <artifactId>mybatis-spring</artifactId>
<version>2.0.1</version> <version>${mybatis.spring.version}</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>org.mybatis.spring.boot</groupId> <groupId>org.mybatis.spring.boot</groupId>
<artifactId>mybatis-spring-boot-autoconfigure</artifactId> <artifactId>mybatis-spring-boot-autoconfigure</artifactId>
<version>2.0.1</version> <version>${mybatis.spring.version}</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>org.mybatis.spring.boot</groupId> <groupId>org.mybatis.spring.boot</groupId>
<artifactId>mybatis-spring-boot-starter</artifactId> <artifactId>mybatis-spring-boot-starter</artifactId>
<version>2.0.1</version> <version>${mybatis.spring.version}</version>
</dependency> </dependency>
<!-- quartz--> <!-- quartz-->
@ -60,18 +93,18 @@
<dependency> <dependency>
<groupId>com.cronutils</groupId> <groupId>com.cronutils</groupId>
<artifactId>cron-utils</artifactId> <artifactId>cron-utils</artifactId>
<version>5.0.5</version> <version>${cron.utils.version}</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>com.alibaba</groupId> <groupId>com.alibaba</groupId>
<artifactId>fastjson</artifactId> <artifactId>fastjson</artifactId>
<version>1.2.29</version> <version>${fastjson.version}</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>com.alibaba</groupId> <groupId>com.alibaba</groupId>
<artifactId>druid</artifactId> <artifactId>druid</artifactId>
<version>1.1.14</version> <version>${druid.version}</version>
</dependency> </dependency>
<dependency> <dependency>
@ -116,7 +149,7 @@
<dependency> <dependency>
<groupId>com.h2database</groupId> <groupId>com.h2database</groupId>
<artifactId>h2</artifactId> <artifactId>h2</artifactId>
<version>1.3.163</version> <version>${h2.version}</version>
<scope>test</scope> <scope>test</scope>
</dependency> </dependency>
@ -165,22 +198,22 @@
<dependency> <dependency>
<groupId>commons-codec</groupId> <groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId> <artifactId>commons-codec</artifactId>
<version>1.6</version> <version>${commons.codec.version}</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>commons-logging</groupId> <groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId> <artifactId>commons-logging</artifactId>
<version>1.1.1</version> <version>${commons.logging.version}</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>org.apache.httpcomponents</groupId> <groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId> <artifactId>httpclient</artifactId>
<version>4.4.1</version> <version>${httpclient.version}</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>org.apache.httpcomponents</groupId> <groupId>org.apache.httpcomponents</groupId>
<artifactId>httpcore</artifactId> <artifactId>httpcore</artifactId>
<version>4.4.1</version> <version>${httpcore.version}</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>com.fasterxml.jackson.core</groupId> <groupId>com.fasterxml.jackson.core</groupId>
@ -201,56 +234,56 @@
<dependency> <dependency>
<groupId>junit</groupId> <groupId>junit</groupId>
<artifactId>junit</artifactId> <artifactId>junit</artifactId>
<version>4.12</version> <version>${junit.version}</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>mysql</groupId> <groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId> <artifactId>mysql-connector-java</artifactId>
<version>5.1.34</version> <version>${mysql.connector.version}</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>org.slf4j</groupId> <groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId> <artifactId>slf4j-api</artifactId>
<version>1.7.5</version> <version>${slf4j.api.version}</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>org.slf4j</groupId> <groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId> <artifactId>slf4j-log4j12</artifactId>
<version>1.7.5</version> <version>${slf4j.log4j12.version}</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>commons-collections</groupId> <groupId>commons-collections</groupId>
<artifactId>commons-collections</artifactId> <artifactId>commons-collections</artifactId>
<version>3.2.2</version> <version>${commons.collections.version}</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>commons-lang</groupId> <groupId>commons-lang</groupId>
<artifactId>commons-lang</artifactId> <artifactId>commons-lang</artifactId>
<version>2.3</version> <version>${commons.lang.version}</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>org.apache.commons</groupId> <groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId> <artifactId>commons-lang3</artifactId>
<version>3.5</version> <version>${commons.lang3.version}</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>commons-httpclient</groupId> <groupId>commons-httpclient</groupId>
<artifactId>commons-httpclient</artifactId> <artifactId>commons-httpclient</artifactId>
<version>3.0.1</version> <version>${commons.httpclient}</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>commons-beanutils</groupId> <groupId>commons-beanutils</groupId>
<artifactId>commons-beanutils</artifactId> <artifactId>commons-beanutils</artifactId>
<version>1.7.0</version> <version>${commons.beanutils.version}</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>commons-configuration</groupId> <groupId>commons-configuration</groupId>
<artifactId>commons-configuration</artifactId> <artifactId>commons-configuration</artifactId>
<version>1.10</version> <version>${commons.configuration.version}</version>
</dependency> </dependency>
<dependency> <dependency>
@ -268,20 +301,20 @@
<dependency> <dependency>
<groupId>org.apache.commons</groupId> <groupId>org.apache.commons</groupId>
<artifactId>commons-email</artifactId> <artifactId>commons-email</artifactId>
<version>1.5</version> <version>${commons.email.version}</version>
</dependency> </dependency>
<!--excel poi--> <!--excel poi-->
<dependency> <dependency>
<groupId>org.apache.poi</groupId> <groupId>org.apache.poi</groupId>
<artifactId>poi</artifactId> <artifactId>poi</artifactId>
<version>3.17</version> <version>${poi.version}</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>org.freemarker</groupId> <groupId>org.freemarker</groupId>
<artifactId>freemarker</artifactId> <artifactId>freemarker</artifactId>
<version>2.3.21</version> <version>${freemarker.version}</version>
</dependency> </dependency>
@ -325,61 +358,61 @@
<dependency> <dependency>
<groupId>javax.servlet</groupId> <groupId>javax.servlet</groupId>
<artifactId>javax.servlet-api</artifactId> <artifactId>javax.servlet-api</artifactId>
<version>3.1.0</version> <version>${javax.servlet.api.version}</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>org.apache.commons</groupId> <groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId> <artifactId>commons-collections4</artifactId>
<version>4.1</version> <version>${commons.collections4.version}</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>com.google.guava</groupId> <groupId>com.google.guava</groupId>
<artifactId>guava</artifactId> <artifactId>guava</artifactId>
<version>20.0</version> <version>${guava.version}</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>org.postgresql</groupId> <groupId>org.postgresql</groupId>
<artifactId>postgresql</artifactId> <artifactId>postgresql</artifactId>
<version>42.1.4</version> <version>${postgresql.version}</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>org.apache.hive</groupId> <groupId>org.apache.hive</groupId>
<artifactId>hive-jdbc</artifactId> <artifactId>hive-jdbc</artifactId>
<version>2.1.0</version> <version>${hive.jdbc.version}</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>commons-io</groupId> <groupId>commons-io</groupId>
<artifactId>commons-io</artifactId> <artifactId>commons-io</artifactId>
<version>2.4</version> <version>${commons.io.version}</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>com.github.oshi</groupId> <groupId>com.github.oshi</groupId>
<artifactId>oshi-core</artifactId> <artifactId>oshi-core</artifactId>
<version>3.5.0</version> <version>${oshi.core.version}</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>ru.yandex.clickhouse</groupId> <groupId>ru.yandex.clickhouse</groupId>
<artifactId>clickhouse-jdbc</artifactId> <artifactId>clickhouse-jdbc</artifactId>
<version>0.1.52</version> <version>${clickhouse.jdbc.version}</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>com.microsoft.sqlserver</groupId> <groupId>com.microsoft.sqlserver</groupId>
<artifactId>mssql-jdbc</artifactId> <artifactId>mssql-jdbc</artifactId>
<version>6.1.0.jre8</version> <version>${mssql.jdbc.version}</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>org.mortbay.jetty</groupId> <groupId>org.mortbay.jetty</groupId>
<artifactId>jsp-2.1</artifactId> <artifactId>jsp-2.1</artifactId>
<version>6.1.14</version> <version>${jsp.version}</version>
</dependency> </dependency>
</dependencies> </dependencies>

2
script/create_escheduler.sh → script/create-escheduler.sh

@ -13,7 +13,7 @@ export ESCHEDULER_LIB_JARS=$ESCHEDULER_HOME/lib/*
export ESCHEDULER_OPTS="-server -Xmx1g -Xms1g -Xss512k -XX:+DisableExplicitGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:LargePageSizeInBytes=128m -XX:+UseFastAccessorMethods -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70" export ESCHEDULER_OPTS="-server -Xmx1g -Xms1g -Xss512k -XX:+DisableExplicitGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:LargePageSizeInBytes=128m -XX:+UseFastAccessorMethods -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70"
export STOP_TIMEOUT=5 export STOP_TIMEOUT=5
CLASS=cn.escheduler.dao.upgrade.shell.CreateEscheduler CLASS=cn.escheduler.dao.upgrade.shell.CreateDolphinScheduler
exec_command="$ESCHEDULER_OPTS -classpath $ESCHEDULER_CONF_DIR:$ESCHEDULER_LIB_JARS $CLASS" exec_command="$ESCHEDULER_OPTS -classpath $ESCHEDULER_CONF_DIR:$ESCHEDULER_LIB_JARS $CLASS"

0
script/del_zk_node.py → script/del-zk-node.py

4
script/env/.escheduler_env.sh vendored

@ -5,5 +5,5 @@ export SPARK_HOME2=/opt/soft/spark2
export PYTHON_HOME=/opt/soft/python export PYTHON_HOME=/opt/soft/python
export JAVA_HOME=/opt/soft/java export JAVA_HOME=/opt/soft/java
export HIVE_HOME=/opt/soft/hive export HIVE_HOME=/opt/soft/hive
export FLINK_HOME=/opt/soft/flink
export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$PATH export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$FLINK_HOME/bin:$PATH

2
script/escheduler-daemon.sh

@ -47,7 +47,7 @@ elif [ "$command" = "master-server" ]; then
LOG_FILE="-Dspring.config.location=conf/application_master.properties -Ddruid.mysql.usePingMethod=false" LOG_FILE="-Dspring.config.location=conf/application_master.properties -Ddruid.mysql.usePingMethod=false"
CLASS=cn.escheduler.server.master.MasterServer CLASS=cn.escheduler.server.master.MasterServer
elif [ "$command" = "worker-server" ]; then elif [ "$command" = "worker-server" ]; then
LOG_FILE="-Dlogback.configurationFile=conf/worker_logback.xml -Ddruid.mysql.usePingMethod=false" LOG_FILE="-Dspring.config.location=conf/application_worker.properties -Ddruid.mysql.usePingMethod=false"
CLASS=cn.escheduler.server.worker.WorkerServer CLASS=cn.escheduler.server.worker.WorkerServer
elif [ "$command" = "alert-server" ]; then elif [ "$command" = "alert-server" ]; then
LOG_FILE="-Dlogback.configurationFile=conf/alert_logback.xml" LOG_FILE="-Dlogback.configurationFile=conf/alert_logback.xml"

43
script/monitor_server.py → script/monitor-server.py

@ -1,21 +1,26 @@
#!/usr/bin/env python #!/usr/bin/env python
# -*- coding:utf-8 -*- # -*- coding:utf-8 -*-
# Author:qiaozhanwei
''' '''
yum 安装pip 1, yum install pip
yum -y install python-pip yum -y install python-pip
pip install kazoo 安装 2, pip install kazoo
conda install -c conda-forge kazoo 安装 pip install kazoo
运行脚本及参数说明 or
3, conda install kazoo
conda install -c conda-forge kazoo
run script and parameter description
nohup python -u monitor_server.py /data1_1T/escheduler 192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181 /escheduler/masters /escheduler/workers> monitor_server.log 2>&1 & nohup python -u monitor-server.py /data1_1T/escheduler 192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181 /escheduler/masters /escheduler/workers > monitor-server.log 2>&1 &
参数说明如下: the parameters are as follows:
/data1_1T/escheduler的值来自install.sh中的installPath /data1_1T/escheduler : the value comes from the installPath in install.sh
192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181的值来自install.sh中的zkQuorum 192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181 : the value comes from zkQuorum in install.sh
/escheduler/masters的值来自install.sh中的zkMasters /escheduler/masters : the value comes from zkMasters in install.sh
/escheduler/workers的值来自install.sh中的zkWorkers /escheduler/workers : the value comes from zkWorkers in install.sh
''' '''
import sys import sys
import socket import socket
@ -29,11 +34,11 @@ schedule = sched.scheduler(time.time, time.sleep)
class ZkClient: class ZkClient:
def __init__(self): def __init__(self):
# hosts配置zk地址集群 # hosts: the zk cluster address list
self.zk = KazooClient(hosts=zookeepers) self.zk = KazooClient(hosts=zookeepers)
self.zk.start() self.zk.start()
# 读取配置文件,组装成字典 # read configuration files and assemble them into a dictionary
def read_file(self,path): def read_file(self,path):
with open(path, 'r') as f: with open(path, 'r') as f:
dict = {} dict = {}
@ -43,11 +48,11 @@ class ZkClient:
dict[arr[0]] = arr[1] dict[arr[0]] = arr[1]
return dict return dict
# 根据hostname获取ip地址 # get the ip address according to hostname
def get_ip_by_hostname(self,hostname): def get_ip_by_hostname(self,hostname):
return socket.gethostbyname(hostname) return socket.gethostbyname(hostname)
# 重启服务 # restart server
def restart_server(self,inc): def restart_server(self,inc):
config_dict = self.read_file(install_path + '/conf/config/run_config.conf') config_dict = self.read_file(install_path + '/conf/config/run_config.conf')
@ -67,7 +72,7 @@ class ZkClient:
restart_master_list = list(set(master_list) - set(zk_master_list)) restart_master_list = list(set(master_list) - set(zk_master_list))
if (len(restart_master_list) != 0): if (len(restart_master_list) != 0):
for master in restart_master_list: for master in restart_master_list:
print("master " + self.get_ip_by_hostname(master) + " 服务已经掉了") print("master " + self.get_ip_by_hostname(master) + " server has down")
os.system('ssh ' + self.get_ip_by_hostname(master) + ' sh ' + install_path + '/bin/escheduler-daemon.sh start master-server') os.system('ssh ' + self.get_ip_by_hostname(master) + ' sh ' + install_path + '/bin/escheduler-daemon.sh start master-server')
if (self.zk.exists(workers_zk_path)): if (self.zk.exists(workers_zk_path)):
@ -78,15 +83,15 @@ class ZkClient:
restart_worker_list = list(set(worker_list) - set(zk_worker_list)) restart_worker_list = list(set(worker_list) - set(zk_worker_list))
if (len(restart_worker_list) != 0): if (len(restart_worker_list) != 0):
for worker in restart_worker_list: for worker in restart_worker_list:
print("worker " + self.get_ip_by_hostname(worker) + " 服务已经掉了") print("worker " + self.get_ip_by_hostname(worker) + " server has down")
os.system('ssh ' + self.get_ip_by_hostname(worker) + ' sh ' + install_path + '/bin/escheduler-daemon.sh start worker-server') os.system('ssh ' + self.get_ip_by_hostname(worker) + ' sh ' + install_path + '/bin/escheduler-daemon.sh start worker-server')
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S")) print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
schedule.enter(inc, 0, self.restart_server, (inc,)) schedule.enter(inc, 0, self.restart_server, (inc,))
# 默认参数60s # default parameter 60s
def main(self,inc=60): def main(self,inc=60):
# enter四个参数分别为:间隔事件、优先级(用于同时间到达的两个事件同时执行时定序)、被调用触发的函数 # the four enter parameters are: delay, priority (orders two events scheduled for the same time), the function to call,
# 给该触发函数的参数(tuple形式) # and the arguments for that function (as a tuple)
schedule.enter(0, 0, self.restart_server, (inc,)) schedule.enter(0, 0, self.restart_server, (inc,))
schedule.run() schedule.run()
if __name__ == '__main__': if __name__ == '__main__':
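
The restart logic above works by diffing the host list in run_config.conf against the nodes registered under the zkMasters and zkWorkers paths. A quick, hedged way to inspect those registrations by hand; the quorum and paths below are the install.sh defaults, assumed for illustration:

from kazoo.client import KazooClient

# zkQuorum / zkMasters / zkWorkers defaults from install.sh (assumed)
zk = KazooClient(hosts='192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181')
zk.start()
for path in ('/escheduler/masters', '/escheduler/workers'):
    # each live master/worker holds a registered child node here
    nodes = zk.get_children(path) if zk.exists(path) else []
    print(path, '->', nodes)
zk.stop()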

0
script/scp_hosts.sh → script/scp-hosts.sh

0
script/start_all.sh → script/start-all.sh

0
script/stop_all.sh → script/stop-all.sh

2
script/upgrade_escheduler.sh → script/upgrade-escheduler.sh

@ -13,7 +13,7 @@ export ESCHEDULER_LIB_JARS=$ESCHEDULER_HOME/lib/*
export ESCHEDULER_OPTS="-server -Xmx1g -Xms1g -Xss512k -XX:+DisableExplicitGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:LargePageSizeInBytes=128m -XX:+UseFastAccessorMethods -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70" export ESCHEDULER_OPTS="-server -Xmx1g -Xms1g -Xss512k -XX:+DisableExplicitGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:LargePageSizeInBytes=128m -XX:+UseFastAccessorMethods -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70"
export STOP_TIMEOUT=5 export STOP_TIMEOUT=5
CLASS=cn.escheduler.dao.upgrade.shell.UpgradeEscheduler CLASS=cn.escheduler.dao.upgrade.shell.UpgradeDolphinScheduler
exec_command="$ESCHEDULER_OPTS -classpath $ESCHEDULER_CONF_DIR:$ESCHEDULER_LIB_JARS $CLASS" exec_command="$ESCHEDULER_OPTS -classpath $ESCHEDULER_CONF_DIR:$ESCHEDULER_LIB_JARS $CLASS"

0
sql/create/release-1.0.0_schema/mysql/escheduler_ddl.sql → sql/create/release-1.0.0_schema/mysql/dolphinscheduler_ddl.sql

Some files were not shown because too many files have changed in this diff.