
merge from upstream

pull/2/head
lenboo 5 years ago
parent
commit
7cdca7bf74
  1. 102
      README.md
  2. 77
      README_zh_CN.md
  3. 31
      docs/zh_CN/1.0.4-release.md
  4. 23
      docs/zh_CN/1.0.5-release.md
  5. 55
      docs/zh_CN/1.1.0-release.md
  6. 271
      docs/zh_CN/EasyScheduler-FAQ.md
  7. 1
      docs/zh_CN/SUMMARY.md
  8. 2
      docs/zh_CN/book.json
  9. BIN
      docs/zh_CN/images/cdh_hive_error.png
  10. BIN
      docs/zh_CN/images/hive_kerberos.png
  11. BIN
      docs/zh_CN/images/master_worker_lack_res.png
  12. BIN
      docs/zh_CN/images/sparksql_kerberos.png
  13. 4
      docs/zh_CN/前端部署文档.md
  14. 6
      docs/zh_CN/后端部署文档.md
  15. 2
      docs/zh_CN/快速上手.md
  16. 62
      docs/zh_CN/系统使用手册.md
  17. 6
      docs/zh_CN/系统架构设计.md
  18. 2
      escheduler-alert/pom.xml
  19. 57
      escheduler-alert/src/main/java/cn/escheduler/alert/manager/EnterpriseWeChatManager.java
  20. 9
      escheduler-alert/src/main/java/cn/escheduler/alert/runner/AlertSender.java
  21. 20
      escheduler-alert/src/main/java/cn/escheduler/alert/utils/Constants.java
  22. 248
      escheduler-alert/src/main/java/cn/escheduler/alert/utils/EnterpriseWeChatUtils.java
  23. 35
      escheduler-alert/src/main/java/cn/escheduler/alert/utils/FuncUtils.java
  24. 1
      escheduler-alert/src/main/java/cn/escheduler/alert/utils/MailUtils.java
  25. 9
      escheduler-alert/src/main/resources/alert.properties
  26. 117
      escheduler-alert/src/test/java/cn/escheduler/alert/utils/EnterpriseWeChatUtilsTest.java
  27. 17
      escheduler-api/pom.xml
  28. 54
      escheduler-api/src/main/java/cn/escheduler/api/CombinedApplicationServer.java
  29. 1
      escheduler-api/src/main/java/cn/escheduler/api/configuration/AppConfiguration.java
  30. 47
      escheduler-api/src/main/java/cn/escheduler/api/controller/DataSourceController.java
  31. 51
      escheduler-api/src/main/java/cn/escheduler/api/controller/ExecutorController.java
  32. 4
      escheduler-api/src/main/java/cn/escheduler/api/controller/MonitorController.java
  33. 4
      escheduler-api/src/main/java/cn/escheduler/api/controller/ResourcesController.java
  34. 29
      escheduler-api/src/main/java/cn/escheduler/api/controller/SchedulerController.java
  35. 8
      escheduler-api/src/main/java/cn/escheduler/api/enums/Status.java
  36. 98
      escheduler-api/src/main/java/cn/escheduler/api/service/DataSourceService.java
  37. 29
      escheduler-api/src/main/java/cn/escheduler/api/service/ExecutorService.java
  38. 41
      escheduler-api/src/main/java/cn/escheduler/api/service/MonitorService.java
  39. 7
      escheduler-api/src/main/java/cn/escheduler/api/service/ProcessDefinitionService.java
  40. 30
      escheduler-api/src/main/java/cn/escheduler/api/service/ProcessInstanceService.java
  41. 9
      escheduler-api/src/main/java/cn/escheduler/api/service/ProjectService.java
  42. 135
      escheduler-api/src/main/java/cn/escheduler/api/service/ResourcesService.java
  43. 54
      escheduler-api/src/main/java/cn/escheduler/api/service/SchedulerService.java
  44. 44
      escheduler-api/src/main/java/cn/escheduler/api/service/TenantService.java
  45. 12
      escheduler-api/src/main/java/cn/escheduler/api/service/UdfFuncService.java
  46. 54
      escheduler-api/src/main/java/cn/escheduler/api/service/UsersService.java
  47. 4
      escheduler-api/src/main/java/cn/escheduler/api/utils/CheckUtils.java
  48. 1
      escheduler-api/src/main/java/cn/escheduler/api/utils/Constants.java
  49. 39
      escheduler-api/src/main/java/cn/escheduler/api/utils/ZookeeperMonitor.java
  50. 54
      escheduler-api/src/main/resources/combined_logback.xml
  51. 12
      escheduler-api/src/main/resources/i18n/messages.properties
  52. 12
      escheduler-api/src/main/resources/i18n/messages_en_US.properties
  53. 10
      escheduler-api/src/main/resources/i18n/messages_zh_CN.properties
  54. 42
      escheduler-api/src/main/resources/logback.xml
  55. 24
      escheduler-api/src/test/java/cn/escheduler/api/controller/ResourcesControllerTest.java
  56. 13
      escheduler-api/src/test/java/cn/escheduler/api/controller/SchedulerControllerTest.java
  57. 29
      escheduler-api/src/test/java/cn/escheduler/api/utils/ZookeeperMonitorUtilsTest.java
  58. 2
      escheduler-common/pom.xml
  59. 90
      escheduler-common/src/main/java/cn/escheduler/common/Constants.java
  60. 29
      escheduler-common/src/main/java/cn/escheduler/common/enums/ResUploadType.java
  61. 35
      escheduler-common/src/main/java/cn/escheduler/common/enums/TaskRecordStatus.java
  62. 8
      escheduler-common/src/main/java/cn/escheduler/common/enums/TaskType.java
  63. 15
      escheduler-common/src/main/java/cn/escheduler/common/enums/ZKNodeType.java
  64. 20
      escheduler-common/src/main/java/cn/escheduler/common/job/db/BaseDataSource.java
  65. 13
      escheduler-common/src/main/java/cn/escheduler/common/job/db/HiveDataSource.java
  66. 3
      escheduler-common/src/main/java/cn/escheduler/common/job/db/SparkDataSource.java
  67. 9
      escheduler-common/src/main/java/cn/escheduler/common/queue/ITaskQueue.java
  68. 2
      escheduler-common/src/main/java/cn/escheduler/common/queue/TaskQueueFactory.java
  69. 160
      escheduler-common/src/main/java/cn/escheduler/common/queue/TaskQueueZkImpl.java
  70. 11
      escheduler-common/src/main/java/cn/escheduler/common/utils/CommonUtils.java
  71. 6
      escheduler-common/src/main/java/cn/escheduler/common/utils/DependentUtils.java
  72. 102
      escheduler-common/src/main/java/cn/escheduler/common/utils/HadoopUtils.java
  73. 64
      escheduler-common/src/main/java/cn/escheduler/common/utils/IpUtils.java
  74. 2
      escheduler-common/src/main/java/cn/escheduler/common/utils/OSUtils.java
  75. 14
      escheduler-common/src/main/java/cn/escheduler/common/utils/PropertyUtils.java
  76. 20
      escheduler-common/src/main/java/cn/escheduler/common/utils/dependent/DependentDateUtils.java
  77. 92
      escheduler-common/src/main/java/cn/escheduler/common/zk/AbstractZKClient.java
  78. 21
      escheduler-common/src/main/resources/common/common.properties
  79. 12
      escheduler-common/src/main/resources/common/hadoop/hadoop.properties
  80. 6
      escheduler-common/src/test/java/cn/escheduler/common/os/OSUtilsTest.java
  81. 87
      escheduler-common/src/test/java/cn/escheduler/common/queue/TaskQueueImplTest.java
  82. 20
      escheduler-common/src/test/java/cn/escheduler/common/utils/DependentUtilsTest.java
  83. 41
      escheduler-common/src/test/java/cn/escheduler/common/utils/IpUtilsTest.java
  84. 6
      escheduler-dao/pom.xml
  85. 147
      escheduler-dao/src/main/java/cn/escheduler/dao/ProcessDao.java
  86. 46
      escheduler-dao/src/main/java/cn/escheduler/dao/TaskRecordDao.java
  87. 13
      escheduler-dao/src/main/java/cn/escheduler/dao/mapper/DataSourceMapper.java
  88. 19
      escheduler-dao/src/main/java/cn/escheduler/dao/mapper/DataSourceMapperProvider.java
  89. 5
      escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessDefinitionMapper.java
  90. 4
      escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessDefinitionMapperProvider.java
  91. 11
      escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessInstanceMapper.java
  92. 15
      escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessInstanceMapperProvider.java
  93. 18
      escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ResourceMapper.java
  94. 16
      escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ResourceMapperProvider.java
  95. 7
      escheduler-dao/src/main/java/cn/escheduler/dao/mapper/TaskInstanceMapperProvider.java
  96. 2
      escheduler-dao/src/main/java/cn/escheduler/dao/mapper/UserMapperProvider.java
  97. 17
      escheduler-dao/src/main/java/cn/escheduler/dao/mapper/WorkerServerMapper.java
  98. 15
      escheduler-dao/src/main/java/cn/escheduler/dao/mapper/WorkerServerMapperProvider.java
  99. 10
      escheduler-dao/src/main/java/cn/escheduler/dao/model/ProcessData.java
  100. 13
      escheduler-dao/src/main/java/cn/escheduler/dao/model/ProcessDefinition.java
  101. Some files were not shown because too many files have changed in this diff.

102
README.md

@ -4,69 +4,91 @@ Easy Scheduler
> Easy Scheduler for Big Data
**设计特点:** 一个分布式易扩展的可视化DAG工作流任务调度系统。致力于解决数据处理流程中错综复杂的依赖关系,使调度系统在数据处理流程中`开箱即用`。
其主要目标如下:
- 以DAG图的方式将Task按照任务的依赖关系关联起来,可实时可视化监控任务的运行状态
- 支持丰富的任务类型:Shell、MR、Spark、SQL(mysql、postgresql、hive、sparksql),Python,Sub_Process、Procedure等
- 支持工作流定时调度、依赖调度、手动调度、手动暂停/停止/恢复,同时支持失败重试/告警、从指定节点恢复失败、Kill任务等操作
- 支持工作流优先级、任务优先级及任务的故障转移及任务超时告警/失败
- 支持工作流全局参数及节点自定义参数设置
- 支持资源文件的在线上传/下载,管理等,支持在线文件创建、编辑
- 支持任务日志在线查看及滚动、在线下载日志等
- 实现集群HA,通过Zookeeper实现Master集群和Worker集群去中心化
- 支持对`Master/Worker` cpu load,memory,cpu在线查看
- 支持工作流运行历史树形/甘特图展示、支持任务状态统计、流程状态统计
- 支持补数
- 支持多租户
- 支持国际化
- 还有更多等待伙伴们探索
### Design features:
### 与同类调度系统的对比
A distributed and easy-to-expand visual DAG workflow scheduling system. Dedicated to solving the complex dependencies in data processing, making the scheduling system `out of the box` for data processing.
Its main objectives are as follows:
![调度系统对比](http://geek.analysys.cn/static/upload/47/2019-03-01/9609ca82-cf8b-4d91-8dc0-0e2805194747.jpeg)
- Associate the Tasks according to the dependencies of the tasks in a DAG graph, which can visualize the running state of task in real time.
- Support for many task types: Shell, MR, Spark, SQL (mysql, postgresql, hive, sparksql), Python, Sub_Process, Procedure, etc.
- Support process scheduling, dependency scheduling, manual scheduling, manual pause/stop/recovery, support for failed retry/alarm, recovery from specified nodes, Kill task, etc.
- Support process priority, task priority and task failover and task timeout alarm/failure
- Support process global parameters and node custom parameter settings
- Support online upload/download of resource files, management, etc. Support online file creation and editing
- Support task log online viewing and scrolling, online download log, etc.
- Implement cluster HA, decentralize Master cluster and Worker cluster through Zookeeper
- Support online viewing of `Master/Worker` cpu load, memory, cpu
- Support process running history tree/gantt chart display, support task status statistics, process status statistics
- Support for backfill (complement) of historical data
- Support for multi-tenant
- Support internationalization
- More features waiting for partners to explore
### 系统部分截图
![](http://geek.analysys.cn/static/upload/221/2019-03-29/0a9dea80-fb02-4fa5-a812-633b67035ffc.jpeg)
### Comparison with similar scheduler systems
![](http://geek.analysys.cn/static/upload/221/2019-04-01/83686def-a54f-4169-8cae-77b1f8300cc1.png)
![](http://geek.analysys.cn/static/upload/221/2019-03-29/83c937c7-1793-4d7a-aa28-b98460329fe0.jpeg)
  | EasyScheduler | Azkaban | Airflow
-- | -- | -- | --
**Stability** |   |   |  
Single point of failure | Decentralized multi-master and multi-worker | Yes. Single combined Web and Scheduler node | Yes. Single Scheduler
Additional HA requirements | Not required (HA is supported by itself) | DB | Celery / Dask / Mesos + Load Balancer + DB
Overload processing | Task queue mechanism: the number of schedulable tasks on a single machine can be flexibly configured; excess tasks are cached in the task queue and do not jam the machine. | The server jams when there are too many tasks | The server jams when there are too many tasks
**Easy to use** |   |   |  
DAG Monitoring Interface | The visual process definition shows key information such as task status, task type, retry count, the machine a task runs on, and visible variables at a glance. | Only task status can be seen | Cannot visually distinguish task types
Visual process definition | Yes. All process definition operations are visual: tasks are dragged to draw the DAG, and data sources and resources are configured. An API is also provided for third-party systems. | No. DAGs are defined and uploaded via a custom DSL | No. DAGs are written in Python code, which is inconvenient, especially for business users who cannot write code.
Quick deployment | One-click deployment | Complex clustering deployment | Complex clustering deployment
**Features** |   |   |  
Suspend and resume | Supports pause and resume operations | No. Can only kill the workflow and re-run | No. Can only kill the workflow and re-run
Whether to support multiple tenants | Yes. Users on EasyScheduler can have a many-to-one or one-to-one mapping to Hadoop users through tenants, which is very important for scheduling big data jobs. | No | No
Task type | Supports traditional shell tasks, and also support big data platform task scheduling: MR, Spark, SQL (mysql, postgresql, hive, sparksql), Python, Procedure, Sub_Process | shell、gobblin、hadoopJava、java、hive、pig、spark、hdfsToTeradata、teradataToHdfs | BashOperator、DummyOperator、MySqlOperator、HiveOperator、EmailOperator、HTTPOperator、SqlOperator
Compatibility | Supports the scheduling of big data jobs such as Spark, Hive, and MR, and is a better fit for big data workloads because it supports multiple tenants. | Without multi-tenant support, it is not flexible enough for big data platform use. | Without multi-tenant support, it is not flexible enough for big data platform use.
**Scalability** |   |   |  
Whether to support custom task types | Yes | Yes | Yes
Is cluster scaling supported? | Yes. The scheduler is distributed, and overall scheduling capability grows linearly with cluster size. Masters and Workers support dynamic online and offline. | Yes, but complicated: Executors scale horizontally | Yes, but complicated: Executors scale horizontally
### 文档
- <a href="https://analysys.github.io/easyscheduler_docs_cn/后端部署文档.html" target="_blank">后端部署文档</a>
- <a href="https://analysys.github.io/easyscheduler_docs_cn/前端部署文档.html" target="_blank">前端部署文档</a>
- [**使用手册**](https://analysys.github.io/easyscheduler_docs_cn/系统使用手册.html?_blank "系统使用手册")
### System partial screenshot
- [**升级文档**](https://analysys.github.io/easyscheduler_docs_cn/升级文档.html?_blank "升级文档")
![image](https://user-images.githubusercontent.com/48329107/61368744-1f5f3b00-a8c1-11e9-9cf1-10f8557a6b3b.png)
- <a href="http://52.82.13.76:8888" target="_blank">我要体验</a>
![image](https://user-images.githubusercontent.com/48329107/61368966-9dbbdd00-a8c1-11e9-8dcc-a9469d33583e.png)
更多文档请参考 <a href="https://analysys.github.io/easyscheduler_docs_cn/" target="_blank">easyscheduler中文在线文档</a>
![image](https://user-images.githubusercontent.com/48329107/61372146-f347b800-a8c8-11e9-8882-66e8934ada23.png)
### 近期研发计划
### Document
EasyScheduler的工作计划:<a href="https://github.com/analysys/EasyScheduler/projects/1" target="_blank">研发计划</a> ,其中 In Develop卡片下是1.0.2版本的功能,TODO卡片是待做事项(包括 feature ideas)
- <a href="https://analysys.github.io/easyscheduler_docs_cn/后端部署文档.html" target="_blank">Backend deployment documentation</a>
### 贡献代码
- <a href="https://analysys.github.io/easyscheduler_docs_cn/前端部署文档.html" target="_blank">Front-end deployment documentation</a>
非常欢迎大家来参与贡献代码,提交代码流程请参考:
https://github.com/analysys/EasyScheduler/blob/master/CONTRIBUTING.md
- [**User manual**](https://analysys.github.io/easyscheduler_docs_cn/系统使用手册.html?_blank "User manual")
- [**Upgrade document**](https://analysys.github.io/easyscheduler_docs_cn/升级文档.html?_blank "Upgrade document")
- <a href="http://52.82.13.76:8888" target="_blank">Online Demo</a>
### 感谢
For more documentation, please refer to the <a href="https://analysys.github.io/easyscheduler_docs_cn/" target="_blank">EasyScheduler online documentation</a>
Easy Scheduler使用了很多优秀的开源项目,比如google的guava、guice、grpc,netty,ali的bonecp,quartz,以及apache的众多开源项目等等,
正是由于站在这些开源项目的肩膀上,才有Easy Scheduler的诞生的可能。对此我们对使用的所有开源软件表示非常的感谢!我们也希望自己不仅是开源的受益者,也能成为开源的
贡献者,于是我们决定把易调度贡献出来,并承诺长期维护。也希望对开源有同样热情和信念的伙伴加入进来,一起为开源献出一份力!
### Recent R&D plan
Work plan of Easy Scheduler: [R&D plan](https://github.com/analysys/EasyScheduler/projects/1), where the `In Develop` card contains the features of version 1.1.0 and the TODO card lists work to be done (including feature ideas)
### How to contribute code
Contributions are very welcome. Please refer to the code submission process:
https://github.com/analysys/EasyScheduler/blob/master/CONTRIBUTING.md
### 帮助
The fastest way to get a response from our developers is to submit an issue, or add our WeChat: 510570367
### Thanks
Easy Scheduler uses many excellent open source projects, such as Google's guava, guice and grpc, netty, Alibaba's bonecp, quartz, and many Apache open source projects.
It is only by standing on the shoulders of these open source projects that Easy Scheduler was possible, and we are very grateful for all the open source software we use! We also hope not only to benefit from open source but to contribute back, so we decided to open-source Easy Scheduler and commit to maintaining it long term. We also hope that partners who share the same passion and conviction for open source will join in and contribute!
### Help
The fastest way to get a response from our developers is to submit an issue, or add our WeChat: 510570367

77
README_zh_CN.md

@ -0,0 +1,77 @@
Easy Scheduler
============
[![License](https://img.shields.io/badge/license-Apache%202-4EB1BA.svg)](https://www.apache.org/licenses/LICENSE-2.0.html)
> Easy Scheduler for Big Data
**设计特点:** 一个分布式易扩展的可视化DAG工作流任务调度系统。致力于解决数据处理流程中错综复杂的依赖关系,使调度系统在数据处理流程中`开箱即用`。
其主要目标如下:
- 以DAG图的方式将Task按照任务的依赖关系关联起来,可实时可视化监控任务的运行状态
- 支持丰富的任务类型:Shell、MR、Spark、SQL(mysql、postgresql、hive、sparksql),Python,Sub_Process、Procedure等
- 支持工作流定时调度、依赖调度、手动调度、手动暂停/停止/恢复,同时支持失败重试/告警、从指定节点恢复失败、Kill任务等操作
- 支持工作流优先级、任务优先级及任务的故障转移及任务超时告警/失败
- 支持工作流全局参数及节点自定义参数设置
- 支持资源文件的在线上传/下载,管理等,支持在线文件创建、编辑
- 支持任务日志在线查看及滚动、在线下载日志等
- 实现集群HA,通过Zookeeper实现Master集群和Worker集群去中心化
- 支持对`Master/Worker` cpu load,memory,cpu在线查看
- 支持工作流运行历史树形/甘特图展示、支持任务状态统计、流程状态统计
- 支持补数
- 支持多租户
- 支持国际化
- 还有更多等待伙伴们探索
### 与同类调度系统的对比
![调度系统对比](http://geek.analysys.cn/static/upload/47/2019-03-01/9609ca82-cf8b-4d91-8dc0-0e2805194747.jpeg)
### 系统部分截图
![](http://geek.analysys.cn/static/upload/221/2019-03-29/0a9dea80-fb02-4fa5-a812-633b67035ffc.jpeg)
![](http://geek.analysys.cn/static/upload/221/2019-04-01/83686def-a54f-4169-8cae-77b1f8300cc1.png)
![](http://geek.analysys.cn/static/upload/221/2019-03-29/83c937c7-1793-4d7a-aa28-b98460329fe0.jpeg)
### 文档
- <a href="https://analysys.github.io/easyscheduler_docs_cn/后端部署文档.html" target="_blank">后端部署文档</a>
- <a href="https://analysys.github.io/easyscheduler_docs_cn/前端部署文档.html" target="_blank">前端部署文档</a>
- [**使用手册**](https://analysys.github.io/easyscheduler_docs_cn/系统使用手册.html?_blank "系统使用手册")
- [**升级文档**](https://analysys.github.io/easyscheduler_docs_cn/升级文档.html?_blank "升级文档")
- <a href="http://52.82.13.76:8888" target="_blank">我要体验</a>
更多文档请参考 <a href="https://analysys.github.io/easyscheduler_docs_cn/" target="_blank">easyscheduler中文在线文档</a>
### 近期研发计划
EasyScheduler的工作计划:<a href="https://github.com/analysys/EasyScheduler/projects/1" target="_blank">研发计划</a> ,其中 In Develop卡片下是1.1.0版本的功能,TODO卡片是待做事项(包括 feature ideas)
### 贡献代码
非常欢迎大家来参与贡献代码,提交代码流程请参考:
https://github.com/analysys/EasyScheduler/blob/master/CONTRIBUTING.md
### 感谢
Easy Scheduler使用了很多优秀的开源项目,比如google的guava、guice、grpc,netty,ali的bonecp,quartz,以及apache的众多开源项目等等,
正是由于站在这些开源项目的肩膀上,才有Easy Scheduler的诞生的可能。对此我们对使用的所有开源软件表示非常的感谢!我们也希望自己不仅是开源的受益者,也能成为开源的
贡献者,于是我们决定把易调度贡献出来,并承诺长期维护。也希望对开源有同样热情和信念的伙伴加入进来,一起为开源献出一份力!
### 帮助
The fastest way to get response from our developers is to submit issues, or add our wechat : 510570367

31
docs/zh_CN/1.0.4-release.md

@ -0,0 +1,31 @@
Easy Scheduler Release 1.0.4
===
Easy Scheduler 1.0.4是1.x系列中的第五个版本。
增强:
===
- [[EasyScheduler-482]](https://github.com/analysys/EasyScheduler/issues/482)sql任务中的邮件标题增加了对自定义变量的支持
- [[EasyScheduler-483]](https://github.com/analysys/EasyScheduler/issues/483)sql任务中的发邮件失败,则此sql任务为失败
- [[EasyScheduler-484]](https://github.com/analysys/EasyScheduler/issues/484)修改sql任务中自定义变量的替换规则,支持多个单引号和双引号的替换
- [[EasyScheduler-485]](https://github.com/analysys/EasyScheduler/issues/485)创建资源文件时,增加对该资源文件是否在hdfs上已存在的验证
- [[EasyScheduler-486]](https://github.com/analysys/EasyScheduler/issues/486)shell进程退出,yarn状态非终态等待判断
修复
===
- [[EasyScheduler-198]](https://github.com/analysys/EasyScheduler/issues/198) 流程定义列表根据定时状态和更新时间进行排序
- [[EasyScheduler-419]](https://github.com/analysys/EasyScheduler/issues/419) 修复在线创建文件,hdfs文件未创建,却返回成功
- [[EasyScheduler-481]](https://github.com/analysys/EasyScheduler/issues/481)修复job不存在定时无法下线的问题
- [[EasyScheduler-425]](https://github.com/analysys/EasyScheduler/issues/425) kill任务时增加对其子进程的kill
- [[EasyScheduler-422]](https://github.com/analysys/EasyScheduler/issues/422) 修复更新资源文件时更新时间和大小未更新的问题
- [[EasyScheduler-431]](https://github.com/analysys/EasyScheduler/issues/431) 修复删除租户时,如果未启动hdfs,则删除租户失败的问题
感谢:
===
最后但最重要的是,没有以下伙伴的贡献就没有新版本的诞生:
Baoqi, jimmy201602, samz406, petersear, millionfor, hyperknob, fanguanqun, yangqinlong, qq389401879, feloxx, coding-now, hymzcn, nysyxxg, chgxtony, gj-zhang, xianhu, sunnyingit,
zhengqiangtan
以及微信群里众多的热心伙伴!在此非常感谢!

23
docs/zh_CN/1.0.5-release.md

@ -0,0 +1,23 @@
Easy Scheduler Release 1.0.5
===
Easy Scheduler 1.0.5是1.x系列中的一个版本。
增强:
===
- [[EasyScheduler-597]](https://github.com/analysys/EasyScheduler/issues/597)child process cannot extend father's receivers and cc
修复
===
- [[EasyScheduler-516]](https://github.com/analysys/EasyScheduler/issues/516)The task instance of MR cannot stop in some cases
- [[EasyScheduler-594]](https://github.com/analysys/EasyScheduler/issues/594)soft kill task 后 进程依旧存在(父进程 子进程)
感谢:
===
最后但最重要的是,没有以下伙伴的贡献就没有新版本的诞生:
Baoqi, jimmy201602, samz406, petersear, millionfor, hyperknob, fanguanqun, yangqinlong, qq389401879, feloxx, coding-now, hymzcn, nysyxxg, chgxtony, gj-zhang, xianhu, sunnyingit,
zhengqiangtan, chinashenkai
以及微信群里众多的热心伙伴!在此非常感谢!

55
docs/zh_CN/1.1.0-release.md

@ -0,0 +1,55 @@
Easy Scheduler Release 1.1.0
===
Easy Scheduler 1.1.0是1.x系列中的第六个版本。
新特性:
===
- [[EasyScheduler-391](https://github.com/analysys/EasyScheduler/issues/391)] run a process under a specified tenement user
- [[EasyScheduler-288](https://github.com/analysys/EasyScheduler/issues/288)] Feature/qiye_weixin
- [[EasyScheduler-189](https://github.com/analysys/EasyScheduler/issues/189)] Kerberos等安全支持
- [[EasyScheduler-398](https://github.com/analysys/EasyScheduler/issues/398)]管理员,有租户(install.sh设置默认租户),可以创建资源、项目和数据源(限制有一个管理员)
- [[EasyScheduler-293](https://github.com/analysys/EasyScheduler/issues/293)]点击运行流程时候选择的参数,没有地方可查看,也没有保存
- [[EasyScheduler-401](https://github.com/analysys/EasyScheduler/issues/401)]定时很容易定时每秒一次,定时完成以后可以在页面显示一下下次触发时间
- [[EasyScheduler-493](https://github.com/analysys/EasyScheduler/pull/493)]add datasource kerberos auth and FAQ modify and add resource upload s3
增强:
===
- [[EasyScheduler-227](https://github.com/analysys/EasyScheduler/issues/227)] upgrade spring-boot to 2.1.x and spring to 5.x
- [[EasyScheduler-434](https://github.com/analysys/EasyScheduler/issues/434)] worker节点数量 zk和mysql中不一致
- [[EasyScheduler-435](https://github.com/analysys/EasyScheduler/issues/435)]邮箱格式的验证
- [[EasyScheduler-441](https://github.com/analysys/EasyScheduler/issues/441)] 禁止运行节点加入已完成节点检测
- [[EasyScheduler-400](https://github.com/analysys/EasyScheduler/issues/400)] 首页页面,队列统计不和谐,命令统计无数据
- [[EasyScheduler-395](https://github.com/analysys/EasyScheduler/issues/395)] 对于容错恢复的流程,状态不能为 **正在运行**
- [[EasyScheduler-529](https://github.com/analysys/EasyScheduler/issues/529)] optimize poll task from zookeeper
- [[EasyScheduler-242](https://github.com/analysys/EasyScheduler/issues/242)]worker-server节点获取任务性能问题
- [[EasyScheduler-352](https://github.com/analysys/EasyScheduler/issues/352)]worker 分组, 队列消费问题
- [[EasyScheduler-461](https://github.com/analysys/EasyScheduler/issues/461)]查看数据源参数,需要加密账号密码信息
- [[EasyScheduler-396](https://github.com/analysys/EasyScheduler/issues/396)]Dockerfile优化,并关联Dockerfile和github实现自动打镜像
- [[EasyScheduler-389](https://github.com/analysys/EasyScheduler/issues/389)]service monitor cannot find the change of master/worker
- [[EasyScheduler-511](https://github.com/analysys/EasyScheduler/issues/511)]support recovery process from stop/kill nodes.
- [[EasyScheduler-399](https://github.com/analysys/EasyScheduler/issues/399)]HadoopUtils指定用户操作,而不是 **部署用户**
修复:
===
- [[EasyScheduler-394](https://github.com/analysys/EasyScheduler/issues/394)] master&worker部署在同一台机器上时,如果重启master&worker服务,会导致之前调度的任务无法继续调度
- [[EasyScheduler-469](https://github.com/analysys/EasyScheduler/issues/469)]Fix naming errors,monitor page
- [[EasyScheduler-392](https://github.com/analysys/EasyScheduler/issues/392)]Feature request: fix email regex check
- [[EasyScheduler-405](https://github.com/analysys/EasyScheduler/issues/405)]定时修改/添加页面,开始时间和结束时间不能相同
- [[EasyScheduler-517](https://github.com/analysys/EasyScheduler/issues/517)]补数 - 子工作流 - 时间参数
- [[EasyScheduler-532](https://github.com/analysys/EasyScheduler/issues/532)]python节点不执行的问题
- [[EasyScheduler-543](https://github.com/analysys/EasyScheduler/issues/543)]optimize datasource connection params safety
- [[EasyScheduler-569](https://github.com/analysys/EasyScheduler/issues/569)]定时任务无法真正停止
- [[EasyScheduler-463](https://github.com/analysys/EasyScheduler/issues/463)]邮箱验证不支持非常见后缀邮箱
感谢:
===
最后但最重要的是,没有以下伙伴的贡献就没有新版本的诞生:
Baoqi, jimmy201602, samz406, petersear, millionfor, hyperknob, fanguanqun, yangqinlong, qq389401879, chgxtony, Stanfan, lfyee, thisnew, hujiang75277381, sunnyingit, lgbo-ustc, ivivi, lzy305, JackIllkid, telltime, lipengbo2018, wuchunfu, telltime
以及微信群里众多的热心伙伴!在此非常感谢!

271
docs/zh_CN/EasyScheduler-FAQ.md

@ -1,96 +1,287 @@
Q:单机运行服务老挂,应该是内存不够,测试机器4核8G。生产环境需要分布式,如果单机的话建议的配置是?
## Q:EasyScheduler服务介绍及建议运行内存
A: Easy Scheduler有5个服务组成,这些服务本身需要的内存和cpu不多,
A: EasyScheduler由5个服务组成,MasterServer、WorkerServer、ApiServer、AlertServer、LoggerServer和UI。
| 服务 | 内存 | cpu核数 |
| ------------ | ---- | ------- |
| MasterServer | 2G | 2核 |
| WorkerServer | 2G | 2核 |
| ApiServer | 512M | 1核 |
| AlertServer | 512M | 1核 |
| LoggerServer | 512M | 1核 |
| 服务 | 说明 |
| ------------------------- | ------------------------------------------------------------ |
| MasterServer | 主要负责 **DAG** 的切分和任务状态的监控 |
| WorkerServer/LoggerServer | 主要负责任务的提交、执行和任务状态的更新。LoggerServer用于Rest Api通过 **RPC** 查看日志 |
| ApiServer | 提供Rest Api服务,供UI进行调用 |
| AlertServer | 提供告警服务 |
| UI | 前端页面展示 |
注意:由于如果任务较多,WorkServer所在机器建议物理内存在16G以上
注意:**由于服务比较多,建议单机部署最好是4核16G以上**
---
## Q: 管理员为什么不能创建项目
A:管理员目前属于"**纯管理**", 没有租户,即没有linux上对应的用户,所以没有执行权限, **故没有所属的项目、资源及数据源**,所以没有创建权限。**但是有所有的查看权限**。如果需要创建项目等业务操作,**请使用管理员创建租户和普通用户,然后使用普通用户登录进行操作**。我们将会在1.1.0版本中将管理员的创建和执行权限放开,管理员将会有所有的权限
---
## Q:系统支持哪些邮箱?
A:支持绝大多数邮箱,qq、163、126、139、outlook、aliyun等皆支持。支持**TLS和SSL**协议,可以在alert.properties中选择性配置
---
## Q:常用的系统变量时间参数有哪些,如何使用?
A:请参考 https://analysys.github.io/easyscheduler_docs_cn/%E7%B3%BB%E7%BB%9F%E4%BD%BF%E7%94%A8%E6%89%8B%E5%86%8C.html#%E7%B3%BB%E7%BB%9F%E5%8F%82%E6%95%B0
---
## Q:pip install kazoo 这个安装报错。是必须安装的吗?
A: 这个是python连接zookeeper需要使用到的,必须要安装
---
Q: 管理员为什么不能创建项目?
## Q: 怎么指定机器运行任务
A: 管理员目前属于"纯管理", 没有租户,即没有linux上对应的用户,所以没有执行权限, 但是有所有的查看权限。如果需要创建项目等业务操作,请使用管理员创建租户和普通用户,然后使用普通用户登录进行操作
A:使用 **管理员** 创建Worker分组,在 **流程定义启动** 的时候可**指定Worker分组**或者在**任务节点上指定Worker分组**。如果不指定,则使用Default,**Default默认是使用的集群里所有的Worker中随机选取一台来进行任务提交、执行**
---
Q: 系统支持哪些邮箱?
## Q:任务的优先级
A:我们同时 **支持流程和任务的优先级**。优先级我们有 **HIGHEST、HIGH、MEDIUM、LOW和LOWEST** 五种级别。**可以设置不同流程实例之间的优先级,也可以设置同一个流程实例中不同任务实例的优先级**。详细内容请参考任务优先级设计 https://analysys.github.io/easyscheduler_docs_cn/%E7%B3%BB%E7%BB%9F%E6%9E%B6%E6%9E%84%E8%AE%BE%E8%AE%A1.html#%E7%B3%BB%E7%BB%9F%E6%9E%B6%E6%9E%84%E8%AE%BE%E8%AE%A1
----
## Q:escheduler-grpc报错
A:在根目录下执行:mvn -U clean package assembly:assembly -Dmaven.test.skip=true , 然后刷新下整个项目
----
## Q:EasyScheduler支持windows上运行么
A: 理论上只有**Worker是需要在Linux上运行的**,其它的服务都是可以在windows上正常运行的。但是还是建议最好能在linux上部署使用
-----
## Q:UI 在 linux 编译node-sass提示:Error:EACCESS:permission denied,mkdir xxxx
A: 支持绝大多数邮箱,qq、163、126、139、outlook、aliyun等皆可支持
A:单独安装 **npm install node-sass --unsafe-perm**,之后再 **npm install**
---
Q:常用的系统变量时间参数有哪些,如何使用?
## Q:UI 不能正常登陆访问
A: 请参考使用手册中的系统参数
A: 1,如果是node启动的查看escheduler-ui下的.env API_BASE配置是否是Api Server服务地址
2,如果是nginx启动的并且是通过 **install-escheduler-ui.sh** 安装的,查看 **/etc/nginx/conf.d/escheduler.conf** 中的proxy_pass配置是否是Api Server服务地址
3,如果以上配置都是正确的,那么请查看Api Server服务是否是正常的,curl http://192.168.xx.xx:12345/escheduler/users/get-user-info,查看Api Server日志,如果提示 cn.escheduler.api.interceptor.LoginHandlerInterceptor:[76] - session info is null,则证明Api Server服务是正常的
4,如果以上都没有问题,需要查看一下 **application.properties** 中的 **server.context-path 和 server.port 配置**是否正确
---
Q:pip install kazoo 这个安装报错。是必须安装的吗?
## Q: 流程定义手动启动或调度启动之后,没有流程实例生成
A: 1,首先通过**jps 查看MasterServer服务是否存在**,或者从服务监控直接查看zk中是否存在master服务
A: 这个是python连接zookeeper需要使用到的
2,如果存在master服务,查看 **命令状态统计** 或者 **t_escheduler_error_command** 中是否增加的新记录,如果增加了,**请查看 message 字段定位启动异常原因**
---
Q: 如果alert、api、logger服务任意一个宕机,任何还会正常执行吧
## Q : 任务状态一直处于提交成功状态
A: 不影响,影响正在运行中的任务的服务有Master和Worker服务
A: 1,首先通过**jps 查看WorkerServer服务是否存在**,或者从服务监控直接查看zk中是否存在worker服务
2,如果 **WorkerServer** 服务正常,需要 **查看MasterServer是否把task任务放到zk队列中** ,**需要查看MasterServer日志及zk队列中是否有任务阻塞**
3,如果以上都没有问题,需要定位是否指定了Worker分组,但是 **Worker分组的机器不是在线状态**
---
Q: 这个怎么指定机器运行任务的啊 」
## Q: 是否提供Docker镜像及Dockerfile
A: 提供Docker镜像及Dockerfile。
A: 通过worker分组: 这个流程只能在指定的机器组里执行。默认是Default,可以在任一worker上执行。
Docker镜像地址:https://hub.docker.com/r/escheduler/escheduler_images
Dockerfile地址:https://github.com/qiaozhanwei/escheduler_dockerfile/tree/master/docker_escheduler
---
Q: 跨用户的任务依赖怎么实现呢, 比如A用户写了一个任务,B用户需要依赖这个任务
## Q : install.sh 中需要注意问题
A: 1,如果替换变量中包含特殊字符,**请用 \ 转移符进行转移**
2,installPath="/data1_1T/escheduler",**这个目录不能和当前要一键安装的install.sh目录是一样的**
就比如说 我们数仓组 写了一个 中间宽表的任务, 其他业务部门想要使用这个中间表的时候,他们应该是另外一个用户,怎么依赖这个中间表呢
3,deployUser="escheduler",**部署用户必须具有sudo权限**,因为worker是通过sudo -u 租户 sh xxx.command进行执行的
A: 有两种情况,一个是要运行这个宽表任务,可以使用子工作流把宽表任务放到自己的工作流里面。另一个是检查这个宽表任务有没有完成,可以使用依赖节点来检查这个宽表任务在指定的时间周期有没有完成。
4,monitorServerState="false",服务监控脚本是否启动,默认是不启动服务监控脚本的。**如果启动服务监控脚本,则每5分钟定时来监控master和worker的服务是否down机,如果down机则会自动重启**
5,hdfsStartupSate="false",是否开启HDFS资源上传功能。默认是不开启的,**如果不开启则资源中心是不能使用的**。如果开启,需要conf/common/hadoop/hadoop.properties中配置fs.defaultFS和yarn的相关配置,如果使用namenode HA,需要将core-site.xml和hdfs-site.xml复制到conf根目录下
注意:**1.0.x版本是不会自动创建hdfs根目录的,需要自行创建,并且需要部署用户有hdfs的操作权限**
---
Q: 启动WorkerServer服务时不能正常启动,报以下信息是什么原因?
```
[INFO] 2019-05-06 16:39:31.492 cn.escheduler.server.zk.ZKWorkerClient:[155] - register failure , worker already started on : 127.0.0.1, please wait for a moment and try again
```
A:Worker/Master Server在启动时,会向Zookeeper注册自己的启动信息,是Zookeeper的临时节点,如果两次启动时间间隔较短的情况,上次启动的Worker/Master Server在Zookeeper的会话还未过期,会出现上述信息,处理办法是等待session过期,一般是1分钟左右
---
## Q : 流程定义和流程实例下线异常
A : 对于 **1.0.4 以前的版本中**,修改escheduler-api cn.escheduler.api.quartz包下的代码即可
```
public boolean deleteJob(String jobName, String jobGroupName) {
    lock.writeLock().lock();
    try {
        JobKey jobKey = new JobKey(jobName,jobGroupName);
        if(scheduler.checkExists(jobKey)){
            logger.info("try to delete job, job name: {}, job group name: {},", jobName, jobGroupName);
            return scheduler.deleteJob(jobKey);
        }else {
            return true;
        }
    } catch (SchedulerException e) {
        logger.error(String.format("delete job : %s failed",jobName), e);
    } finally {
        lock.writeLock().unlock();
    }
    return false;
}
```
---
## Q : HDFS启动之前创建的租户,能正常使用资源中心吗
Q: 编译时escheduler-grpc模块一直报错:Information:java: Errors occurred while compiling module 'escheduler-rpc', 找不到LogParameter、RetStrInfo、RetByteInfo等class类
A: 不能。因为在未启动HDFS创建的租户,不会在HDFS中注册租户目录。所以上传资源会报错
A: 这是因为rpc源码包是google Grpc实现的,需要使用maven进行编译,在根目录下执行:mvn -U clean package assembly:assembly -Dmaven.test.skip=true , 然后刷新下整个项目
## Q : 多Master和多Worker状态下,服务掉了,怎么容错
----
A: **注意:Master监控Master及Worker服务。**
Q:EasyScheduler支持windows上运行么?
1,如果Master服务掉了,其它的Master会接管挂掉的Master的流程,继续监控Worker task状态
A: 建议在Ubuntu、Centos上运行,暂不支持windows上运行,不过windows上可以进行编译。开发调试的话建议Ubuntu或者mac上进行。
2,如果Worker服务掉,Master会监控到Worker服务掉了,如果存在Yarn任务,Kill Yarn任务之后走重试
具体请看容错设计:https://analysys.github.io/easyscheduler_docs_cn/%E7%B3%BB%E7%BB%9F%E6%9E%B6%E6%9E%84%E8%AE%BE%E8%AE%A1.html#%E7%B3%BB%E7%BB%9F%E6%9E%B6%E6%9E%84%E8%AE%BE%E8%AE%A1
---
## Q : 对于Master和Worker一台机器伪分布式下的容错
A : 1.0.3 版本只实现了Master启动流程容错,不走Worker容错。也就是说如果Worker挂掉的时候,没有Master存在。这流程将会出现问题。我们会在 **1.1.0** 版本中增加Master和Worker启动自容错,修复这个问题。如果想手动修改这个问题,需要针对 **跨重启正在运行流程** **并且已经掉的正在运行的Worker任务,需要修改为失败**,**同时跨重启正在运行流程设置为失败状态**。然后从失败节点进行流程恢复即可
---
## Q : 定时容易设置成每秒执行
A : 设置定时的时候需要注意,如果第一位(* * * * * ? *)设置成 \* ,则表示每秒执行。**我们将会在1.1.0版本中加入显示最近调度的时间列表** ,使用http://cron.qqe2.com/ 可以在线看近5次运行时间
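As a quick check outside the UI, the Quartz CronExpression API can be used to see when a given expression actually fires (the scheduler is quartz-based); the expressions below are examples, not project defaults:
```java
import org.quartz.CronExpression;

import java.text.ParseException;
import java.util.Date;

public class CronCheck {
    public static void main(String[] args) throws ParseException {
        // first field '*' means every second -- usually NOT what you want
        CronExpression everySecond = new CronExpression("* * * * * ? *");
        // first field '0' and a fixed hour: fires once a day at 01:00:00
        CronExpression daily = new CronExpression("0 0 1 * * ? *");

        Date now = new Date();
        System.out.println("every-second next fire: " + everySecond.getNextValidTimeAfter(now));
        System.out.println("daily next fire:        " + daily.getNextValidTimeAfter(now));
    }
}
```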
-----
Q:任务为什么不执行?
A: 不执行的原因:
## Q: 定时有有效时间范围吗
查看command表里有没有内容?
A:有的,**如果定时的起止时间是同一个时间,那么此定时将是无效的定时**。**如果起止时间的结束时间比当前的时间小,很有可能定时会被自动删除**
查看Master server的运行日志:
查看Worker Server的运行日志
## Q : 任务依赖有几种实现
A: 1,**DAG** 之间的任务依赖关系,是从 **入度为零** 进行DAG切分的
2,有 **任务依赖节点** ,可以实现跨流程的任务或者流程依赖,具体请参考 依赖(DEPENDENT)节点:https://analysys.github.io/easyscheduler_docs_cn/%E7%B3%BB%E7%BB%9F%E4%BD%BF%E7%94%A8%E6%89%8B%E5%86%8C.html#%E4%BB%BB%E5%8A%A1%E8%8A%82%E7%82%B9%E7%B1%BB%E5%9E%8B%E5%92%8C%E5%8F%82%E6%95%B0%E8%AE%BE%E7%BD%AE
注意:**不支持跨项目的流程或任务依赖**
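To make the "zero in-degree" splitting above concrete, here is a generic topological-ordering sketch; it is illustrative only and not the project's actual implementation, with made-up node names:
```java
import java.util.*;

// Repeatedly take nodes whose in-degree is zero: a task with no unfinished
// predecessors is ready to be submitted.
public class DagSplit {
    public static List<String> topoOrder(Map<String, List<String>> edges) {
        Map<String, Integer> inDegree = new HashMap<>();
        edges.forEach((from, tos) -> {
            inDegree.putIfAbsent(from, 0);
            for (String to : tos) inDegree.merge(to, 1, Integer::sum);
        });
        Deque<String> ready = new ArrayDeque<>();
        inDegree.forEach((node, d) -> { if (d == 0) ready.add(node); });
        List<String> order = new ArrayList<>();
        while (!ready.isEmpty()) {
            String node = ready.poll();
            order.add(node);
            for (String next : edges.getOrDefault(node, Collections.emptyList())) {
                if (inDegree.merge(next, -1, Integer::sum) == 0) ready.add(next);
            }
        }
        return order; // tasks in a valid execution order
    }

    public static void main(String[] args) {
        Map<String, List<String>> edges = new HashMap<>();
        edges.put("A", Arrays.asList("B", "C"));
        edges.put("B", Collections.singletonList("D"));
        edges.put("C", Collections.singletonList("D"));
        System.out.println(topoOrder(edges)); // e.g. [A, B, C, D]
    }
}
```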
## Q: 流程定义有几种启动方式
A: 1,在 **流程定义列表**,点击 **启动** 按钮
2,**流程定义列表添加定时器**,调度启动流程定义
3,流程定义 **查看或编辑** DAG 页面,任意 **任务节点右击** 启动流程定义
4,可以对流程定义 DAG 编辑,设置某些任务的运行标志位 **禁止运行**,则在启动流程定义的时候,将该节点的连线将从DAG中去掉
## Q : Python任务设置Python版本
A: 1,对于 **1.0.3之后的版本** 只需要修改 conf/env/.escheduler_env.sh中的PYTHON_HOME
```
export PYTHON_HOME=/bin/python
```
注意:这里 **PYTHON_HOME** ,是python命令的绝对路径,而不是单纯的 PYTHON_HOME,还需要注意的是 export PATH 的时候,需要直接
```
export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$PATH
```
2,对 1.0.3 之前的版本,Python任务只能支持系统的Python版本,不支持指定Python版本
## Q: Worker Task 通过sudo -u 租户 sh xxx.command会产生子进程,在kill的时候,是否会杀掉
A: 我们会在1.0.4中增加kill任务同时,kill掉任务产生的各种所有子进程
## Q : EasyScheduler中的队列怎么用,用户队列和租户队列是什么意思
A : EasyScheduler 中的队列可以在用户或者租户上指定队列,**用户指定的队列优先级是高于租户队列的优先级的。**,例如:对MR任务指定队列,是通过 mapreduce.job.queuename 来指定队列的。
注意:MR在用以上方法指定队列的时候,传递参数请使用如下方式:
```
Configuration conf = new Configuration();
GenericOptionsParser optionParser = new GenericOptionsParser(conf, args);
String[] remainingArgs = optionParser.getRemainingArgs();
```
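For context, a minimal MR driver sketch showing where that snippet sits and how the queue ultimately gets set; the class and queue names here are made up:
```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.util.GenericOptionsParser;

public class QueueAwareDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // GenericOptionsParser applies generic options such as
        // -Dmapreduce.job.queuename=my_queue to the Configuration
        String[] remainingArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        // alternatively, set the queue explicitly in code:
        // conf.set("mapreduce.job.queuename", "my_queue");

        Job job = Job.getInstance(conf, "queue-aware-job");
        // ... set jar, mapper, reducer, input and output paths using remainingArgs ...
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
```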
如果是Spark任务 --queue 方式指定队列
## Q : Master 或者 Worker报如下告警
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/master_worker_lack_res.png" width="60%" />
</p>
A : 修改conf下的 master.properties **master.reserved.memory** 的值为更小的值,比如说0.1 或者
worker.properties **worker.reserved.memory** 的值为更小的值,比如说0.1
## Q : hive版本是1.1.0+cdh5.15.0,SQL hive任务连接报错
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/cdh_hive_error.png" width="60%" />
</p>
A : 将 hive pom
```
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-jdbc</artifactId>
<version>2.1.0</version>
</dependency>
```
修改为
```
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-jdbc</artifactId>
<version>1.1.0</version>
</dependency>
```

1
docs/zh_CN/SUMMARY.md

@ -35,6 +35,7 @@
* 系统版本升级文档
* [版本升级](升级文档.md)
* 历次版本发布内容
* [1.1.0 release](1.1.0-release.md)
* [1.0.3 release](1.0.3-release.md)
* [1.0.2 release](1.0.2-release.md)
* [1.0.1 release](1.0.1-release.md)

2
docs/zh_CN/book.json

@ -1,6 +1,6 @@
{
"title": "调度系统-EasyScheduler",
"author": "YIGUAN",
"author": "",
"description": "调度系统",
"language": "zh-hans",
"gitbook": "3.2.3",

BIN
docs/zh_CN/images/cdh_hive_error.png

Binary file added (104 KiB).

BIN
docs/zh_CN/images/hive_kerberos.png

Binary file added (36 KiB).

BIN
docs/zh_CN/images/master_worker_lack_res.png

Binary file added (106 KiB).

BIN
docs/zh_CN/images/sparksql_kerberos.png

Binary file added (36 KiB).

4
docs/zh_CN/前端部署文档.md

@ -5,9 +5,9 @@
## 1、准备工作
#### 下载安装包
目前最新安装包版本是1.0.2,下载地址: [码云下载](https://gitee.com/easyscheduler/EasyScheduler/attach_files/)
请下载最新版本的安装包,下载地址: [码云下载](https://gitee.com/easyscheduler/EasyScheduler/attach_files/)
下载 escheduler-ui-1.0.2.tar.gz 后,解压`tar -zxvf escheduler-ui-1.0.2.tar.gz ./`后,进入`escheduler-ui`目录
下载 escheduler-ui-x.x.x.tar.gz 后,解压`tar -zxvf escheduler-ui-x.x.x.tar.gz ./`后,进入`escheduler-ui`目录

6
docs/zh_CN/后端部署文档.md

@ -4,7 +4,7 @@
## 1、准备工作
目前最新安装包版本是1.0.3,下载地址: [码云下载](https://gitee.com/easyscheduler/EasyScheduler/attach_files/) ,下载escheduler-backend-1.0.3.tar.gz(后端简称escheduler-backend),escheduler-ui-1.0.3.tar.gz(前端简称escheduler-ui)
请下载最新版本的安装包,下载地址: [码云下载](https://gitee.com/easyscheduler/EasyScheduler/attach_files/) ,下载escheduler-backend-x.x.x.tar.gz(后端简称escheduler-backend),escheduler-ui-x.x.x.tar.gz(前端简称escheduler-ui)
#### 准备一: 基础软件安装(必装项请自行安装)
@ -66,7 +66,7 @@ escheduler ALL=(ALL) NOPASSWD: NOPASSWD: ALL
mysql -h {host} -u {user} -p{password} -D {db} < quartz.sql
```
* 1.0.2版本创建表和导入基础数据
* 1.0.2之后(含1.0.2)版本创建表和导入基础数据
修改conf/dao/data_source.properties中的下列属性
```
@ -149,7 +149,7 @@ install.sh : 一键部署脚本
### 2.2 编译源码来部署
将源码包release版本1.0.3下载后,解压进入根目录
将源码包release版本下载后,解压进入根目录
* 执行编译命令:

2
docs/zh_CN/快速上手.md

@ -1,7 +1,7 @@
# 快速上手
* 管理员用户登录
>地址:192.168.xx.xx:8888 用户名密码:admin/esheduler123
>地址:192.168.xx.xx:8888 用户名密码:admin/escheduler123
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/login.jpg" width="60%" />

62
docs/zh_CN/系统使用手册.md

@ -60,7 +60,7 @@
### 执行流程定义
- **未上线状态的流程定义可以编辑,但是不可以运行**,所以先上线工作流
> 点击工作流定义,返回流程定义列表,点击”上线“图标,上线工作流定义。
> "下线"工作流之前,要先将定时管理的定时下线,才能成功下线工作流定义
- 点击”运行“,执行工作流。运行参数说明:
@ -98,28 +98,28 @@
### 查看流程实例
> 点击“工作流实例”,查看流程实例列表。
> 点击工作流名称,查看任务执行状态。
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/instance-detail.png" width="60%" />
</p>
> 点击任务节点,点击“查看日志”,查看任务执行日志。
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/task-log.png" width="60%" />
</p>
> 点击任务实例节点,点击**查看历史**,可以查看该流程实例运行的该任务实例列表
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/task_history.png" width="60%" />
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/task_history.png" width="60%" />
</p>
> 对工作流实例的操作:
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/instance-list.png" width="60%" />
</p>
@ -165,7 +165,7 @@
- 密码:设置连接MySQL的密码
- 数据库名:输入连接MySQL的数据库名称
- Jdbc连接参数:用于MySQL连接的参数设置,以JSON形式填写
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/mysql_edit.png" width="60%" />
</p>
@ -191,7 +191,7 @@
#### 创建、编辑HIVE数据源
1.使用HiveServer2方式连接
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/hive_edit.png" width="60%" />
</p>
@ -207,12 +207,20 @@
- Jdbc连接参数:用于HIVE连接的参数设置,以JSON形式填写
2.使用HiveServer2 HA Zookeeper方式连接
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/hive_edit2.png" width="60%" />
</p>
注意:如果开启了**kerberos**,则需要填写 **Principal**
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/hive_kerberos.png" width="60%" />
</p>
#### 创建、编辑Spark数据源
<p align="center">
@ -229,9 +237,17 @@
- 数据库名:输入连接Spark的数据库名称
- Jdbc连接参数:用于Spark连接的参数设置,以JSON形式填写
注意:如果开启了**kerberos**,则需要填写 **Principal**
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/sparksql_kerberos.png" width="60%" />
</p>
### 上传资源
- 上传资源文件和udf函数,所有上传的文件和资源都会被存储到hdfs上,所以需要以下配置项:
```
conf/common/common.properties
-- hdfs.startup.state=true
@ -242,7 +258,7 @@ conf/common/hadoop.properties
```
#### 文件管理
> 是对各种资源文件的管理,包括创建基本的txt/log/sh/conf等文件、上传jar包等各种类型文件,以及编辑、下载、删除等操作。
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/file-manage.png" width="60%" />
@ -287,7 +303,7 @@ conf/common/hadoop.properties
#### 资源管理
> 资源管理和文件管理功能类似,不同之处是资源管理是上传的UDF函数,文件管理上传的是用户程序,脚本及配置文件
* 上传udf资源
> 和上传文件相同。
@ -303,7 +319,7 @@ conf/common/hadoop.properties
- 参数:用来标注函数的输入参数
- 数据库名:预留字段,用于创建永久UDF函数
- UDF资源:设置创建的UDF对应的资源文件
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/udf_edit.png" width="60%" />
</p>
@ -312,7 +328,7 @@ conf/common/hadoop.properties
- 安全中心是只有管理员账户才有权限的功能,有队列管理、租户管理、用户管理、告警组管理、worker分组、令牌管理等功能,还可以对资源、数据源、项目等授权
- 管理员登录,默认用户名密码:admin/escheduler123
### 创建队列
- 队列是在执行spark、mapreduce等程序,需要用到“队列”参数时使用的。
- “安全中心”->“队列管理”->“创建队列”
@ -357,7 +373,7 @@ conf/common/hadoop.properties
### 令牌管理
- 由于后端接口有登录检查,令牌管理,提供了一种可以通过调用接口的方式对系统进行各种操作。
- 调用示例:
```令牌调用示例
/**
* test token
@ -477,15 +493,15 @@ conf/common/hadoop.properties
### 依赖(DEPENDENT)节点
- 依赖节点,就是**依赖检查节点**。比如A流程依赖昨天的B流程执行成功,依赖节点会去检查B流程在昨天是否有执行成功的实例。
> 拖动工具栏中的![PNG](https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_DEPENDENT.png)任务节点到画板中,双击任务节点,如下图:
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/dependent_edit.png" width="60%" />
</p>
> 依赖节点提供了逻辑判断功能,比如检查昨天的B流程是否成功,或者C流程是否执行成功。
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/depend-node.png" width="80%" />
</p>
@ -536,7 +552,7 @@ conf/common/hadoop.properties
### SPARK节点
- 通过SPARK节点,可以直接直接执行SPARK程序,对于spark节点,worker会使用`spark-submit`方式提交任务
> 拖动工具栏中的![PNG](https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_SPARK.png)任务节点到画板中,双击任务节点,如下图:
<p align="center">
@ -563,7 +579,7 @@ conf/common/hadoop.properties
> 拖动工具栏中的![PNG](https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_MR.png)任务节点到画板中,双击任务节点,如下图:
1. JAVA程序
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/mr_java.png" width="60%" />
</p>
@ -592,7 +608,7 @@ conf/common/hadoop.properties
### Python节点
- 使用python节点,可以直接执行python脚本,对于python节点,worker会使用`python **`方式提交任务。
> 拖动工具栏中的![PNG](https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_PYTHON.png)任务节点到画板中,双击任务节点,如下图:

6
docs/zh_CN/系统架构设计.md

@ -13,13 +13,13 @@
**流程定义**:通过拖拽任务节点并建立任务节点的关联所形成的可视化**DAG**
**流程实例**:流程实例是流程定义的实例化,可以通过手动启动或定时调度生成
**流程实例**:流程实例是流程定义的实例化,可以通过手动启动或定时调度生成,流程定义每运行一次,产生一个流程实例
**任务实例**:任务实例是流程定义中任务节点的实例化,标识着具体的任务执行状态
**任务类型**: 目前支持有SHELL、SQL、SUB_PROCESS、PROCEDURE、MR、SPARK、PYTHON、DEPENDENT,同时计划支持动态插件扩展,注意:其中子 **SUB_PROCESS** 也是一个单独的流程定义,是可以单独启动执行的
**任务类型**: 目前支持有SHELL、SQL、SUB_PROCESS(子流程)、PROCEDURE、MR、SPARK、PYTHON、DEPENDENT(依赖),同时计划支持动态插件扩展,注意:其中子 **SUB_PROCESS** 也是一个单独的流程定义,是可以单独启动执行的
**调度方式:** 系统支持基于cron表达式的定时调度和手动调度。命令类型支持:启动工作流、从当前节点开始执行、恢复被容错的工作流、恢复暂停流程、从失败节点开始执行、补数、调度、重跑、暂停、停止、恢复等待线程。其中 **恢复被容错的工作流****恢复等待线程** 两种命令类型是由调度内部控制使用,外部无法调用
**调度方式:** 系统支持基于cron表达式的定时调度和手动调度。命令类型支持:启动工作流、从当前节点开始执行、恢复被容错的工作流、恢复暂停流程、从失败节点开始执行、补数、定时、重跑、暂停、停止、恢复等待线程。其中 **恢复被容错的工作流****恢复等待线程** 两种命令类型是由调度内部控制使用,外部无法调用
**定时调度**:系统采用 **quartz** 分布式调度器,并同时支持cron表达式可视化的生成

2
escheduler-alert/pom.xml

@ -4,7 +4,7 @@
<parent>
<groupId>cn.analysys</groupId>
<artifactId>escheduler</artifactId>
<version>1.0.3-SNAPSHOT</version>
<version>1.1.0-SNAPSHOT</version>
</parent>
<artifactId>escheduler-alert</artifactId>
<packaging>jar</packaging>

57
escheduler-alert/src/main/java/cn/escheduler/alert/manager/EnterpriseWeChatManager.java

@ -0,0 +1,57 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.alert.manager;
import cn.escheduler.alert.utils.Constants;
import cn.escheduler.alert.utils.EnterpriseWeChatUtils;
import cn.escheduler.dao.model.Alert;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* Enterprise WeChat Manager
*/
public class EnterpriseWeChatManager {
private static final Logger logger = LoggerFactory.getLogger(EnterpriseWeChatManager.class);
/**
* Enterprise We Chat send
* @param alert
*/
public Map<String,Object> send(Alert alert, String token){
Map<String,Object> retMap = new HashMap<>();
retMap.put(Constants.STATUS, false);
String agentId = EnterpriseWeChatUtils.enterpriseWeChatAgentId;
String users = EnterpriseWeChatUtils.enterpriseWeChatUsers;
List<String> userList = Arrays.asList(users.split(","));
logger.info("send message {}",alert);
String msg = EnterpriseWeChatUtils.makeUserSendMsg(userList, agentId,EnterpriseWeChatUtils.markdownByAlert(alert));
try {
EnterpriseWeChatUtils.sendEnterpriseWeChat(Constants.UTF_8, msg, token);
} catch (IOException e) {
logger.error(e.getMessage(),e);
}
retMap.put(Constants.STATUS, true);
return retMap;
}
}

9
escheduler-alert/src/main/java/cn/escheduler/alert/runner/AlertSender.java

@ -17,7 +17,9 @@
package cn.escheduler.alert.runner;
import cn.escheduler.alert.manager.EmailManager;
import cn.escheduler.alert.manager.EnterpriseWeChatManager;
import cn.escheduler.alert.utils.Constants;
import cn.escheduler.alert.utils.EnterpriseWeChatUtils;
import cn.escheduler.common.enums.AlertStatus;
import cn.escheduler.common.enums.AlertType;
import cn.escheduler.dao.AlertDao;
@ -40,6 +42,7 @@ public class AlertSender{
private static final Logger logger = LoggerFactory.getLogger(AlertSender.class);
private static final EmailManager emailManager= new EmailManager();
private static final EnterpriseWeChatManager weChatManager= new EnterpriseWeChatManager();
private List<Alert> alertList;
@ -109,6 +112,12 @@ public class AlertSender{
if (flag){
alertDao.updateAlert(AlertStatus.EXECUTION_SUCCESS, "execution success", alert.getId());
logger.info("alert send success");
try {
String token = EnterpriseWeChatUtils.getToken();
weChatManager.send(alert,token);
} catch (Exception e) {
logger.error(e.getMessage(),e);
}
}else {
alertDao.updateAlert(AlertStatus.EXECUTION_FAILURE,String.valueOf(retMaps.get(Constants.MESSAGE)),alert.getId());
logger.info("alert send error : {}" , String.valueOf(retMaps.get(Constants.MESSAGE)));

20
escheduler-alert/src/main/java/cn/escheduler/alert/utils/Constants.java

@ -128,4 +128,24 @@ public class Constants {
public static final String TH_END = "</th>";
public static final int ALERT_SCAN_INTERVEL = 5000;
public static final String MARKDOWN_QUOTE = ">";
public static final String MARKDOWN_ENTER = "\n";
public static final String ENTERPRISE_WECHAT_CORP_ID = "enterprise.wechat.corp.id";
public static final String ENTERPRISE_WECHAT_SECRET = "enterprise.wechat.secret";
public static final String ENTERPRISE_WECHAT_TOKEN_URL = "enterprise.wechat.token.url";
public static final String ENTERPRISE_WECHAT_PUSH_URL = "enterprise.wechat.push.url";
public static final String ENTERPRISE_WECHAT_TEAM_SEND_MSG = "enterprise.wechat.team.send.msg";
public static final String ENTERPRISE_WECHAT_USER_SEND_MSG = "enterprise.wechat.user.send.msg";
public static final String ENTERPRISE_WECHAT_AGENT_ID = "enterprise.wechat.agent.id";
public static final String ENTERPRISE_WECHAT_USERS = "enterprise.wechat.users";
}

248
escheduler-alert/src/main/java/cn/escheduler/alert/utils/EnterpriseWeChatUtils.java

@ -0,0 +1,248 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.alert.utils;
import cn.escheduler.common.enums.ShowType;
import cn.escheduler.dao.model.Alert;
import com.alibaba.fastjson.JSON;
import com.google.common.reflect.TypeToken;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.HttpEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.*;
import static cn.escheduler.alert.utils.PropertyUtils.getString;
/**
* Enterprise WeChat utils
*/
public class EnterpriseWeChatUtils {
public static final Logger logger = LoggerFactory.getLogger(EnterpriseWeChatUtils.class);
private static final String enterpriseWeChatCorpId = getString(Constants.ENTERPRISE_WECHAT_CORP_ID);
private static final String enterpriseWeChatSecret = getString(Constants.ENTERPRISE_WECHAT_SECRET);
private static final String enterpriseWeChatTokenUrl = getString(Constants.ENTERPRISE_WECHAT_TOKEN_URL);
private static String enterpriseWeChatTokenUrlReplace = enterpriseWeChatTokenUrl
.replaceAll("\\$corpId", enterpriseWeChatCorpId)
.replaceAll("\\$secret", enterpriseWeChatSecret);
private static final String enterpriseWeChatPushUrl = getString(Constants.ENTERPRISE_WECHAT_PUSH_URL);
private static final String enterpriseWeChatTeamSendMsg = getString(Constants.ENTERPRISE_WECHAT_TEAM_SEND_MSG);
private static final String enterpriseWeChatUserSendMsg = getString(Constants.ENTERPRISE_WECHAT_USER_SEND_MSG);
public static final String enterpriseWeChatAgentId = getString(Constants.ENTERPRISE_WECHAT_AGENT_ID);
public static final String enterpriseWeChatUsers = getString(Constants.ENTERPRISE_WECHAT_USERS);
/**
* get Enterprise WeChat token info
* @return token string info
* @throws IOException
*/
public static String getToken() throws IOException {
String resp;
CloseableHttpClient httpClient = HttpClients.createDefault();
HttpGet httpGet = new HttpGet(enterpriseWeChatTokenUrlReplace);
CloseableHttpResponse response = httpClient.execute(httpGet);
try {
HttpEntity entity = response.getEntity();
resp = EntityUtils.toString(entity, Constants.UTF_8);
EntityUtils.consume(entity);
} finally {
response.close();
}
Map<String, Object> map = JSON.parseObject(resp,
new TypeToken<Map<String, Object>>() {
}.getType());
return map.get("access_token").toString();
}
/**
* make team single Enterprise WeChat message
* @param toParty
* @param agentId
* @param msg
* @return Enterprise WeChat send message
*/
public static String makeTeamSendMsg(String toParty, String agentId, String msg) {
return enterpriseWeChatTeamSendMsg.replaceAll("\\$toParty", toParty)
.replaceAll("\\$agentId", agentId)
.replaceAll("\\$msg", msg);
}
/**
* make team multi Enterprise WeChat message
* @param toParty
* @param agentId
* @param msg
* @return Enterprise WeChat send message
*/
public static String makeTeamSendMsg(Collection<String> toParty, String agentId, String msg) {
String listParty = FuncUtils.mkString(toParty, "|");
return enterpriseWeChatTeamSendMsg.replaceAll("\\$toParty", listParty)
.replaceAll("\\$agentId", agentId)
.replaceAll("\\$msg", msg);
}
/**
* make team single user message
* @param toUser
* @param agentId
* @param msg
* @return Enterprise WeChat send message
*/
public static String makeUserSendMsg(String toUser, String agentId, String msg) {
return enterpriseWeChatUserSendMsg.replaceAll("\\$toUser", toUser)
.replaceAll("\\$agentId", agentId)
.replaceAll("\\$msg", msg);
}
/**
* make team multi user message
* @param toUser
* @param agentId
* @param msg
* @return Enterprise WeChat send message
*/
public static String makeUserSendMsg(Collection<String> toUser, String agentId, String msg) {
String listUser = FuncUtils.mkString(toUser, "|");
return enterpriseWeChatUserSendMsg.replaceAll("\\$toUser", listUser)
.replaceAll("\\$agentId", agentId)
.replaceAll("\\$msg", msg);
}
/**
* send Enterprise WeChat
* @param charset
* @param data
* @param token
* @return Enterprise WeChat resp, demo: {"errcode":0,"errmsg":"ok","invaliduser":""}
* @throws IOException
*/
public static String sendEnterpriseWeChat(String charset, String data, String token) throws IOException {
String enterpriseWeChatPushUrlReplace = enterpriseWeChatPushUrl.replaceAll("\\$token", token);
CloseableHttpClient httpclient = HttpClients.createDefault();
HttpPost httpPost = new HttpPost(enterpriseWeChatPushUrlReplace);
httpPost.setEntity(new StringEntity(data, charset));
CloseableHttpResponse response = httpclient.execute(httpPost);
String resp;
try {
HttpEntity entity = response.getEntity();
resp = EntityUtils.toString(entity, charset);
EntityUtils.consume(entity);
} finally {
response.close();
}
logger.info("Enterprise WeChat send [{}], param:{}, resp:{}", enterpriseWeChatPushUrl, data, resp);
return resp;
}
/**
* convert table to markdown style
* @param title
* @param content
* @return
*/
public static String markdownTable(String title,String content){
List<LinkedHashMap> mapItemsList = JSONUtils.toList(content, LinkedHashMap.class);
StringBuilder contents = new StringBuilder(200);
for (LinkedHashMap mapItems : mapItemsList){
Set<Map.Entry<String, String>> entries = mapItems.entrySet();
Iterator<Map.Entry<String, String>> iterator = entries.iterator();
StringBuilder t = new StringBuilder(String.format("`%s`%s",title,Constants.MARKDOWN_ENTER));
while (iterator.hasNext()){
Map.Entry<String, String> entry = iterator.next();
t.append(Constants.MARKDOWN_QUOTE);
t.append(entry.getKey()).append(":").append(entry.getValue());
t.append(Constants.MARKDOWN_ENTER);
}
contents.append(t);
}
return contents.toString();
}
/**
* convert text to markdown style
* @param title
* @param content
* @return
*/
public static String markdownText(String title,String content){
if (StringUtils.isNotEmpty(content)){
List<String> list;
try {
list = JSONUtils.toList(content,String.class);
}catch (Exception e){
logger.error("json format exception",e);
return null;
}
StringBuilder contents = new StringBuilder(100);
contents.append(String.format("`%s`\n",title));
for (String str : list){
contents.append(Constants.MARKDOWN_QUOTE);
contents.append(str);
contents.append(Constants.MARKDOWN_ENTER);
}
return contents.toString();
}
return null;
}
/**
* Determine the mardown style based on the show type of the alert
* @param alert
* @return
*/
public static String markdownByAlert(Alert alert){
String result = "";
if (alert.getShowType() == ShowType.TABLE) {
result = markdownTable(alert.getTitle(),alert.getContent());
}else if(alert.getShowType() == ShowType.TEXT){
result = markdownText(alert.getTitle(),alert.getContent());
}
return result;
}
}
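A brief usage sketch of the utility class above, mirroring what the alert module and the tests below do; it assumes the class is on the classpath, the configuration values come from alert.properties, and the alert text is made up:
```java
import java.io.IOException;
import java.util.Arrays;
import java.util.List;

public class WeChatAlertExample {
    public static void main(String[] args) throws IOException {
        // agent id and user list are read from alert.properties by EnterpriseWeChatUtils
        String agentId = EnterpriseWeChatUtils.enterpriseWeChatAgentId;
        List<String> users = Arrays.asList(EnterpriseWeChatUtils.enterpriseWeChatUsers.split(","));

        String token = EnterpriseWeChatUtils.getToken();                   // fetch access_token
        String msg = EnterpriseWeChatUtils.makeUserSendMsg(users, agentId, // fill the user-send template
                "`test alert`" + Constants.MARKDOWN_ENTER + Constants.MARKDOWN_QUOTE + "status: success");
        String resp = EnterpriseWeChatUtils.sendEnterpriseWeChat(Constants.UTF_8, msg, token);
        System.out.println(resp);                                          // e.g. {"errcode":0,"errmsg":"ok",...}
    }
}
```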

35
escheduler-alert/src/main/java/cn/escheduler/alert/utils/FuncUtils.java

@ -0,0 +1,35 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.alert.utils;
public class FuncUtils {
static public String mkString(Iterable<String> list, String split) {
StringBuilder sb = new StringBuilder();
boolean first = true;
for (String item : list) {
if (first) {
first = false;
} else {
sb.append(split);
}
sb.append(item);
}
return sb.toString();
}
}
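A trivial usage sketch of mkString (illustrative values):
```java
import java.util.Arrays;

public class FuncUtilsExample {
    public static void main(String[] args) {
        // joins the items with the given separator: prints "user1|user2|user3"
        System.out.println(FuncUtils.mkString(Arrays.asList("user1", "user2", "user3"), "|"));
    }
}
```
On Java 8+ this is equivalent to String.join("|", list) for string collections.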

1
escheduler-alert/src/main/java/cn/escheduler/alert/utils/MailUtils.java

@ -165,6 +165,7 @@ public class MailUtils {
return retMap;
}catch (Exception e){
handleException(receivers, retMap, e);
return retMap;
}
}
return retMap;

9
escheduler-alert/src/main/resources/alert.properties

@ -16,6 +16,15 @@ mail.smtp.ssl.enable=true
#xls file path,need create if not exist
xls.file.path=/tmp/xls
# Enterprise WeChat configuration
enterprise.wechat.corp.id=xxxxxxx
enterprise.wechat.secret=xxxxxxx
enterprise.wechat.agent.id=xxxxxxx
enterprise.wechat.users=xxxxxxx
enterprise.wechat.token.url=https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret
enterprise.wechat.push.url=https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token
enterprise.wechat.team.send.msg={\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}
enterprise.wechat.user.send.msg={\"touser\":\"$toUser\",\"agentid\":\"$agentId\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"$msg\"}}
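For reference, a hedged sketch of how the $ placeholders in these templates are presumably filled in before the JSON is posted to the push URL (the real substitution lives in EnterpriseWeChatUtils; the ids and message below are made up):
String template = "{\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}";
String payload = template
        .replace("$toParty", "2")                  // party id
        .replace("$agentId", "1000002")            // enterprise.wechat.agent.id
        .replace("$msg", "process failure warn");
// payload is then POSTed to enterprise.wechat.push.url, with $token replaced by the access token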

117
escheduler-alert/src/test/java/cn/escheduler/alert/utils/EnterpriseWeChatUtilsTest.java

@ -0,0 +1,117 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.alert.utils;
import com.alibaba.fastjson.JSON;
import org.junit.Assert;
import org.junit.Test;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
/**
* Please manually modify the configuration file before testing.
* file: alert.properties
* enterprise.wechat.corp.id
* enterprise.wechat.secret
* enterprise.wechat.token.url
* enterprise.wechat.push.url
* enterprise.wechat.send.msg
* enterprise.wechat.agent.id
* enterprise.wechat.users
*/
public class EnterpriseWeChatUtilsTest {
private String agentId = PropertyUtils.getString(Constants.ENTERPRISE_WECHAT_AGENT_ID); // app id
private Collection<String> listUserId = Arrays.asList(PropertyUtils.getString(Constants.ENTERPRISE_WECHAT_USERS).split(","));
// Please change
private String partyId = "2";
private Collection<String> listPartyId = Arrays.asList("2","4");
@Test
public void testSendSingleTeamWeChat() {
try {
String token = EnterpriseWeChatUtils.getToken();
String msg = EnterpriseWeChatUtils.makeTeamSendMsg(partyId, agentId, "hello world");
String resp = EnterpriseWeChatUtils.sendEnterpriseWeChat("utf-8", msg, token);
String errmsg = JSON.parseObject(resp).getString("errmsg");
Assert.assertEquals(errmsg, "ok");
} catch (IOException e) {
e.printStackTrace();
}
}
@Test
public void testSendMultiTeamWeChat() {
try {
String token = EnterpriseWeChatUtils.getToken();
String msg = EnterpriseWeChatUtils.makeTeamSendMsg(listPartyId, agentId, "hello world");
String resp = EnterpriseWeChatUtils.sendEnterpriseWeChat("utf-8", msg, token);
String errmsg = JSON.parseObject(resp).getString("errmsg");
Assert.assertEquals(errmsg, "ok");
} catch (IOException e) {
e.printStackTrace();
}
}
@Test
public void testSendSingleUserWeChat() {
try {
String token = EnterpriseWeChatUtils.getToken();
String msg = EnterpriseWeChatUtils.makeUserSendMsg(listUserId.stream().findFirst().get(), agentId, "您的会议室已经预定,稍后会同步到`邮箱` \n" +
">**事项详情** \n" +
">事 项:<font color='info'>开会</font> <br>" +
">组织者:@miglioguan \n" +
">参与者:@miglioguan、@kunliu、@jamdeezhou、@kanexiong、@kisonwang \n" +
"> \n" +
">会议室:<font color='info'>广州TIT 1楼 301</font> \n" +
">日 期:<font color='warning'>2018年5月18日</font> \n" +
">时 间:<font color='comment'>上午9:00-11:00</font> \n" +
"> \n" +
">请准时参加会议。 \n" +
"> \n" +
">如需修改会议信息,请点击:[修改会议信息](https://work.weixin.qq.com)\"");
String resp = EnterpriseWeChatUtils.sendEnterpriseWeChat("utf-8", msg, token);
String errmsg = JSON.parseObject(resp).getString("errmsg");
Assert.assertEquals(errmsg, "ok");
} catch (IOException e) {
e.printStackTrace();
}
}
@Test
public void testSendMultiUserWeChat() {
try {
String token = EnterpriseWeChatUtils.getToken();
String msg = EnterpriseWeChatUtils.makeUserSendMsg(listUserId, agentId, "hello world");
String resp = EnterpriseWeChatUtils.sendEnterpriseWeChat("utf-8", msg, token);
String errmsg = JSON.parseObject(resp).getString("errmsg");
Assert.assertEquals(errmsg, "ok");
} catch (IOException e) {
e.printStackTrace();
}
}
}

17
escheduler-api/pom.xml

@ -1,18 +1,19 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>cn.analysys</groupId>
<artifactId>escheduler</artifactId>
<version>1.0.3-SNAPSHOT</version>
<version>1.1.0-SNAPSHOT</version>
</parent>
<artifactId>escheduler-api</artifactId>
<packaging>jar</packaging>
<dependencies>
<dependency>
<dependency>
<groupId>cn.analysys</groupId>
<artifactId>escheduler-alert</artifactId>
</dependency>
<dependency>
<groupId>cn.analysys</groupId>
<artifactId>escheduler-server</artifactId>
<exclusions>
@ -48,6 +49,10 @@
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-tomcat</artifactId>
</exclusion>
<exclusion>
<artifactId>log4j-to-slf4j</artifactId>
<groupId>org.apache.logging.log4j</groupId>
</exclusion>
</exclusions>
</dependency>

54
escheduler-api/src/main/java/cn/escheduler/api/CombinedApplicationServer.java

@ -0,0 +1,54 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.api;
import cn.escheduler.alert.AlertServer;
import cn.escheduler.dao.ProcessDao;
import cn.escheduler.server.master.MasterServer;
import cn.escheduler.server.rpc.LoggerServer;
import cn.escheduler.server.worker.WorkerServer;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.web.servlet.ServletComponentScan;
import org.springframework.boot.web.servlet.support.SpringBootServletInitializer;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.context.annotation.ComponentScan;
import springfox.documentation.swagger2.annotations.EnableSwagger2;
@SpringBootApplication
@ServletComponentScan
@ComponentScan("cn.escheduler")
@EnableSwagger2
public class CombinedApplicationServer extends SpringBootServletInitializer {
public static void main(String[] args) throws Exception {
ConfigurableApplicationContext context = SpringApplication.run(ApiApplicationServer.class, args);
ProcessDao processDao = context.getBean(ProcessDao.class);
MasterServer master = new MasterServer(processDao);
master.run(processDao);
WorkerServer workerServer = new WorkerServer();
workerServer.run();
LoggerServer server = new LoggerServer();
server.start();
AlertServer alertServer = AlertServer.getInstance();
alertServer.start();
}
}

1
escheduler-api/src/main/java/cn/escheduler/api/configuration/AppConfiguration.java

@ -84,6 +84,7 @@ public class AppConfiguration implements WebMvcConfigurer {
registry.addResourceHandler("/static/**").addResourceLocations("classpath:/static/");
registry.addResourceHandler("doc.html").addResourceLocations("classpath:/META-INF/resources/");
registry.addResourceHandler("/webjars/**").addResourceLocations("classpath:/META-INF/resources/webjars/");
registry.addResourceHandler("/ui/**").addResourceLocations("file:ui/");
}
@Override

47
escheduler-api/src/main/java/cn/escheduler/api/controller/DataSourceController.java

@ -18,10 +18,14 @@ package cn.escheduler.api.controller;
import cn.escheduler.api.enums.Status;
import cn.escheduler.api.service.DataSourceService;
import cn.escheduler.api.utils.CheckUtils;
import cn.escheduler.api.utils.Constants;
import cn.escheduler.api.utils.Result;
import cn.escheduler.common.enums.DbType;
import cn.escheduler.common.enums.ResUploadType;
import cn.escheduler.common.utils.CommonUtils;
import cn.escheduler.common.utils.ParameterUtils;
import cn.escheduler.common.utils.PropertyUtils;
import cn.escheduler.dao.model.User;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiImplicitParam;
@ -34,9 +38,11 @@ import org.springframework.http.HttpStatus;
import org.springframework.web.bind.annotation.*;
import springfox.documentation.annotations.ApiIgnore;
import java.util.HashMap;
import java.util.Map;
import static cn.escheduler.api.enums.Status.*;
import static cn.escheduler.common.utils.PropertyUtils.getBoolean;
/**
@ -54,12 +60,16 @@ public class DataSourceController extends BaseController {
/**
* create data source
*
* @param loginUser
* @param name
* @param note
* @param type
* @param host
* @param port
* @param database
* @param principal
* @param userName
* @param password
* @param other
* @return
*/
@ -84,13 +94,14 @@ public class DataSourceController extends BaseController {
@RequestParam(value = "host") String host,
@RequestParam(value = "port") String port,
@RequestParam(value = "database") String database,
@RequestParam(value = "principal") String principal,
@RequestParam(value = "userName") String userName,
@RequestParam(value = "password") String password,
@RequestParam(value = "other") String other) {
logger.info("login user {} create datasource ame: {}, note: {}, type: {}, other: {}",
loginUser.getUserName(), name, note, type, other);
logger.info("login user {} create datasource name: {}, note: {}, type: {}, host: {},port: {},database : {},principal: {},userName : {} other: {}",
loginUser.getUserName(), name, note, type, host,port,database,principal,userName,other);
try {
String parameter = dataSourceService.buildParameter(name, note, type, host, port, database, userName, password, other);
String parameter = dataSourceService.buildParameter(name, note, type, host, port, database,principal,userName, password, other);
Map<String, Object> result = dataSourceService.createDataSource(loginUser, name, note, type, parameter);
return returnDataList(result);
@ -134,13 +145,14 @@ public class DataSourceController extends BaseController {
@RequestParam(value = "host") String host,
@RequestParam(value = "port") String port,
@RequestParam(value = "database") String database,
@RequestParam(value = "principal") String principal,
@RequestParam(value = "userName") String userName,
@RequestParam(value = "password") String password,
@RequestParam(value = "other") String other) {
logger.info("login user {} updateProcessInstance datasource name: {}, note: {}, type: {}, other: {}",
loginUser.getUserName(), name, note, type, other);
try {
String parameter = dataSourceService.buildParameter(name, note, type, host, port, database, userName, password, other);
String parameter = dataSourceService.buildParameter(name, note, type, host, port, database,principal, userName, password, other);
Map<String, Object> dataSource = dataSourceService.updateDataSource(id, loginUser, name, note, type, parameter);
return returnDataList(dataSource);
} catch (Exception e) {
@ -269,13 +281,14 @@ public class DataSourceController extends BaseController {
@RequestParam(value = "host") String host,
@RequestParam(value = "port") String port,
@RequestParam(value = "database") String database,
@RequestParam(value = "principal") String principal,
@RequestParam(value = "userName") String userName,
@RequestParam(value = "password") String password,
@RequestParam(value = "other") String other) {
logger.info("login user {}, connect datasource: {} failure, note: {}, type: {}, other: {}",
loginUser.getUserName(), name, note, type, other);
try {
String parameter = dataSourceService.buildParameter(name, note, type, host, port, database, userName, password, other);
String parameter = dataSourceService.buildParameter(name, note, type, host, port, database,principal,userName, password, other);
Boolean isConnection = dataSourceService.checkConnection(type, parameter);
Result result = new Result();
@ -429,4 +442,24 @@ public class DataSourceController extends BaseController {
return error(AUTHORIZED_DATA_SOURCE.getCode(), AUTHORIZED_DATA_SOURCE.getMsg());
}
}
/**
* get kerberos startup state
*
* @param loginUser
* @return
*/
@ApiOperation(value = "getKerberosStartupState", notes= "GET_USER_INFO_NOTES")
@GetMapping(value="/kerberos-startup-state")
@ResponseStatus(HttpStatus.OK)
public Result getKerberosStartupState(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser){
logger.info("login user {},get kerberos startup state : {}", loginUser.getUserName());
try{
// if upload resource is HDFS and kerberos startup is true , else false
return success(Status.SUCCESS.getMsg(), CommonUtils.getKerberosStartupState());
}catch (Exception e){
logger.error(KERBEROS_STARTUP_STATE.getMsg(),e);
return error(Status.KERBEROS_STARTUP_STATE.getCode(), Status.KERBEROS_STARTUP_STATE.getMsg());
}
}
}
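Going by the inline comment above, CommonUtils.getKerberosStartupState() returns true only when resources are uploaded to HDFS and Kerberos is switched on. A rough sketch of that check under those assumptions (the property keys below are illustrative guesses, not necessarily the exact ones the project uses):
import java.util.Properties;
public class KerberosStateSketch {
    // illustrative only
    static boolean kerberosStartupState(Properties conf) {
        boolean hdfsUpload = "HDFS".equalsIgnoreCase(conf.getProperty("res.upload.startup.type", "NONE"));                     // assumed key
        boolean kerberosOn = Boolean.parseBoolean(conf.getProperty("hadoop.security.authentication.startup.state", "false")); // assumed key
        return hdfsUpload && kerberosOn;
    }
}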

51
escheduler-api/src/main/java/cn/escheduler/api/controller/ExecutorController.java

@ -24,7 +24,7 @@ import cn.escheduler.api.utils.Constants;
import cn.escheduler.api.utils.Result;
import cn.escheduler.common.enums.*;
import cn.escheduler.dao.model.User;
import io.swagger.annotations.Api;
import io.swagger.annotations.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
@ -38,9 +38,9 @@ import static cn.escheduler.api.enums.Status.*;
/**
* execute task controller
* execute process controller
*/
@ApiIgnore
@Api(tags = "PROCESS_INSTANCE_EXECUTOR_TAG", position = 1)
@RestController
@RequestMapping("projects/{projectName}/executors")
public class ExecutorController extends BaseController {
@ -53,10 +53,27 @@ public class ExecutorController extends BaseController {
/**
* execute process instance
*/
@ApiOperation(value = "startProcessInstance", notes= "RUN_PROCESS_INSTANCE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "processDefinitionId", value = "PROCESS_DEFINITION_ID", required = true, dataType = "Int", example = "100"),
@ApiImplicitParam(name = "scheduleTime", value = "SCHEDULE_TIME", required = true, dataType = "String"),
@ApiImplicitParam(name = "failureStrategy", value = "FAILURE_STRATEGY", required = true, dataType ="FailureStrategy"),
@ApiImplicitParam(name = "startNodeList", value = "START_NODE_LIST", dataType ="String"),
@ApiImplicitParam(name = "taskDependType", value = "TASK_DEPEND_TYPE", dataType ="TaskDependType"),
@ApiImplicitParam(name = "execType", value = "COMMAND_TYPE", dataType ="CommandType"),
@ApiImplicitParam(name = "warningType", value = "WARNING_TYPE",required = true, dataType ="WarningType"),
@ApiImplicitParam(name = "warningGroupId", value = "WARNING_GROUP_ID",required = true, dataType ="Int", example = "100"),
@ApiImplicitParam(name = "receivers", value = "RECEIVERS",dataType ="String" ),
@ApiImplicitParam(name = "receiversCc", value = "RECEIVERS_CC",dataType ="String" ),
@ApiImplicitParam(name = "runMode", value = "RUN_MODE",dataType ="RunMode" ),
@ApiImplicitParam(name = "processInstancePriority", value = "PROCESS_INSTANCE_PRIORITY", required = true, dataType = "Priority" ),
@ApiImplicitParam(name = "workerGroupId", value = "WORKER_GROUP_ID", dataType = "Int",example = "100"),
@ApiImplicitParam(name = "timeout", value = "TIMEOUT", dataType = "Int",example = "100"),
})
@PostMapping(value = "start-process-instance")
@ResponseStatus(HttpStatus.OK)
public Result startProcessInstance(@RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@PathVariable String projectName,
public Result startProcessInstance(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@ApiParam(name = "projectName", value = "PROJECT_NAME", required = true) @PathVariable String projectName,
@RequestParam(value = "processDefinitionId") int processDefinitionId,
@RequestParam(value = "scheduleTime", required = false) String scheduleTime,
@RequestParam(value = "failureStrategy", required = true) FailureStrategy failureStrategy,
@ -102,10 +119,15 @@ public class ExecutorController extends BaseController {
* @param processInstanceId
* @return
*/
@ApiOperation(value = "execute", notes= "EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "processInstanceId", value = "PROCESS_INSTANCE_ID", required = true, dataType = "Int", example = "100"),
@ApiImplicitParam(name = "executeType", value = "EXECUTE_TYPE", required = true, dataType = "ExecuteType")
})
@PostMapping(value = "/execute")
@ResponseStatus(HttpStatus.OK)
public Result execute(@RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@PathVariable String projectName,
public Result execute(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@ApiParam(name = "projectName", value = "PROJECT_NAME", required = true) @PathVariable String projectName,
@RequestParam("processInstanceId") Integer processInstanceId,
@RequestParam("executeType") ExecuteType executeType
) {
@ -127,9 +149,13 @@ public class ExecutorController extends BaseController {
* @param processDefinitionId
* @return
*/
@ApiOperation(value = "startCheckProcessDefinition", notes= "START_CHECK_PROCESS_DEFINITION_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "processDefinitionId", value = "PROCESS_DEFINITION_ID", required = true, dataType = "Int", example = "100")
})
@PostMapping(value = "/start-check")
@ResponseStatus(HttpStatus.OK)
public Result startCheckProcessDefinition(@RequestAttribute(value = Constants.SESSION_USER) User loginUser,
public Result startCheckProcessDefinition(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "processDefinitionId") int processDefinitionId) {
logger.info("login user {}, check process definition", loginUser.getUserName(), processDefinitionId);
try {
@ -149,9 +175,16 @@ public class ExecutorController extends BaseController {
* @param processDefinitionId
* @return
*/
@ApiIgnore
@ApiOperation(value = "getReceiverCc", notes= "GET_RECEIVER_CC_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "processDefinitionId", value = "PROCESS_DEFINITION_ID", required = true, dataType = "Int", example = "100"),
@ApiImplicitParam(name = "processInstanceId", value = "PROCESS_INSTANCE_ID", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/get-receiver-cc")
@ResponseStatus(HttpStatus.OK)
public Result getReceiverCc(@RequestAttribute(value = Constants.SESSION_USER) User loginUser,
public Result getReceiverCc(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "processDefinitionId",required = false) Integer processDefinitionId,
@RequestParam(value = "processInstanceId",required = false) Integer processInstanceId) {
logger.info("login user {}, get process definition receiver and cc", loginUser.getUserName());

4
escheduler-api/src/main/java/cn/escheduler/api/controller/MonitorController.java

@ -66,7 +66,7 @@ public class MonitorController extends BaseController{
logger.info("login user: {}, query all master", loginUser.getUserName());
try{
logger.info("list master, user:{}", loginUser.getUserName());
Map<String, Object> result = serverService.queryMaster(loginUser);
Map<String, Object> result = monitorService.queryMaster(loginUser);
return returnDataList(result);
}catch (Exception e){
logger.error(LIST_MASTERS_ERROR.getMsg(),e);
@ -86,7 +86,7 @@ public class MonitorController extends BaseController{
public Result listWorker(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser) {
logger.info("login user: {}, query all workers", loginUser.getUserName());
try{
Map<String, Object> result = serverService.queryWorker(loginUser);
Map<String, Object> result = monitorService.queryWorker(loginUser);
return returnDataList(result);
}catch (Exception e){
logger.error(LIST_WORKERS_ERROR.getMsg(),e);

4
escheduler-api/src/main/java/cn/escheduler/api/controller/ResourcesController.java

@ -236,9 +236,9 @@ public class ResourcesController extends BaseController{
) {
try {
logger.info("login user {}, verfiy resource alias: {},resource type: {}",
loginUser.getUserName(), alias);
loginUser.getUserName(), alias,type);
return resourceService.verifyResourceName(alias, type);
return resourceService.verifyResourceName(alias,type,loginUser);
} catch (Exception e) {
logger.error(VERIFY_RESOURCE_BY_NAME_AND_TYPE_ERROR.getMsg(), e);
return error(Status.VERIFY_RESOURCE_BY_NAME_AND_TYPE_ERROR.getCode(), Status.VERIFY_RESOURCE_BY_NAME_AND_TYPE_ERROR.getMsg());

29
escheduler-api/src/main/java/cn/escheduler/api/controller/SchedulerController.java

@ -304,4 +304,33 @@ public class SchedulerController extends BaseController {
return error(Status.QUERY_SCHEDULE_LIST_ERROR.getCode(), Status.QUERY_SCHEDULE_LIST_ERROR.getMsg());
}
}
/**
* preview schedule
*
* @param loginUser
* @param projectName
* @param schedule
* @return
*/
@ApiOperation(value = "previewSchedule", notes= "PREVIEW_SCHEDULE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "schedule", value = "SCHEDULE", dataType = "String", example = "{'startTime':'2019-06-10 00:00:00','endTime':'2019-06-13 00:00:00','crontab':'0 0 3/6 * * ? *'}"),
})
@PostMapping("/preview")
@ResponseStatus(HttpStatus.CREATED)
public Result previewSchedule(@ApiIgnore @RequestAttribute(value = SESSION_USER) User loginUser,
@ApiParam(name = "projectName", value = "PROJECT_NAME", required = true) @PathVariable String projectName,
@RequestParam(value = "schedule") String schedule
){
logger.info("login user {}, project name: {}, preview schedule: {}",
loginUser.getUserName(), projectName, schedule);
try {
Map<String, Object> result = schedulerService.previewSchedule(loginUser, projectName, schedule);
return returnDataList(result);
} catch (Exception e) {
logger.error(PREVIEW_SCHEDULE_ERROR.getMsg(), e);
return error(PREVIEW_SCHEDULE_ERROR.getCode(), PREVIEW_SCHEDULE_ERROR.getMsg());
}
}
}

8
escheduler-api/src/main/java/cn/escheduler/api/enums/Status.java

@ -161,7 +161,9 @@ public enum Status {
SAVE_ERROR(10136, "save error"),
DELETE_PROJECT_ERROR_DEFINES_NOT_NULL(10137, "please delete the process definitions in project first!"),
BATCH_DELETE_PROCESS_INSTANCE_BY_IDS_ERROR(10117,"batch delete process instance by ids {0} error"),
PREVIEW_SCHEDULE_ERROR(10139,"preview schedule error"),
PARSE_TO_CRON_EXPRESSION_ERROR(10140,"parse cron to cron expression error"),
SCHEDULE_START_TIME_END_TIME_SAME(10141,"The start time must not be the same as the end"),
UDF_FUNCTION_NOT_EXIST(20001, "UDF function not found"),
@ -174,6 +176,8 @@ public enum Status {
RESOURCE_SUFFIX_FORBID_CHANGE(20008, "resource suffix not allowed to be modified"),
UDF_RESOURCE_SUFFIX_NOT_JAR(20009, "UDF resource suffix name must be jar"),
HDFS_COPY_FAIL(20009, "hdfs copy {0} -> {1} fail"),
RESOURCE_FILE_EXIST(20010, "resource file {0} already exists in hdfs,please delete it or change name!"),
RESOURCE_FILE_NOT_EXIST(20011, "resource file {0} not exists in hdfs!"),
@ -208,6 +212,7 @@ public enum Status {
DELETE_SCHEDULE_CRON_BY_ID_ERROR(50024,"delete schedule by id error"),
BATCH_DELETE_PROCESS_DEFINE_ERROR(50025,"batch delete process definition error"),
BATCH_DELETE_PROCESS_DEFINE_BY_IDS_ERROR(50026,"batch delete process definition by ids {0} error"),
TENANT_NOT_SUITABLE(50027,"there is no suitable tenant, please choose an available tenant."),
HDFS_NOT_STARTUP(60001,"hdfs not startup"),
HDFS_TERANT_RESOURCES_FILE_EXISTS(60002,"resource file exists,please delete resource first"),
@ -230,6 +235,7 @@ public enum Status {
QUEUE_COUNT_ERROR(90001,"queue count error"),
KERBEROS_STARTUP_STATE(100001,"get kerberos startup state error"),
;
private int code;

98
escheduler-api/src/main/java/cn/escheduler/api/service/DataSourceService.java

@ -21,7 +21,10 @@ import cn.escheduler.api.utils.Constants;
import cn.escheduler.api.utils.PageInfo;
import cn.escheduler.api.utils.Result;
import cn.escheduler.common.enums.DbType;
import cn.escheduler.common.enums.UserType;
import cn.escheduler.common.job.db.*;
import cn.escheduler.common.utils.CommonUtils;
import cn.escheduler.common.utils.JSONUtils;
import cn.escheduler.dao.mapper.DataSourceMapper;
import cn.escheduler.dao.mapper.DatasourceUserMapper;
import cn.escheduler.dao.mapper.ProjectMapper;
@ -30,6 +33,8 @@ import cn.escheduler.dao.model.Resource;
import cn.escheduler.dao.model.User;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.fastjson.TypeReference;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
@ -41,6 +46,8 @@ import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.*;
import static cn.escheduler.common.utils.PropertyUtils.getString;
/**
* datasource service
*/
@ -54,19 +61,16 @@ public class DataSourceService extends BaseService{
public static final String TYPE = "type";
public static final String HOST = "host";
public static final String PORT = "port";
public static final String PRINCIPAL = "principal";
public static final String DATABASE = "database";
public static final String USER_NAME = "userName";
public static final String PASSWORD = "password";
public static final String PASSWORD = cn.escheduler.common.Constants.PASSWORD;
public static final String OTHER = "other";
@Autowired
private ProjectMapper projectMapper;
@Autowired
private DataSourceMapper dataSourceMapper;
@Autowired
private ProjectService projectService;
@Autowired
private DatasourceUserMapper datasourceUserMapper;
@ -239,6 +243,7 @@ public class DataSourceService extends BaseService{
map.put(TYPE, dataSourceType);
map.put(HOST, host);
map.put(PORT, port);
map.put(PRINCIPAL, datasourceForm.getPrincipal());
map.put(DATABASE, database);
map.put(USER_NAME, datasourceForm.getUser());
map.put(PASSWORD, datasourceForm.getPassword());
@ -284,13 +289,37 @@ public class DataSourceService extends BaseService{
* @return
*/
private List<DataSource> getDataSources(User loginUser, String searchVal, Integer pageSize, PageInfo pageInfo) {
List<DataSource> dataSourceList = null;
if (isAdmin(loginUser)) {
return dataSourceMapper.queryAllDataSourcePaging(searchVal, pageInfo.getStart(), pageSize);
dataSourceList = dataSourceMapper.queryAllDataSourcePaging(searchVal, pageInfo.getStart(), pageSize);
}else{
dataSourceList = dataSourceMapper.queryDataSourcePaging(loginUser.getId(), searchVal,
pageInfo.getStart(), pageSize);
}
return dataSourceMapper.queryDataSourcePaging(loginUser.getId(), searchVal,
pageInfo.getStart(), pageSize);
handlePasswd(dataSourceList);
return dataSourceList;
}
/**
* handle datasource connection password for safety
* @param dataSourceList
*/
private void handlePasswd(List<DataSource> dataSourceList) {
for (DataSource dataSource : dataSourceList) {
String connectionParams = dataSource.getConnectionParams();
JSONObject object = JSONObject.parseObject(connectionParams);
object.put(cn.escheduler.common.Constants.PASSWORD, cn.escheduler.common.Constants.XXXXXX);
dataSource.setConnectionParams(JSONUtils.toJson(object));
}
}
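The effect on what the UI receives, with made-up connection params (the actual mask text is the project constant Constants.XXXXXX):
JSONObject object = JSONObject.parseObject("{\"user\":\"root\",\"password\":\"secret\"}");
object.put("password", "xxxxxx");   // the real code uses Constants.PASSWORD and Constants.XXXXXX
// object.toJSONString() -> {"user":"root","password":"xxxxxx"}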
/**
* get datasource total num
*
@ -313,7 +342,14 @@ public class DataSourceService extends BaseService{
*/
public Map<String, Object> queryDataSourceList(User loginUser, Integer type) {
Map<String, Object> result = new HashMap<>(5);
List<DataSource> datasourceList = dataSourceMapper.queryDataSourceByType(loginUser.getId(), type);
List<DataSource> datasourceList;
if (isAdmin(loginUser)) {
datasourceList = dataSourceMapper.listAllDataSourceByType(type);
}else{
datasourceList = dataSourceMapper.queryDataSourceByType(loginUser.getId(), type);
}
result.put(Constants.DATA_LIST, datasourceList);
putMsg(result, Status.SUCCESS);
@ -362,11 +398,21 @@ public class DataSourceService extends BaseService{
Class.forName(Constants.COM_MYSQL_JDBC_DRIVER);
break;
case HIVE:
datasource = JSONObject.parseObject(parameter, HiveDataSource.class);
Class.forName(Constants.ORG_APACHE_HIVE_JDBC_HIVE_DRIVER);
break;
case SPARK:
datasource = JSONObject.parseObject(parameter, SparkDataSource.class);
if (CommonUtils.getKerberosStartupState()) {
System.setProperty(cn.escheduler.common.Constants.JAVA_SECURITY_KRB5_CONF,
getString(cn.escheduler.common.Constants.JAVA_SECURITY_KRB5_CONF_PATH));
Configuration configuration = new Configuration();
configuration.set(cn.escheduler.common.Constants.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
UserGroupInformation.setConfiguration(configuration);
UserGroupInformation.loginUserFromKeytab(getString(cn.escheduler.common.Constants.LOGIN_USER_KEY_TAB_USERNAME),
getString(cn.escheduler.common.Constants.LOGIN_USER_KEY_TAB_PATH));
}
if (dbType == DbType.HIVE){
datasource = JSONObject.parseObject(parameter, HiveDataSource.class);
}else if (dbType == DbType.SPARK){
datasource = JSONObject.parseObject(parameter, SparkDataSource.class);
}
Class.forName(Constants.ORG_APACHE_HIVE_JDBC_HIVE_DRIVER);
break;
case CLICKHOUSE:
@ -442,10 +488,18 @@ public class DataSourceService extends BaseService{
* @param other
* @return
*/
public String buildParameter(String name, String desc, DbType type, String host, String port, String database, String userName, String password, String other) {
public String buildParameter(String name, String desc, DbType type, String host,
String port, String database,String principal,String userName,
String password, String other) {
String address = buildAddress(type, host, port);
String jdbcUrl = address + "/" + database;
if (CommonUtils.getKerberosStartupState() &&
(type == DbType.HIVE || type == DbType.SPARK)){
jdbcUrl += ";principal=" + principal;
}
String separator = "";
if (Constants.MYSQL.equals(type.name())
|| Constants.POSTGRESQL.equals(type.name())
@ -464,6 +518,10 @@ public class DataSourceService extends BaseService{
parameterMap.put(Constants.JDBC_URL, jdbcUrl);
parameterMap.put(Constants.USER, userName);
parameterMap.put(Constants.PASSWORD, password);
if (CommonUtils.getKerberosStartupState() &&
(type == DbType.HIVE || type == DbType.SPARK)){
parameterMap.put(Constants.PRINCIPAL,principal);
}
if (other != null && !"".equals(other)) {
Map map = JSONObject.parseObject(other, new TypeReference<LinkedHashMap<String, String>>() {
});
@ -537,7 +595,7 @@ public class DataSourceService extends BaseService{
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
if(loginUser.getId() != dataSource.getUserId()){
if(loginUser.getId() != dataSource.getUserId() && loginUser.getUserType() != UserType.ADMIN_USER){
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
@ -622,13 +680,13 @@ public class DataSourceService extends BaseService{
*/
private String[] getHostsAndPort(String address) {
String[] result = new String[2];
String[] tmpArray = address.split("//");
String[] tmpArray = address.split(cn.escheduler.common.Constants.DOUBLE_SLASH);
String hostsAndPorts = tmpArray[tmpArray.length - 1];
StringBuilder hosts = new StringBuilder("");
String[] hostPortArray = hostsAndPorts.split(",");
String port = hostPortArray[0].split(":")[1];
StringBuilder hosts = new StringBuilder();
String[] hostPortArray = hostsAndPorts.split(cn.escheduler.common.Constants.COMMA);
String port = hostPortArray[0].split(cn.escheduler.common.Constants.COLON)[1];
for (String hostPort : hostPortArray) {
hosts.append(hostPort.split(":")[0]).append(",");
hosts.append(hostPort.split(cn.escheduler.common.Constants.COLON)[0]).append(cn.escheduler.common.Constants.COMMA);
}
hosts.deleteCharAt(hosts.length() - 1);
result[0] = hosts.toString();
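Putting the Kerberos changes together: with Kerberos on, buildParameter appends ";principal=..." to the JDBC URL, and getHostsAndPort later splits a multi-host address back into hosts and a port. A small worked example (host names and principal are made up):
// buildParameter output for a HIVE source with Kerberos enabled might look like:
//   jdbc:hive2://host1:10000,host2:10000/default;principal=hive/_HOST@EXAMPLE.COM
String address = "jdbc:hive2://host1:10000,host2:10000";
String hostsAndPorts = address.split("//")[1];               // host1:10000,host2:10000
String port = hostsAndPorts.split(",")[0].split(":")[1];     // 10000
StringBuilder hosts = new StringBuilder();
for (String hp : hostsAndPorts.split(",")) {
    hosts.append(hp.split(":")[0]).append(",");
}
hosts.deleteCharAt(hosts.length() - 1);                      // host1,host2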

29
escheduler-api/src/main/java/cn/escheduler/api/service/ExecutorService.java

@ -110,6 +110,13 @@ public class ExecutorService extends BaseService{
return result;
}
if (!checkTenantSuitable(processDefinition)){
logger.error("there is not any vaild tenant for the process definition: id:{},name:{}, ",
processDefinition.getId(), processDefinition.getName());
putMsg(result, Status.TENANT_NOT_SUITABLE);
return result;
}
/**
* create command
*/
@ -190,6 +197,11 @@ public class ExecutorService extends BaseService{
if (status != Status.SUCCESS) {
return checkResult;
}
if (!checkTenantSuitable(processDefinition)){
logger.error("there is not any vaild tenant for the process definition: id:{},name:{}, ",
processDefinition.getId(), processDefinition.getName());
putMsg(result, Status.TENANT_NOT_SUITABLE);
}
switch (executeType) {
case REPEAT_RUNNING:
@ -230,6 +242,21 @@ public class ExecutorService extends BaseService{
return result;
}
/**
* check tenant suitable
* @param processDefinition
* @return
*/
private boolean checkTenantSuitable(ProcessDefinition processDefinition) {
// checkTenantExists();
Tenant tenant = processDao.getTenantForProcess(processDefinition.getTenantId(),
processDefinition.getUserId());
if(tenant == null){
return false;
}
return true;
}
/**
* Check the state of process instance and the type of operation match
*
@ -260,7 +287,7 @@ public class ExecutorService extends BaseService{
}
break;
case RECOVER_SUSPENDED_PROCESS:
if (executionStatus.typeIsPause()) {
if (executionStatus.typeIsPause()|| executionStatus.typeIsCancel()) {
checkResult = true;
}
default:

41
escheduler-api/src/main/java/cn/escheduler/api/service/MonitorService.java

@ -18,13 +18,16 @@ package cn.escheduler.api.service;
import cn.escheduler.api.enums.Status;
import cn.escheduler.api.utils.Constants;
import cn.escheduler.api.utils.ZookeeperMonitorUtils;
import cn.escheduler.api.utils.ZookeeperMonitor;
import cn.escheduler.dao.MonitorDBDao;
import cn.escheduler.dao.model.MasterServer;
import cn.escheduler.dao.model.MonitorRecord;
import cn.escheduler.dao.model.User;
import cn.escheduler.dao.model.ZookeeperRecord;
import org.apache.hadoop.mapred.Master;
import org.springframework.stereotype.Service;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@ -52,6 +55,22 @@ public class MonitorService extends BaseService{
}
/**
* query master list
*
* @param loginUser
* @return
*/
public Map<String,Object> queryMaster(User loginUser) {
Map<String, Object> result = new HashMap<>(5);
List<MasterServer> masterServers = new ZookeeperMonitor().getMasterServers();
result.put(Constants.DATA_LIST, masterServers);
putMsg(result,Status.SUCCESS);
return result;
}
/**
* query zookeeper state
@ -61,7 +80,7 @@ public class MonitorService extends BaseService{
public Map<String,Object> queryZookeeperState(User loginUser) {
Map<String, Object> result = new HashMap<>(5);
List<ZookeeperRecord> zookeeperRecordList = ZookeeperMonitorUtils.zookeeperInfoList();
List<ZookeeperRecord> zookeeperRecordList = ZookeeperMonitor.zookeeperInfoList();
result.put(Constants.DATA_LIST, zookeeperRecordList);
putMsg(result, Status.SUCCESS);
@ -69,4 +88,22 @@ public class MonitorService extends BaseService{
return result;
}
/**
* query worker list
*
* @param loginUser
* @return
*/
public Map<String,Object> queryWorker(User loginUser) {
Map<String, Object> result = new HashMap<>(5);
List<MasterServer> workerServers = new ZookeeperMonitor().getWorkerServers();
result.put(Constants.DATA_LIST, workerServers);
putMsg(result,Status.SUCCESS);
return result;
}
}
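Callers such as MonitorController above read the list back out of the returned map; a minimal usage fragment (assumes an injected monitorService and an slf4j logger; getHost/getPort are assumed getters on the MasterServer model):
Map<String, Object> res = monitorService.queryWorker(loginUser);
List<MasterServer> workers = (List<MasterServer>) res.get(Constants.DATA_LIST);   // unchecked cast
for (MasterServer s : workers) {
    logger.info("worker {}:{}", s.getHost(), s.getPort());
}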

7
escheduler-api/src/main/java/cn/escheduler/api/service/ProcessDefinitionService.java

@ -24,6 +24,7 @@ import cn.escheduler.api.utils.PageInfo;
import cn.escheduler.common.enums.Flag;
import cn.escheduler.common.enums.ReleaseState;
import cn.escheduler.common.enums.TaskType;
import cn.escheduler.common.enums.UserType;
import cn.escheduler.common.graph.DAG;
import cn.escheduler.common.model.TaskNode;
import cn.escheduler.common.model.TaskNodeRelation;
@ -127,6 +128,7 @@ public class ProcessDefinitionService extends BaseDAGService {
processDefine.setLocations(locations);
processDefine.setConnects(connects);
processDefine.setTimeout(processData.getTimeout());
processDefine.setTenantId(processData.getTenantId());
//custom global params
List<Property> globalParamsList = processData.getGlobalParams();
@ -291,6 +293,7 @@ public class ProcessDefinitionService extends BaseDAGService {
processDefine.setLocations(locations);
processDefine.setConnects(connects);
processDefine.setTimeout(processData.getTimeout());
processDefine.setTenantId(processData.getTenantId());
//custom global params
List<Property> globalParamsList = new ArrayList<>();
@ -365,7 +368,7 @@ public class ProcessDefinitionService extends BaseDAGService {
}
// Determine if the login user is the owner of the process definition
if (loginUser.getId() != processDefinition.getUserId()) {
if (loginUser.getId() != processDefinition.getUserId() && loginUser.getUserType() != UserType.ADMIN_USER) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
@ -487,7 +490,7 @@ public class ProcessDefinitionService extends BaseDAGService {
// set status
schedule.setReleaseState(ReleaseState.OFFLINE);
scheduleMapper.update(schedule);
deleteSchedule(project.getId(), id);
deleteSchedule(project.getId(), schedule.getId());
}
break;
}

30
escheduler-api/src/main/java/cn/escheduler/api/service/ProcessInstanceService.java

@ -38,10 +38,7 @@ import cn.escheduler.common.utils.JSONUtils;
import cn.escheduler.common.utils.ParameterUtils;
import cn.escheduler.common.utils.placeholder.BusinessTimeUtils;
import cn.escheduler.dao.ProcessDao;
import cn.escheduler.dao.mapper.ProcessDefinitionMapper;
import cn.escheduler.dao.mapper.ProcessInstanceMapper;
import cn.escheduler.dao.mapper.ProjectMapper;
import cn.escheduler.dao.mapper.TaskInstanceMapper;
import cn.escheduler.dao.mapper.*;
import cn.escheduler.dao.model.*;
import com.alibaba.fastjson.JSON;
import org.apache.commons.lang3.StringUtils;
@ -97,6 +94,9 @@ public class ProcessInstanceService extends BaseDAGService {
@Autowired
LoggerService loggerService;
@Autowired
WorkerGroupMapper workerGroupMapper;
/**
* query process instance by id
*
@ -115,6 +115,21 @@ public class ProcessInstanceService extends BaseDAGService {
return checkResult;
}
ProcessInstance processInstance = processDao.findProcessInstanceDetailById(processId);
String workerGroupName = "";
if(processInstance.getWorkerGroupId() == -1){
workerGroupName = DEFAULT;
}else{
WorkerGroup workerGroup = workerGroupMapper.queryById(processInstance.getWorkerGroupId());
if(workerGroup == null){
workerGroupName = DEFAULT;
}else{
workerGroupName = workerGroup.getName();
}
}
processInstance.setWorkerGroupName(workerGroupName);
ProcessDefinition processDefinition = processDao.findProcessDefineById(processInstance.getProcessDefinitionId());
processInstance.setReceivers(processDefinition.getReceivers());
processInstance.setReceiversCc(processDefinition.getReceiversCc());
result.put(Constants.DATA_LIST, processInstance);
putMsg(result, Status.SUCCESS);
@ -364,6 +379,7 @@ public class ProcessInstanceService extends BaseDAGService {
String globalParams = null;
String originDefParams = null;
int timeout = processInstance.getTimeout();
ProcessDefinition processDefinition = processDao.findProcessDefineById(processInstance.getProcessDefinitionId());
if (StringUtils.isNotEmpty(processInstanceJson)) {
ProcessData processData = JSONUtils.parseObject(processInstanceJson, ProcessData.class);
//check workflow json is valid
@ -379,6 +395,11 @@ public class ProcessInstanceService extends BaseDAGService {
processInstance.getCmdTypeIfComplement(), schedule);
timeout = processData.getTimeout();
processInstance.setTimeout(timeout);
Tenant tenant = processDao.getTenantForProcess(processData.getTenantId(),
processDefinition.getUserId());
if(tenant != null){
processInstance.setTenantCode(tenant.getTenantCode());
}
processInstance.setProcessInstanceJson(processInstanceJson);
processInstance.setGlobalParams(globalParams);
}
@ -387,7 +408,6 @@ public class ProcessInstanceService extends BaseDAGService {
int update = processDao.updateProcessInstance(processInstance);
int updateDefine = 1;
if (syncDefine && StringUtils.isNotEmpty(processInstanceJson)) {
ProcessDefinition processDefinition = processDao.findProcessDefineById(processInstance.getProcessDefinitionId());
processDefinition.setProcessDefinitionJson(processInstanceJson);
processDefinition.setGlobalParams(originDefParams);
processDefinition.setLocations(locations);

9
escheduler-api/src/main/java/cn/escheduler/api/service/ProjectService.java

@ -76,15 +76,6 @@ public class ProjectService extends BaseService{
return descCheck;
}
/**
* only general users can create projects. administrators have no corresponding tenants and can only view
*/
if (!userService.isGeneral(loginUser)) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
Project project = projectMapper.queryByName(name);
if (project != null) {
putMsg(result, Status.PROJECT_ALREADY_EXISTS, name);

135
escheduler-api/src/main/java/cn/escheduler/api/service/ResourcesService.java

@ -21,6 +21,7 @@ import cn.escheduler.api.utils.Constants;
import cn.escheduler.api.utils.PageInfo;
import cn.escheduler.api.utils.Result;
import cn.escheduler.common.enums.ResourceType;
import cn.escheduler.common.enums.UserType;
import cn.escheduler.common.utils.FileUtils;
import cn.escheduler.common.utils.HadoopUtils;
import cn.escheduler.common.utils.PropertyUtils;
@ -85,8 +86,8 @@ public class ResourcesService extends BaseService {
Result result = new Result();
// if hdfs not startup
if (!PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
logger.error("hdfs startup state: {}", PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE));
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
@ -184,9 +185,9 @@ public class ResourcesService extends BaseService {
ResourceType type) {
Result result = new Result();
// if hdfs not startup
if (!PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
logger.error("hdfs startup state: {}", PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE));
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
@ -368,7 +369,12 @@ public class ResourcesService extends BaseService {
public Map<String, Object> queryResourceList(User loginUser, ResourceType type) {
Map<String, Object> result = new HashMap<>(5);
List<Resource> resourceList = resourcesMapper.queryResourceListAuthored(loginUser.getId(), type.ordinal());
List<Resource> resourceList;
if(isAdmin(loginUser)){
resourceList = resourcesMapper.listAllResourceByType(type.ordinal());
}else{
resourceList = resourcesMapper.queryResourceListAuthored(loginUser.getId(), type.ordinal());
}
result.put(Constants.DATA_LIST, resourceList);
putMsg(result,Status.SUCCESS);
@ -385,9 +391,9 @@ public class ResourcesService extends BaseService {
public Result delete(User loginUser, int resourceId) throws Exception {
Result result = new Result();
// if hdfs not startup
if (!PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
logger.error("hdfs startup state: {}", PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE));
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
@ -399,7 +405,7 @@ public class ResourcesService extends BaseService {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
if (loginUser.getId() != resource.getUserId()) {
if (loginUser.getId() != resource.getUserId() && loginUser.getUserType() != UserType.ADMIN_USER) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
@ -420,6 +426,41 @@ public class ResourcesService extends BaseService {
return result;
}
/**
* verify resource by name and type
* @param name
* @param type
* @param loginUser
* @return
*/
public Result verifyResourceName(String name, ResourceType type,User loginUser) {
Result result = new Result();
putMsg(result, Status.SUCCESS);
Resource resource = resourcesMapper.queryResourceByNameAndType(name, type.ordinal());
if (resource != null) {
logger.error("resource type:{} name:{} has exist, can't create again.", type, name);
putMsg(result, Status.RESOURCE_EXIST);
} else {
// query tenant
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
try {
String hdfsFilename = getHdfsFileName(type,tenantCode,name);
if(HadoopUtils.getInstance().exists(hdfsFilename)){
logger.error("resource type:{} name:{} has exist in hdfs {}, can't create again.", type, name,hdfsFilename);
putMsg(result, Status.RESOURCE_FILE_EXIST,hdfsFilename);
}
} catch (Exception e) {
logger.error(e.getMessage(),e);
putMsg(result,Status.HDFS_OPERATION_ERROR);
}
}
return result;
}
/**
* verify resource by name and type
*
@ -448,9 +489,9 @@ public class ResourcesService extends BaseService {
public Result readResource(int resourceId, int skipLineNum, int limit) {
Result result = new Result();
// if hdfs not startup
if (!PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
logger.error("hdfs startup state: {}", PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE));
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
@ -480,13 +521,19 @@ public class ResourcesService extends BaseService {
String hdfsFileName = HadoopUtils.getHdfsFilename(tenantCode, resource.getAlias());
logger.info("resource hdfs path is {} ", hdfsFileName);
try {
List<String> content = HadoopUtils.getInstance().catFile(hdfsFileName, skipLineNum, limit);
if(HadoopUtils.getInstance().exists(hdfsFileName)){
List<String> content = HadoopUtils.getInstance().catFile(hdfsFileName, skipLineNum, limit);
putMsg(result, Status.SUCCESS);
Map<String, Object> map = new HashMap<>();
map.put(ALIAS, resource.getAlias());
map.put(CONTENT, StringUtils.join(content.toArray(), "\n"));
result.setData(map);
}else{
logger.error("read file {} not exist in hdfs", hdfsFileName);
putMsg(result, Status.RESOURCE_FILE_NOT_EXIST,hdfsFileName);
}
putMsg(result, Status.SUCCESS);
Map<String, Object> map = new HashMap<>();
map.put(ALIAS, resource.getAlias());
map.put(CONTENT, StringUtils.join(content.toArray(), "\n"));
result.setData(map);
} catch (Exception e) {
logger.error(String.format("Resource %s read failed", hdfsFileName), e);
putMsg(result, Status.HDFS_OPERATION_ERROR);
@ -509,9 +556,9 @@ public class ResourcesService extends BaseService {
@Transactional(value = "TransactionManager",rollbackFor = Exception.class)
public Result onlineCreateResource(User loginUser, ResourceType type, String fileName, String fileSuffix, String desc, String content) {
Result result = new Result();
// if hdfs not startup
if (!PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
logger.error("hdfs startup state: {}", PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE));
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
@ -530,17 +577,14 @@ public class ResourcesService extends BaseService {
String name = fileName.trim() + "." + nameSuffix;
//check file already exists
Resource resource = resourcesMapper.queryResourceByNameAndType(name, type.ordinal());
if (resource != null) {
logger.error("resource {} has exist, can't recreate .", name);
putMsg(result, Status.RESOURCE_EXIST);
result = verifyResourceName(name,type,loginUser);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
// save data
Date now = new Date();
resource = new Resource(name,name,desc,loginUser.getId(),type,content.getBytes().length,now,now);
Resource resource = new Resource(name,name,desc,loginUser.getId(),type,content.getBytes().length,now,now);
resourcesMapper.insert(resource);
@ -569,12 +613,13 @@ public class ResourcesService extends BaseService {
* @param resourceId
* @return
*/
@Transactional(value = "TransactionManager",rollbackFor = Exception.class)
public Result updateResourceContent(int resourceId, String content) {
Result result = new Result();
// if hdfs not startup
if (!PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
logger.error("hdfs startup state: {}", PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE));
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
@ -597,6 +642,10 @@ public class ResourcesService extends BaseService {
}
}
resource.setSize(content.getBytes().length);
resource.setUpdateTime(new Date());
resourcesMapper.update(resource);
User user = userMapper.queryDetailsById(resource.getUserId());
String tenantCode = tenantMapper.queryById(user.getTenantId()).getTenantCode();
@ -643,6 +692,7 @@ public class ResourcesService extends BaseService {
logger.error("{} is not exist", resourcePath);
result.setCode(Status.HDFS_OPERATION_ERROR.getCode());
result.setMsg(String.format("%s is not exist", resourcePath));
return result;
}
} catch (Exception e) {
logger.error(e.getMessage(), e);
@ -662,9 +712,9 @@ public class ResourcesService extends BaseService {
* @return
*/
public org.springframework.core.io.Resource downloadResource(int resourceId) throws Exception {
// if hdfs not startup
if (!PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
logger.error("hdfs startup state: {}", PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE));
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
throw new RuntimeException("hdfs not startup");
}
@ -809,6 +859,23 @@ public class ResourcesService extends BaseService {
return hdfsFileName;
}
/**
* get hdfs file name
*
* @param resourceType
* @param tenantCode
* @param hdfsFileName
* @return
*/
private String getHdfsFileName(ResourceType resourceType, String tenantCode, String hdfsFileName) {
if (resourceType.equals(ResourceType.FILE)) {
hdfsFileName = HadoopUtils.getHdfsFilename(tenantCode, hdfsFileName);
} else if (resourceType.equals(ResourceType.UDF)) {
hdfsFileName = HadoopUtils.getHdfsUdfFilename(tenantCode, hdfsFileName);
}
return hdfsFileName;
}
/**
* get authorized resource list
*

54
escheduler-api/src/main/java/cn/escheduler/api/service/SchedulerService.java

@ -19,14 +19,13 @@ package cn.escheduler.api.service;
import cn.escheduler.api.dto.ScheduleParam;
import cn.escheduler.api.enums.Status;
import cn.escheduler.server.quartz.ProcessScheduleJob;
import cn.escheduler.server.quartz.QuartzExecutors;
import cn.escheduler.api.utils.Constants;
import cn.escheduler.api.utils.PageInfo;
import cn.escheduler.common.enums.FailureStrategy;
import cn.escheduler.common.enums.Priority;
import cn.escheduler.common.enums.ReleaseState;
import cn.escheduler.common.enums.WarningType;
import cn.escheduler.common.utils.DateUtils;
import cn.escheduler.common.utils.JSONUtils;
import cn.escheduler.dao.ProcessDao;
import cn.escheduler.dao.mapper.MasterServerMapper;
@ -34,7 +33,11 @@ import cn.escheduler.dao.mapper.ProcessDefinitionMapper;
import cn.escheduler.dao.mapper.ProjectMapper;
import cn.escheduler.dao.mapper.ScheduleMapper;
import cn.escheduler.dao.model.*;
import cn.escheduler.dao.utils.cron.CronUtils;
import cn.escheduler.server.quartz.ProcessScheduleJob;
import cn.escheduler.server.quartz.QuartzExecutors;
import org.apache.commons.lang3.StringUtils;
import org.quartz.CronExpression;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
@ -42,6 +45,7 @@ import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import java.io.IOException;
import java.text.ParseException;
import java.util.*;
/**
@ -115,6 +119,11 @@ public class SchedulerService extends BaseService {
scheduleObj.setProcessDefinitionName(processDefinition.getName());
ScheduleParam scheduleParam = JSONUtils.parseObject(schedule, ScheduleParam.class);
if (DateUtils.differSec(scheduleParam.getStartTime(),scheduleParam.getEndTime()) == 0) {
logger.warn("The start time must not be the same as the end");
putMsg(result,Status.SCHEDULE_START_TIME_END_TIME_SAME);
return result;
}
scheduleObj.setStartTime(scheduleParam.getStartTime());
scheduleObj.setEndTime(scheduleParam.getEndTime());
if (!org.quartz.CronExpression.isValidExpression(scheduleParam.getCrontab())) {
@ -201,6 +210,11 @@ public class SchedulerService extends BaseService {
// updateProcessInstance param
if (StringUtils.isNotEmpty(scheduleExpression)) {
ScheduleParam scheduleParam = JSONUtils.parseObject(scheduleExpression, ScheduleParam.class);
if (DateUtils.differSec(scheduleParam.getStartTime(),scheduleParam.getEndTime()) == 0) {
logger.warn("The start time must not be the same as the end");
putMsg(result,Status.SCHEDULE_START_TIME_END_TIME_SAME);
return result;
}
schedule.setStartTime(scheduleParam.getStartTime());
schedule.setEndTime(scheduleParam.getEndTime());
if (!org.quartz.CronExpression.isValidExpression(scheduleParam.getCrontab())) {
@ -442,14 +456,14 @@ public class SchedulerService extends BaseService {
/**
* delete schedule
*/
public static void deleteSchedule(int projectId, int processId) throws RuntimeException{
logger.info("delete schedules of project id:{}, flow id:{}", projectId, processId);
public static void deleteSchedule(int projectId, int scheduleId) throws RuntimeException{
logger.info("delete schedules of project id:{}, schedule id:{}", projectId, scheduleId);
String jobName = QuartzExecutors.buildJobName(processId);
String jobName = QuartzExecutors.buildJobName(scheduleId);
String jobGroupName = QuartzExecutors.buildJobGroupName(projectId);
if(!QuartzExecutors.getInstance().deleteJob(jobName, jobGroupName)){
logger.warn("set offline failure:projectId:{},processId:{}",projectId,processId);
logger.warn("set offline failure:projectId:{},scheduleId:{}",projectId,scheduleId);
throw new RuntimeException(String.format("set offline failure"));
}
@ -537,4 +551,32 @@ public class SchedulerService extends BaseService {
}
return result;
}
/**
* preview schedule
* @param loginUser
* @param projectName
* @param schedule
* @return
*/
public Map<String,Object> previewSchedule(User loginUser, String projectName, String schedule) {
Map<String, Object> result = new HashMap<>(5);
CronExpression cronExpression;
ScheduleParam scheduleParam = JSONUtils.parseObject(schedule, ScheduleParam.class);
Date now = new Date();
Date startTime = now.after(scheduleParam.getStartTime()) ? now : scheduleParam.getStartTime();
Date endTime = scheduleParam.getEndTime();
try {
cronExpression = CronUtils.parse2CronExpression(scheduleParam.getCrontab());
} catch (ParseException e) {
logger.error(e.getMessage(),e);
putMsg(result,Status.PARSE_TO_CRON_EXPRESSION_ERROR);
return result;
}
List<Date> selfFireDateList = CronUtils.getSelfFireDateList(startTime, endTime,cronExpression);
result.put(Constants.DATA_LIST, selfFireDateList.stream().map(t -> DateUtils.dateToString(t)).limit(cn.escheduler.common.Constants.PREVIEW_SCHEDULE_EXECUTE_COUNT));
putMsg(result, Status.SUCCESS);
return result;
}
}
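CronUtils is project code not shown in this diff, but the same preview can be sketched with plain Quartz, which the scheduler already depends on (the cron string, the window, and the assumed preview count of 5 are illustrative):
import org.quartz.CronExpression;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
public class PreviewScheduleSketch {
    public static void main(String[] args) throws ParseException {
        CronExpression cron = new CronExpression("0 0 3/6 * * ? *");
        Date cursor = new Date();                                        // start of the preview window
        Date end = new Date(cursor.getTime() + 3L * 24 * 3600 * 1000);   // hypothetical end: three days later
        List<Date> fireTimes = new ArrayList<>();
        while (fireTimes.size() < 5) {                                   // PREVIEW_SCHEDULE_EXECUTE_COUNT assumed to be 5
            Date next = cron.getNextValidTimeAfter(cursor);
            if (next == null || next.after(end)) {
                break;
            }
            fireTimes.add(next);
            cursor = next;
        }
        fireTimes.forEach(System.out::println);
    }
}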

44
escheduler-api/src/main/java/cn/escheduler/api/service/TenantService.java

@ -96,7 +96,7 @@ public class TenantService extends BaseService{
tenantMapper.insert(tenant);
// if hdfs startup
if (PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
if (PropertyUtils.getResUploadStartupState()){
String resourcePath = HadoopUtils.getHdfsDataBasePath() + "/" + tenantCode + "/resources";
String udfsPath = HadoopUtils.getHdfsUdfDir(tenantCode);
/**
@ -166,7 +166,7 @@ public class TenantService extends BaseService{
Tenant tenant = tenantMapper.queryById(id);
if (tenant == null){
putMsg(result, Status.USER_NOT_EXIST, id);
putMsg(result, Status.TENANT_NOT_EXIST);
return result;
}
@ -178,7 +178,7 @@ public class TenantService extends BaseService{
Tenant newTenant = tenantMapper.queryByTenantCode(tenantCode);
if (newTenant == null){
// if hdfs startup
if (PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
if (PropertyUtils.getResUploadStartupState()){
String resourcePath = HadoopUtils.getHdfsDataBasePath() + "/" + tenantCode + "/resources";
String udfsPath = HadoopUtils.getHdfsUdfDir(tenantCode);
//init hdfs resource
@ -230,25 +230,34 @@ public class TenantService extends BaseService{
Tenant tenant = tenantMapper.queryById(id);
String tenantPath = HadoopUtils.getHdfsDataBasePath() + "/" + tenant.getTenantCode();
String resourcePath = HadoopUtils.getHdfsDir(tenant.getTenantCode());
FileStatus[] fileStatus = HadoopUtils.getInstance().listFileStatus(resourcePath);
if (fileStatus.length > 0) {
putMsg(result, Status.HDFS_TERANT_RESOURCES_FILE_EXISTS);
return result;
}
fileStatus = HadoopUtils.getInstance().listFileStatus(HadoopUtils.getHdfsUdfDir(tenant.getTenantCode()));
if (fileStatus.length > 0) {
putMsg(result, Status.HDFS_TERANT_UDFS_FILE_EXISTS);
if (tenant == null){
putMsg(result, Status.TENANT_NOT_EXIST);
return result;
}
HadoopUtils.getInstance().delete(tenantPath, true);
// if resource upload startup
if (PropertyUtils.getResUploadStartupState()){
String tenantPath = HadoopUtils.getHdfsDataBasePath() + "/" + tenant.getTenantCode();
if (HadoopUtils.getInstance().exists(tenantPath)){
String resourcePath = HadoopUtils.getHdfsDir(tenant.getTenantCode());
FileStatus[] fileStatus = HadoopUtils.getInstance().listFileStatus(resourcePath);
if (fileStatus.length > 0) {
putMsg(result, Status.HDFS_TERANT_RESOURCES_FILE_EXISTS);
return result;
}
fileStatus = HadoopUtils.getInstance().listFileStatus(HadoopUtils.getHdfsUdfDir(tenant.getTenantCode()));
if (fileStatus.length > 0) {
putMsg(result, Status.HDFS_TERANT_UDFS_FILE_EXISTS);
return result;
}
HadoopUtils.getInstance().delete(tenantPath, true);
}
}
tenantMapper.deleteById(id);
putMsg(result, Status.SUCCESS);
return result;
}
@ -261,9 +270,6 @@ public class TenantService extends BaseService{
public Map<String, Object> queryTenantList(User loginUser) {
Map<String, Object> result = new HashMap<>(5);
if (checkAdmin(loginUser, result)) {
return result;
}
List<Tenant> resourceList = tenantMapper.queryAllTenant();
result.put(Constants.DATA_LIST, resourceList);

12
escheduler-api/src/main/java/cn/escheduler/api/service/UdfFuncService.java

@ -80,9 +80,9 @@ public class UdfFuncService extends BaseService{
int resourceId) {
Result result = new Result();
// if hdfs not startup
if (!PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
logger.error("hdfs startup state: {}", PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE));
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
@ -167,9 +167,9 @@ public class UdfFuncService extends BaseService{
// verify udfFunc is exist
UdfFunc udf = udfFuncMapper.queryUdfById(udfFuncId);
// if hdfs not startup
if (!PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
logger.error("hdfs startup state: {}", PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE));
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}

54
escheduler-api/src/main/java/cn/escheduler/api/service/UsersService.java

@ -125,7 +125,7 @@ public class UsersService extends BaseService {
Tenant tenant = tenantMapper.queryById(tenantId);
// if hdfs startup
if (PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
if (PropertyUtils.getResUploadStartupState()){
String userPath = HadoopUtils.getHdfsDataBasePath() + "/" + tenant.getTenantCode() + "/home/" + user.getId();
HadoopUtils.getInstance().mkdir(userPath);
@ -245,35 +245,35 @@ public class UsersService extends BaseService {
Tenant newTenant = tenantMapper.queryById(tenantId);
if (newTenant != null) {
// if hdfs startup
if (PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
if (PropertyUtils.getResUploadStartupState() && oldTenant != null){
String newTenantCode = newTenant.getTenantCode();
String oldResourcePath = HadoopUtils.getHdfsDataBasePath() + "/" + oldTenant.getTenantCode() + "/resources";
String oldUdfsPath = HadoopUtils.getHdfsUdfDir(oldTenant.getTenantCode());
if (HadoopUtils.getInstance().exists(oldResourcePath)){
String newResourcePath = HadoopUtils.getHdfsDataBasePath() + "/" + newTenantCode + "/resources";
String newUdfsPath = HadoopUtils.getHdfsUdfDir(newTenantCode);
String newResourcePath = HadoopUtils.getHdfsDataBasePath() + "/" + newTenantCode + "/resources";
String newUdfsPath = HadoopUtils.getHdfsUdfDir(newTenantCode);
//file resources list
List<Resource> fileResourcesList = resourceMapper.queryResourceCreatedByUser(userId, 0);
if (CollectionUtils.isNotEmpty(fileResourcesList)) {
for (Resource resource : fileResourcesList) {
HadoopUtils.getInstance().copy(oldResourcePath + "/" + resource.getAlias(), newResourcePath, false, true);
//file resources list
List<Resource> fileResourcesList = resourceMapper.queryResourceCreatedByUser(userId, 0);
if (CollectionUtils.isNotEmpty(fileResourcesList)) {
for (Resource resource : fileResourcesList) {
HadoopUtils.getInstance().copy(oldResourcePath + "/" + resource.getAlias(), newResourcePath, false, true);
}
}
}
//udf resources
List<Resource> udfResourceList = resourceMapper.queryResourceCreatedByUser(userId, 1);
if (CollectionUtils.isNotEmpty(udfResourceList)) {
for (Resource resource : udfResourceList) {
HadoopUtils.getInstance().copy(oldUdfsPath + "/" + resource.getAlias(), newUdfsPath, false, true);
//udf resources
List<Resource> udfResourceList = resourceMapper.queryResourceCreatedByUser(userId, 1);
if (CollectionUtils.isNotEmpty(udfResourceList)) {
for (Resource resource : udfResourceList) {
HadoopUtils.getInstance().copy(oldUdfsPath + "/" + resource.getAlias(), newUdfsPath, false, true);
}
}
}
//Delete the user from the old tenant directory
String oldUserPath = HadoopUtils.getHdfsDataBasePath() + "/" + oldTenant.getTenantCode() + "/home/" + userId;
HadoopUtils.getInstance().delete(oldUserPath, true);
//Delete the user from the old tenant directory
String oldUserPath = HadoopUtils.getHdfsDataBasePath() + "/" + oldTenant.getTenantCode() + "/home/" + userId;
HadoopUtils.getInstance().delete(oldUserPath, true);
}
//create user in the new tenant directory
String newUserPath = HadoopUtils.getHdfsDataBasePath() + "/" + newTenant.getTenantCode() + "/home/" + user.getId();
@ -307,11 +307,13 @@ public class UsersService extends BaseService {
// delete user
User user = userMapper.queryTenantCodeByUserId(id);
if (PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
String userPath = HadoopUtils.getHdfsDataBasePath() + "/" + user.getTenantCode() + "/home/" + id;
HadoopUtils.getInstance().delete(userPath, true);
if (user != null) {
if (PropertyUtils.getResUploadStartupState()) {
String userPath = HadoopUtils.getHdfsDataBasePath() + "/" + user.getTenantCode() + "/home/" + id;
if (HadoopUtils.getInstance().exists(userPath)) {
HadoopUtils.getInstance().delete(userPath, true);
}
}
}
userMapper.delete(id);

4
escheduler-api/src/main/java/cn/escheduler/api/utils/CheckUtils.java

@ -18,8 +18,10 @@ package cn.escheduler.api.utils;
import cn.escheduler.api.enums.Status;
import cn.escheduler.common.enums.ResUploadType;
import cn.escheduler.common.task.AbstractParameters;
import cn.escheduler.common.utils.JSONUtils;
import cn.escheduler.common.utils.PropertyUtils;
import cn.escheduler.common.utils.TaskParametersUtils;
import org.apache.commons.lang.StringUtils;
@ -28,6 +30,7 @@ import java.util.HashMap;
import java.util.Map;
import java.util.regex.Pattern;
import static cn.escheduler.common.utils.PropertyUtils.getBoolean;
/**
@ -157,5 +160,4 @@ public class CheckUtils {
return pattern.matcher(str).matches();
}
}

1
escheduler-api/src/main/java/cn/escheduler/api/utils/Constants.java

@ -111,6 +111,7 @@ public class Constants {
public static final String ADDRESS = "address";
public static final String DATABASE = "database";
public static final String JDBC_URL = "jdbcUrl";
public static final String PRINCIPAL = "principal";
public static final String USER = "user";
public static final String PASSWORD = "password";
public static final String OTHER = "other";

39
escheduler-api/src/main/java/cn/escheduler/api/utils/ZookeeperMonitorUtils.java → escheduler-api/src/main/java/cn/escheduler/api/utils/ZookeeperMonitor.java

@ -1,7 +1,9 @@
package cn.escheduler.api.utils;
import cn.escheduler.common.zk.AbstractZKClient;
import cn.escheduler.dao.model.MasterServer;
import cn.escheduler.dao.model.ZookeeperRecord;
import cn.escheduler.server.ResInfo;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -9,14 +11,15 @@ import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Map;
/**
* monitor zookeeper info
*/
public class ZookeeperMonitorUtils {
public class ZookeeperMonitor extends AbstractZKClient{
private static final Logger LOG = LoggerFactory.getLogger(ZookeeperMonitorUtils.class);
private static final Logger LOG = LoggerFactory.getLogger(ZookeeperMonitor.class);
private static final String zookeeperList = AbstractZKClient.getZookeeperQuorum();
/**
@ -33,6 +36,38 @@ public class ZookeeperMonitorUtils {
return null;
}
/**
* get server list.
* @param isMaster
* @return
*/
public List<MasterServer> getServers(boolean isMaster){
List<MasterServer> masterServers = new ArrayList<>();
Map<String, String> masterMap = getServerList(isMaster);
String parentPath = isMaster ? getMasterZNodeParentPath() : getWorkerZNodeParentPath();
for(String path : masterMap.keySet()){
MasterServer masterServer = ResInfo.parseHeartbeatForZKInfo(masterMap.get(path));
masterServer.setZkDirectory( parentPath + "/"+ path);
masterServers.add(masterServer);
}
return masterServers;
}
/**
* get master servers
* @return
*/
public List<MasterServer> getMasterServers(){
return getServers(true);
}
/**
* the worker heartbeat has the same structure as the master's, so the MasterServer model is reused
* @return
*/
public List<MasterServer> getWorkerServers(){
return getServers(false);
}
private static List<ZookeeperRecord> zookeeperInfoList(String zookeeperServers) {

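A hypothetical caller of the new ZookeeperMonitor is sketched below; the getZkDirectory() accessor is an assumption inferred from the setZkDirectory(...) call above and is not confirmed by this diff:

import cn.escheduler.dao.model.MasterServer;

import java.util.List;

public class ZookeeperMonitorExample {

    public static void main(String[] args) {
        ZookeeperMonitor monitor = new ZookeeperMonitor();
        List<MasterServer> masters = monitor.getMasterServers();
        List<MasterServer> workers = monitor.getWorkerServers();
        // print the znode each live master registered under (accessor assumed)
        for (MasterServer master : masters) {
            System.out.println("master: " + master.getZkDirectory());
        }
        System.out.println("workers online: " + workers.size());
    }
}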
54
escheduler-api/src/main/resources/combined_logback.xml

@ -0,0 +1,54 @@
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds">
<property name="log.base" value="logs"/>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
%highlight([%level]) %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{10}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="TASKLOGFILE" class="cn.escheduler.server.worker.log.TaskLogAppender">
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<filter class="cn.escheduler.server.worker.log.TaskLogFilter"></filter>
<file>${log.base}/{processDefinitionId}/{processInstanceId}/{taskInstanceId}.log</file>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
<append>true</append>
</appender>
<appender name="COMBINEDLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.base}/escheduler-combined.log</file>
<filter class="cn.escheduler.server.worker.log.WorkerLogFilter">
<level>INFO</level>
</filter>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/escheduler-combined.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>168</maxHistory>
<maxFileSize>200MB</maxFileSize>
</rollingPolicy>
     
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
  
</appender>
<root level="INFO">
<appender-ref ref="STDOUT"/>
<appender-ref ref="TASKLOGFILE"/>
<appender-ref ref="COMBINEDLOGFILE"/>
</root>
</configuration>

12
escheduler-api/src/main/resources/i18n/messages.properties

@ -1,4 +1,16 @@
QUERY_SCHEDULE_LIST_NOTES=query schedule list
EXECUTE_PROCESS_TAG=execute process related operation
PROCESS_INSTANCE_EXECUTOR_TAG=process instance executor related operation
RUN_PROCESS_INSTANCE_NOTES=run process instance
START_NODE_LIST=start node list(node name)
TASK_DEPEND_TYPE=task depend type
COMMAND_TYPE=command type
RUN_MODE=run mode
TIMEOUT=timeout
EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=execute action to process instance
EXECUTE_TYPE=execute type
START_CHECK_PROCESS_DEFINITION_NOTES=start check process definition
GET_RECEIVER_CC_NOTES=query receiver cc
DESC=description
GROUP_NAME=group name
GROUP_TYPE=group type

12
escheduler-api/src/main/resources/i18n/messages_en_US.properties

@ -1,4 +1,16 @@
QUERY_SCHEDULE_LIST_NOTES=query schedule list
EXECUTE_PROCESS_TAG=execute process related operation
PROCESS_INSTANCE_EXECUTOR_TAG=process instance executor related operation
RUN_PROCESS_INSTANCE_NOTES=run process instance
START_NODE_LIST=start node list(node name)
TASK_DEPEND_TYPE=task depend type
COMMAND_TYPE=command type
RUN_MODE=run mode
TIMEOUT=timeout
EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=execute action to process instance
EXECUTE_TYPE=execute type
START_CHECK_PROCESS_DEFINITION_NOTES=start check process definition
GET_RECEIVER_CC_NOTES=query receiver cc
DESC=description
GROUP_NAME=group name
GROUP_TYPE=group type

10
escheduler-api/src/main/resources/i18n/messages_zh_CN.properties

@ -1,4 +1,14 @@
QUERY_SCHEDULE_LIST_NOTES=查询定时列表
PROCESS_INSTANCE_EXECUTOR_TAG=流程实例执行相关操作
RUN_PROCESS_INSTANCE_NOTES=运行流程实例
START_NODE_LIST=开始节点列表(节点name)
TASK_DEPEND_TYPE=任务依赖类型
COMMAND_TYPE=指令类型
RUN_MODE=运行模式
TIMEOUT=超时时间
EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=执行流程实例的各种操作(暂停、停止、重跑、恢复等)
EXECUTE_TYPE=执行类型
START_CHECK_PROCESS_DEFINITION_NOTES=检查流程定义
DESC=备注(描述)
GROUP_NAME=组名称
GROUP_TYPE=组类型

42
escheduler-api/src/main/resources/logback.xml

@ -1,42 +0,0 @@
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds">
<logger name="org.apache.zookeeper" level="WARN"/>
<logger name="org.apache.hbase" level="WARN"/>
<logger name="org.apache.hadoop" level="WARN"/>
<property name="log.base" value="logs" />
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="APISERVERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- Log level filter -->
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<file>${log.base}/escheduler-api-server.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/escheduler-api-server.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>168</maxHistory>
<maxFileSize>64MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="STDOUT" />
</root>
</configuration>

24
escheduler-api/src/test/java/cn/escheduler/api/controller/ResourcesControllerTest.java

@ -34,6 +34,8 @@ import org.springframework.test.context.junit4.SpringRunner;
import org.springframework.test.web.servlet.MockMvc;
import org.springframework.test.web.servlet.MvcResult;
import org.springframework.test.web.servlet.setup.MockMvcBuilders;
import org.springframework.util.LinkedMultiValueMap;
import org.springframework.util.MultiValueMap;
import org.springframework.web.context.WebApplicationContext;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get;
@ -43,7 +45,7 @@ import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.
@RunWith(SpringRunner.class)
@SpringBootTest
public class ResourcesControllerTest {
private static Logger logger = LoggerFactory.getLogger(QueueControllerTest.class);
private static Logger logger = LoggerFactory.getLogger(ResourcesControllerTest.class);
private MockMvc mockMvc;
@ -71,4 +73,24 @@ public class ResourcesControllerTest {
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void verifyResourceName() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("name","list_resources_1.sh");
paramsMap.add("type","FILE");
MvcResult mvcResult = mockMvc.perform(get("/resources/verify-name")
.header("sessionId", "c24ed9d9-1c20-48a0-bd9c-5cfca14a4dcb")
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
}

13
escheduler-api/src/test/java/cn/escheduler/api/controller/SchedulerControllerTest.java

@ -64,4 +64,17 @@ public class SchedulerControllerTest {
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void previewSchedule() throws Exception {
MvcResult mvcResult = mockMvc.perform(post("/projects/{projectName}/schedule/preview","li_test_1")
.header("sessionId", "c24ed9d9-1c20-48a0-bd9c-5cfca14a4dcb")
.param("schedule","{'startTime':'2019-06-10 00:00:00','endTime':'2019-06-13 00:00:00','crontab':'0 0 3/6 * * ? *'}"))
.andExpect(status().isCreated())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
}

29
escheduler-api/src/test/java/cn/escheduler/api/utils/ZookeeperMonitorUtilsTest.java

@ -0,0 +1,29 @@
package cn.escheduler.api.utils;
import cn.escheduler.dao.model.MasterServer;
import org.junit.Assert;
import org.junit.Test;
import java.util.List;
public class ZookeeperMonitorUtilsTest {
@Test
public void testGetMasterList(){
ZookeeperMonitor zookeeperMonitor = new ZookeeperMonitor();
List<MasterServer> masterServerList = zookeeperMonitor.getMasterServers();
List<MasterServer> workerServerList = zookeeperMonitor.getWorkerServers();
Assert.assertEquals(masterServerList.size(), 1);
Assert.assertEquals(workerServerList.size(), 1);
}
}

2
escheduler-common/pom.xml

@ -4,7 +4,7 @@
<parent>
<artifactId>escheduler</artifactId>
<groupId>cn.analysys</groupId>
<version>1.0.3-SNAPSHOT</version>
<version>1.1.0-SNAPSHOT</version>
</parent>
<artifactId>escheduler-common</artifactId>
<name>escheduler-common</name>

90
escheduler-common/src/main/java/cn/escheduler/common/Constants.java

@ -60,6 +60,23 @@ public final class Constants {
*/
public static final String FS_DEFAULTFS = "fs.defaultFS";
/**
* fs s3a endpoint
*/
public static final String FS_S3A_ENDPOINT = "fs.s3a.endpoint";
/**
* fs s3a access key
*/
public static final String FS_S3A_ACCESS_KEY = "fs.s3a.access.key";
/**
* fs s3a secret key
*/
public static final String FS_S3A_SECRET_KEY = "fs.s3a.secret.key";
/**
* yarn.resourcemanager.ha.rm.ids
*/
@ -70,6 +87,11 @@ public final class Constants {
*/
public static final String YARN_APPLICATION_STATUS_ADDRESS = "yarn.application.status.address";
/**
* hdfs configuration
* hdfs.root.user
*/
public static final String HDFS_ROOT_USER = "hdfs.root.user";
/**
* hdfs configuration
@ -118,9 +140,9 @@ public final class Constants {
public static final String DEVELOPMENT_STATE = "development.state";
/**
* hdfs.startup.state
* res.upload.startup.type
*/
public static final String HDFS_STARTUP_STATE = "hdfs.startup.state";
public static final String RES_UPLOAD_STARTUP_TYPE = "res.upload.startup.type";
/**
* zookeeper quorum
@ -197,6 +219,11 @@ public final class Constants {
*/
public static final String SEMICOLON = ";";
/**
* DOT .
*/
public static final String DOT = ".";
/**
* ZOOKEEPER_SESSION_TIMEOUT
*/
@ -241,7 +268,11 @@ public final class Constants {
*/
public static final String SCHEDULER_TASKS_QUEUE = "tasks_queue";
/**
* escheduler queue of tasks to be killed
*/
public static final String SCHEDULER_TASKS_KILL = "tasks_kill";
public static final String ZOOKEEPER_SCHEDULER_ROOT = "zookeeper.escheduler.root";
public static final String SCHEDULER_QUEUE_IMPL = "escheduler.queue.impl";
@ -252,6 +283,11 @@ public final class Constants {
*/
public static final String YYYY_MM_DD_HH_MM_SS = "yyyy-MM-dd HH:mm:ss";
/**
* date format of yyyyMMdd
*/
public static final String YYYYMMDD = "yyyyMMdd";
/**
* date format of yyyyMMddHHmmss
*/
@ -301,7 +337,7 @@ public final class Constants {
/**
* email regex
*/
public static final Pattern REGEX_MAIL_NAME = Pattern.compile("^[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+(\\.[a-zA-Z0-9_-]+)+$");
public static final Pattern REGEX_MAIL_NAME = Pattern.compile("^([a-z0-9A-Z]+[-|\\.]?)+[a-z0-9A-Z]@([a-z0-9A-Z]+(-[a-z0-9A-Z]+)?\\.)+[a-zA-Z]{2,}$");
/**
* read permission
@ -336,11 +372,6 @@ public final class Constants {
*/
public static final int MAX_TASK_TIMEOUT = 24 * 3600;
/**
* max task timeout
*/
public static final int MAX_PROCESS_TIMEOUT = Integer.MAX_VALUE;
/**
* heartbeat threads number
@ -457,6 +488,10 @@ public final class Constants {
public static final String TASK_RECORD_PWD = "task.record.datasource.password";
public static final String DEFAULT = "Default";
public static final String PASSWORD = "password";
public static final String XXXXXX = "******";
public static String TASK_RECORD_TABLE_HIVE_LOG = "eamp_hive_log_hd";
public static String TASK_RECORD_TABLE_HISTORY_HIVE_LOG = "eamp_hive_hist_log_hd";
@ -827,6 +862,43 @@ public final class Constants {
/**
*
* preview schedule execute count
*/
public static final int PREVIEW_SCHEDULE_EXECUTE_COUNT = 5;
/**
* java.security.krb5.conf
*/
public static final String JAVA_SECURITY_KRB5_CONF = "java.security.krb5.conf";
/**
* java.security.krb5.conf.path
*/
public static final String JAVA_SECURITY_KRB5_CONF_PATH = "java.security.krb5.conf.path";
/**
* hadoop.security.authentication
*/
public static final String HADOOP_SECURITY_AUTHENTICATION = "hadoop.security.authentication";
/**
* hadoop.security.authentication
*/
public static final String HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE = "hadoop.security.authentication.startup.state";
/**
* loginUserFromKeytab user
*/
public static final String LOGIN_USER_KEY_TAB_USERNAME = "login.user.keytab.username";
/**
* default worker group id
*/
public static final int DEFAULT_WORKER_ID = -1;
/**
* loginUserFromKeytab path
*/
public static final String LOGIN_USER_KEY_TAB_PATH = "login.user.keytab.path";
}

29
escheduler-common/src/main/java/cn/escheduler/common/enums/ResUploadType.java

@ -0,0 +1,29 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.enums;
/**
* resource upload types
*/
public enum ResUploadType {
/**
* 0 hdfs
* 1 s3
* 2 none
*/
HDFS,S3,NONE
}

35
escheduler-common/src/main/java/cn/escheduler/common/enums/TaskRecordStatus.java

@ -0,0 +1,35 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.enums;
/**
* task record status
*
*/
public enum TaskRecordStatus {
/**
* status
* 0 success
* 1 failure
* 2 exception
*/
SUCCESS,FAILURE,EXCEPTION
}

8
escheduler-common/src/main/java/cn/escheduler/common/enums/TaskType.java

@ -30,5 +30,11 @@ public enum TaskType {
* 6 PYTHON
* 7 DEPENDENT
*/
SHELL,SQL, SUB_PROCESS,PROCEDURE,MR,SPARK,PYTHON,DEPENDENT
SHELL,SQL, SUB_PROCESS,PROCEDURE,MR,SPARK,PYTHON,DEPENDENT;
public static boolean typeIsNormalTask(String typeName) {
TaskType taskType = TaskType.valueOf(typeName);
return !(taskType == TaskType.SUB_PROCESS || taskType == TaskType.DEPENDENT);
}
}

15
escheduler-common/src/main/java/cn/escheduler/common/enums/ZKNodeType.java

@ -0,0 +1,15 @@
package cn.escheduler.common.enums;
/**
* zk node type
*/
public enum ZKNodeType {
/**
* 0 master node;
* 1 worker node;
* 2 dead server node;
* 3 task queue node;
*/
MASTER, WORKER, DEAD_SERVER, TASK_QUEUE;
}

20
escheduler-common/src/main/java/cn/escheduler/common/job/db/BaseDataSource.java

@ -45,6 +45,18 @@ public abstract class BaseDataSource {
*/
private String other;
/**
* principal
*/
private String principal;
public String getPrincipal() {
return principal;
}
public void setPrincipal(String principal) {
this.principal = principal;
}
/**
* test whether the data source can be connected successfully
* @throws Exception
@ -73,14 +85,14 @@ public abstract class BaseDataSource {
this.password = password;
}
public String getAddress() {
return address;
}
public void setAddress(String address) {
this.address = address;
}
public String getAddress() {
return address;
}
public String getDatabase() {
return database;
}

13
escheduler-common/src/main/java/cn/escheduler/common/job/db/HiveDataSource.java

@ -17,12 +17,12 @@
package cn.escheduler.common.job.db;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.*;
/**
* data source of hive
@ -32,6 +32,8 @@ public class HiveDataSource extends BaseDataSource {
private static final Logger logger = LoggerFactory.getLogger(HiveDataSource.class);
/**
* gets the JDBC url for the data source connection
* @return
@ -43,7 +45,7 @@ public class HiveDataSource extends BaseDataSource {
jdbcUrl += "/";
}
jdbcUrl += getDatabase();
jdbcUrl += getDatabase() + ";principal=" + getPrincipal();
if (StringUtils.isNotEmpty(getOther())) {
jdbcUrl += ";" + getOther();
@ -67,11 +69,10 @@ public class HiveDataSource extends BaseDataSource {
try {
con.close();
} catch (SQLException e) {
logger.error("Postgre datasource try conn close conn error", e);
logger.error("hive datasource try conn close conn error", e);
throw e;
}
}
}
}
}

3
escheduler-common/src/main/java/cn/escheduler/common/job/db/SparkDataSource.java

@ -31,7 +31,6 @@ public class SparkDataSource extends BaseDataSource {
private static final Logger logger = LoggerFactory.getLogger(SparkDataSource.class);
/**
* gets the JDBC url for the data source connection
* @return
@ -43,7 +42,7 @@ public class SparkDataSource extends BaseDataSource {
jdbcUrl += "/";
}
jdbcUrl += getDatabase();
jdbcUrl += getDatabase() + ";principal=" + getPrincipal();
if (StringUtils.isNotEmpty(getOther())) {
jdbcUrl += ";" + getOther();

9
escheduler-common/src/main/java/cn/escheduler/common/queue/ITaskQueue.java

@ -24,20 +24,17 @@ public interface ITaskQueue {
/**
* take out all the elements
*
* this method has deprecated
* use checkTaskExists instead
*
* @param key
* @return
*/
@Deprecated
List<String> getAllTasks(String key);
/**
* check task exists in the task queue or not
*
* @param key queue name
* @param task ${priority}_${processInstanceId}_${taskId}
* @param task ${processInstancePriority}_${processInstanceId}_${taskInstancePriority}_${taskId}
* @return true if exists in the queue
*/
boolean checkTaskExists(String key, String task);
@ -54,10 +51,10 @@ public interface ITaskQueue {
* an element pops out of the queue
*
* @param key queue name
* @param remove whether remove the element
* @param n how many elements to poll
* @return
*/
String poll(String key, boolean remove);
List<String> poll(String key, int n);
/**
* remove a element from queue

2
escheduler-common/src/main/java/cn/escheduler/common/queue/TaskQueueFactory.java

@ -42,7 +42,7 @@ public class TaskQueueFactory {
public static ITaskQueue getTaskQueueInstance() {
String queueImplValue = CommonUtils.getQueueImplValue();
if (StringUtils.isNotBlank(queueImplValue)) {
// queueImplValue = StringUtils.trim(queueImplValue);
// queueImplValue = IpUtils.trim(queueImplValue);
// if (SCHEDULER_QUEUE_REDIS_IMPL.equals(queueImplValue)) {
// logger.info("task queue impl use reids ");

160
escheduler-common/src/main/java/cn/escheduler/common/queue/TaskQueueZkImpl.java

@ -19,17 +19,17 @@ package cn.escheduler.common.queue;
import cn.escheduler.common.Constants;
import cn.escheduler.common.utils.Bytes;
import cn.escheduler.common.utils.IpUtils;
import cn.escheduler.common.utils.OSUtils;
import cn.escheduler.common.zk.AbstractZKClient;
import org.apache.commons.lang3.StringUtils;
import org.apache.curator.framework.CuratorFramework;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.*;
/**
* A singleton of a task queue implemented with zookeeper
@ -62,7 +62,6 @@ public class TaskQueueZkImpl extends AbstractZKClient implements ITaskQueue {
* @param key task queue name
* @return
*/
@Deprecated
@Override
public List<String> getAllTasks(String key) {
try {
@ -80,7 +79,7 @@ public class TaskQueueZkImpl extends AbstractZKClient implements ITaskQueue {
* check task exists in the task queue or not
*
* @param key queue name
* @param task ${priority}_${processInstanceId}_${taskId}
* @param task ${processInstancePriority}_${processInstanceId}_${taskInstancePriority}_${taskId}
* @return true if exists in the queue
*/
@Override
@ -110,7 +109,7 @@ public class TaskQueueZkImpl extends AbstractZKClient implements ITaskQueue {
* add task to tasks queue
*
* @param key task queue name
* @param value ${priority}_${processInstanceId}_${taskId}
* @param value ${processInstancePriority}_${processInstanceId}_${taskInstancePriority}_${taskId}_host1,host2,...
*/
@Override
public void add(String key, String value) {
@ -118,9 +117,6 @@ public class TaskQueueZkImpl extends AbstractZKClient implements ITaskQueue {
String taskIdPath = getTasksPath(key) + Constants.SINGLE_SLASH + value;
String result = getZkClient().create().withMode(CreateMode.PERSISTENT).forPath(taskIdPath, Bytes.toBytes(value));
// String path = conf.getString(Constants.ZOOKEEPER_SCHEDULER_ROOT) + Constants.SINGLE_SLASH + Constants.SCHEDULER_TASKS_QUEUE + "_add" + Constants.SINGLE_SLASH + value;
// getZkClient().create().creatingParentContainersIfNeeded().withMode(CreateMode.PERSISTENT).forPath(path,
// Bytes.toBytes(value));
logger.info("add task : {} to tasks queue , result success",result);
} catch (Exception e) {
logger.error("add task to tasks queue exception",e);
@ -132,16 +128,16 @@ public class TaskQueueZkImpl extends AbstractZKClient implements ITaskQueue {
/**
* An element pops out of the queue <p>
* note:
* ${processInstancePriority}_${processInstanceId}_${taskInstancePriority}_${taskId}
* ${processInstancePriority}_${processInstanceId}_${taskInstancePriority}_${taskId}_host1,host2,...
* The tasks with the highest priority are selected by comparing the priorities of the above four levels from high to low.
*
* process instance priority _ process instance id _ task instance priority _ task id    high <- low
* process instance priority _ process instance id _ task instance priority _ task id _ worker host id1,worker host id2,...    high <- low
* @param key task queue name
* @param remove whether remove the element
* @return the task id to be executed
* @param tasksNum how many elements to poll
* @return the task ids to be executed
*/
@Override
public String poll(String key, boolean remove) {
public List<String> poll(String key, int tasksNum) {
try{
CuratorFramework zk = getZkClient();
String tasksQueuePath = getTasksPath(key) + Constants.SINGLE_SLASH;
@ -149,53 +145,123 @@ public class TaskQueueZkImpl extends AbstractZKClient implements ITaskQueue {
if(list != null && list.size() > 0){
String workerIp = OSUtils.getHost();
String workerIpLongStr = String.valueOf(IpUtils.ipToLong(workerIp));
int size = list.size();
String formatTargetTask = null;
String targetTaskKey = null;
Set<String> taskTreeSet = new TreeSet<>(new Comparator<String>() {
@Override
public int compare(String o1, String o2) {
String s1 = o1;
String s2 = o2;
String[] s1Array = s1.split(Constants.UNDERLINE);
if(s1Array.length>4){
// warning: if the key ever has more than 5 fields, this truncation needs to be changed
s1 = s1.substring(0, s1.lastIndexOf(Constants.UNDERLINE) );
}
String[] s2Array = s2.split(Constants.UNDERLINE);
if(s2Array.length>4){
// warning: if the key ever has more than 5 fields, this truncation needs to be changed
s2 = s2.substring(0, s2.lastIndexOf(Constants.UNDERLINE) );
}
return s1.compareTo(s2);
}
});
for (int i = 0; i < size; i++) {
String taskDetail = list.get(i);
String[] taskDetailArrs = taskDetail.split(Constants.UNDERLINE);
if(taskDetailArrs.length == 4){
//forward compatibility with the earlier 4-field task format
if(taskDetailArrs.length >= 4){
//format ${processInstancePriority}_${processInstanceId}_${taskInstancePriority}_${taskId}
String formatTask = String.format("%s_%010d_%s_%010d", taskDetailArrs[0], Long.parseLong(taskDetailArrs[1]), taskDetailArrs[2], Long.parseLong(taskDetailArrs[3]));
if(i > 0){
int result = formatTask.compareTo(formatTargetTask);
if(result < 0){
formatTargetTask = formatTask;
targetTaskKey = taskDetail;
if(taskDetailArrs.length > 4){
String taskHosts = taskDetailArrs[4];
//task can assign to any worker host if equals default ip value of worker server
if(!taskHosts.equals(String.valueOf(Constants.DEFAULT_WORKER_ID))){
String[] taskHostsArr = taskHosts.split(Constants.COMMA);
if(!Arrays.asList(taskHostsArr).contains(workerIpLongStr)){
continue;
}
}
}else{
formatTargetTask = formatTask;
targetTaskKey = taskDetail;
formatTask += Constants.UNDERLINE + taskDetailArrs[4];
}
}else{
logger.error("task queue poll error, task detail :{} , please check!", taskDetail);
taskTreeSet.add(formatTask);
}
}
if(formatTargetTask != null){
String taskIdPath = tasksQueuePath + targetTaskKey;
}
logger.info("consume task {}", taskIdPath);
List<String> taskslist = getTasksListFromTreeSet(tasksNum, taskTreeSet);
String[] vals = targetTaskKey.split(Constants.UNDERLINE);
logger.info("consume tasks: {},there still have {} tasks need to be executed", Arrays.toString(taskslist.toArray()), size - taskslist.size());
if(remove){
removeNode(key, targetTaskKey);
}
logger.info("consume task: {},there still have {} tasks need to be executed", vals[vals.length - 1], size - 1);
return targetTaskKey;
}else{
logger.error("should not go here, task queue poll error, please check!");
}
return taskslist;
}else{
Thread.sleep(Constants.SLEEP_TIME_MILLIS);
}
} catch (Exception e) {
logger.error("add task to tasks queue exception",e);
}
return null;
return new ArrayList<String>();
}
/**
* get task list from tree set
*
* @param tasksNum
* @param taskTreeSet
*/
public List<String> getTasksListFromTreeSet(int tasksNum, Set<String> taskTreeSet) {
Iterator<String> iterator = taskTreeSet.iterator();
int j = 0;
List<String> taskslist = new ArrayList<>(tasksNum);
while(iterator.hasNext()){
if(j++ >= tasksNum){
break;
}
String task = iterator.next();
taskslist.add(getOriginTaskFormat(task));
}
return taskslist;
}
/**
* format ${processInstancePriority}_${processInstanceId}_${taskInstancePriority}_${taskId}
* processInstanceId and task id need to be convert to int.
* @param formatTask
* @return
*/
private String getOriginTaskFormat(String formatTask){
String[] taskArray = formatTask.split(Constants.UNDERLINE);
if(taskArray.length< 4){
return formatTask;
}
int processInstanceId = Integer.parseInt(taskArray[1]);
int taskId = Integer.parseInt(taskArray[3]);
StringBuilder sb = new StringBuilder(50);
String destTask = String.format("%s_%s_%s_%s", taskArray[0], processInstanceId, taskArray[2], taskId);
sb.append(destTask);
if(taskArray.length > 4){
for(int index = 4; index < taskArray.length; index++){
sb.append(Constants.UNDERLINE).append(taskArray[index]);
}
}
return sb.toString();
}
@Override
@ -354,16 +420,6 @@ public class TaskQueueZkImpl extends AbstractZKClient implements ITaskQueue {
}
}
/**
* get zookeeper client of CuratorFramework
* @return
*/
public CuratorFramework getZkClient() {
return zkClient;
}
/**
* Get the task queue path
* @param key task queue name

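A small sketch of why the poll logic above zero-pads the numeric fields: fixed-width ids make lexicographic order agree with numeric order, so the smallest key in the TreeSet is the highest-priority task. The values below are made up for illustration:

import java.util.TreeSet;

public class TaskKeyOrderSketch {

    public static void main(String[] args) {
        TreeSet<String> keys = new TreeSet<>();
        // format: ${processInstancePriority}_${processInstanceId}_${taskInstancePriority}_${taskId}
        keys.add(String.format("%s_%010d_%s_%010d", "1", 200L, "2", 15L));
        keys.add(String.format("%s_%010d_%s_%010d", "0", 1001L, "1", 7L)); // highest process priority
        keys.add(String.format("%s_%010d_%s_%010d", "1", 200L, "0", 3L));
        // prints 0_0000001001_1_0000000007 : the task to execute first
        System.out.println(keys.first());
    }
}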
11
escheduler-common/src/main/java/cn/escheduler/common/utils/CommonUtils.java

@ -17,6 +17,7 @@
package cn.escheduler.common.utils;
import cn.escheduler.common.Constants;
import cn.escheduler.common.enums.ResUploadType;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -63,4 +64,14 @@ public class CommonUtils {
/**
* true if the resource upload type is HDFS and kerberos startup is enabled, otherwise false
* @return
*/
public static boolean getKerberosStartupState(){
String resUploadStartupType = PropertyUtils.getString(cn.escheduler.common.Constants.RES_UPLOAD_STARTUP_TYPE);
ResUploadType resUploadType = ResUploadType.valueOf(resUploadStartupType);
Boolean kerberosStartupState = getBoolean(cn.escheduler.common.Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE);
return resUploadType == ResUploadType.HDFS && kerberosStartupState;
}
}

6
escheduler-common/src/main/java/cn/escheduler/common/utils/DependentUtils.java

@ -95,6 +95,9 @@ public class DependentUtils {
case "last7Days":
result = DependentDateUtils.getLastDayInterval(businessDate, 7);
break;
case "thisWeek":
result = DependentDateUtils.getThisWeekInterval(businessDate);
break;
case "lastWeek":
result = DependentDateUtils.getLastWeekInterval(businessDate);
break;
@ -119,6 +122,9 @@ public class DependentUtils {
case "lastSunday":
result = DependentDateUtils.getLastWeekOneDayInterval(businessDate, 7);
break;
case "thisMonth":
result = DependentDateUtils.getThisMonthInterval(businessDate);
break;
case "lastMonth":
result = DependentDateUtils.getLastMonthInterval(businessDate);
break;

102
escheduler-common/src/main/java/cn/escheduler/common/utils/HadoopUtils.java

@ -18,31 +18,30 @@ package cn.escheduler.common.utils;
import cn.escheduler.common.Constants;
import cn.escheduler.common.enums.ExecutionStatus;
import cn.escheduler.common.enums.ResUploadType;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONException;
import com.alibaba.fastjson.JSONObject;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.client.cli.RMAdminCLI;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.*;
import java.security.PrivilegedExceptionAction;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static cn.escheduler.common.Constants.*;
import static cn.escheduler.common.utils.PropertyUtils.getInt;
import static cn.escheduler.common.utils.PropertyUtils.*;
import static cn.escheduler.common.utils.PropertyUtils.getString;
import static cn.escheduler.common.utils.PropertyUtils.getPrefixedProperties;
/**
* hadoop utils
@ -52,18 +51,41 @@ public class HadoopUtils implements Closeable {
private static final Logger logger = LoggerFactory.getLogger(HadoopUtils.class);
private static String hdfsUser = PropertyUtils.getString(Constants.HDFS_ROOT_USER);
private static volatile HadoopUtils instance = new HadoopUtils();
private static volatile Configuration configuration;
private static FileSystem fs;
private HadoopUtils(){
if(StringUtils.isEmpty(hdfsUser)){
hdfsUser = PropertyUtils.getString(Constants.HDFS_ROOT_USER);
}
init();
initHdfsPath();
}
public static HadoopUtils getInstance(){
return instance;
}
/**
* init escheduler root path in hdfs
*/
private void initHdfsPath(){
String hdfsPath = getString(Constants.DATA_STORE_2_HDFS_BASEPATH);
Path path = new Path(hdfsPath);
try {
if (!fs.exists(path)) {
fs.mkdirs(path);
}
} catch (Exception e) {
logger.error(e.getMessage(),e);
}
}
/**
* init hadoop configuration
*/
@ -73,26 +95,62 @@ public class HadoopUtils implements Closeable {
if (configuration == null) {
try {
configuration = new Configuration();
String defaultFS = configuration.get(FS_DEFAULTFS);
//first get key from core-site.xml hdfs-site.xml ,if null ,then try to get from properties file
// the default is the local file system
if(defaultFS.startsWith("file")){
String defaultFSProp = getString(FS_DEFAULTFS);
if(StringUtils.isNotBlank(defaultFSProp)){
Map<String, String> fsRelatedProps = getPrefixedProperties("fs.");
configuration.set(FS_DEFAULTFS,defaultFSProp);
fsRelatedProps.entrySet().stream().forEach(entry -> configuration.set(entry.getKey(), entry.getValue()));
String resUploadStartupType = PropertyUtils.getString(Constants.RES_UPLOAD_STARTUP_TYPE);
ResUploadType resUploadType = ResUploadType.valueOf(resUploadStartupType);
if (resUploadType == ResUploadType.HDFS){
if (getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE)){
System.setProperty(Constants.JAVA_SECURITY_KRB5_CONF,
getString(Constants.JAVA_SECURITY_KRB5_CONF_PATH));
configuration.set(Constants.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
UserGroupInformation.setConfiguration(configuration);
UserGroupInformation.loginUserFromKeytab(getString(Constants.LOGIN_USER_KEY_TAB_USERNAME),
getString(Constants.LOGIN_USER_KEY_TAB_PATH));
}
String defaultFS = configuration.get(FS_DEFAULTFS);
//first get key from core-site.xml hdfs-site.xml ,if null ,then try to get from properties file
// the default is the local file system
if(defaultFS.startsWith("file")){
String defaultFSProp = getString(FS_DEFAULTFS);
if(StringUtils.isNotBlank(defaultFSProp)){
Map<String, String> fsRelatedProps = getPrefixedProperties("fs.");
configuration.set(FS_DEFAULTFS,defaultFSProp);
fsRelatedProps.entrySet().stream().forEach(entry -> configuration.set(entry.getKey(), entry.getValue()));
}else{
logger.error("property:{} can not to be empty, please set!");
throw new RuntimeException("property:{} can not to be empty, please set!");
}
}else{
logger.error("property:{} can not to be empty, please set!");
throw new RuntimeException("property:{} can not to be empty, please set!");
logger.info("get property:{} -> {}, from core-site.xml hdfs-site.xml ", FS_DEFAULTFS, defaultFS);
}
}else{
logger.info("get property:{} -> {}, from core-site.xml hdfs-site.xml ", FS_DEFAULTFS, defaultFS);
}
if (fs == null) {
if (fs == null) {
if(StringUtils.isNotEmpty(hdfsUser)){
//UserGroupInformation ugi = UserGroupInformation.createProxyUser(hdfsUser,UserGroupInformation.getLoginUser());
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(hdfsUser);
ugi.doAs(new PrivilegedExceptionAction<Boolean>() {
@Override
public Boolean run() throws Exception {
fs = FileSystem.get(configuration);
return true;
}
});
}else{
logger.warn("hdfs.root.user is not set value!");
fs = FileSystem.get(configuration);
}
}
}else if (resUploadType == ResUploadType.S3){
configuration.set(FS_DEFAULTFS,getString(FS_DEFAULTFS));
configuration.set(FS_S3A_ENDPOINT,getString(FS_S3A_ENDPOINT));
configuration.set(FS_S3A_ACCESS_KEY,getString(FS_S3A_ACCESS_KEY));
configuration.set(FS_S3A_SECRET_KEY,getString(FS_S3A_SECRET_KEY));
fs = FileSystem.get(configuration);
}
String rmHaIds = getString(YARN_RESOURCEMANAGER_HA_RM_IDS);
String appAddress = getString(Constants.YARN_APPLICATION_STATUS_ADDRESS);
if (!StringUtils.isEmpty(rmHaIds)) {
@ -155,7 +213,7 @@ public class HadoopUtils implements Closeable {
*/
public List<String> catFile(String hdfsFilePath, int skipLineNums, int limit) throws IOException {
if(StringUtils.isBlank(hdfsFilePath)){
if (StringUtils.isBlank(hdfsFilePath)){
logger.error("hdfs file path:{} is blank",hdfsFilePath);
return null;
}

64
escheduler-common/src/main/java/cn/escheduler/common/utils/IpUtils.java

@ -0,0 +1,64 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.utils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* ip utils
*/
public class IpUtils {
private static final Logger logger = LoggerFactory.getLogger(IpUtils.class);
public static final String DOT = ".";
/**
* ip str to long <p>
*
* @param ipStr ip string
*/
public static Long ipToLong(String ipStr) {
String[] ipSet = ipStr.split("\\" + DOT);
return Long.parseLong(ipSet[0]) << 24 | Long.parseLong(ipSet[1]) << 16 | Long.parseLong(ipSet[2]) << 8 | Long.parseLong(ipSet[3]);
}
/**
* long to ip
* @param ipLong the long number converted from IP
* @return String
*/
public static String longToIp(long ipLong) {
long[] ipNumbers = new long[4];
long tmp = 0xFF;
ipNumbers[0] = ipLong >> 24 & tmp;
ipNumbers[1] = ipLong >> 16 & tmp;
ipNumbers[2] = ipLong >> 8 & tmp;
ipNumbers[3] = ipLong & tmp;
StringBuilder sb = new StringBuilder(16);
sb.append(ipNumbers[0]).append(DOT)
.append(ipNumbers[1]).append(DOT)
.append(ipNumbers[2]).append(DOT)
.append(ipNumbers[3]);
return sb.toString();
}
}
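A quick round trip with the two helpers above (the address is an arbitrary example):

import cn.escheduler.common.utils.IpUtils;

public class IpUtilsExample {

    public static void main(String[] args) {
        long ipLong = IpUtils.ipToLong("192.168.199.91"); // 3232286555
        String ipStr = IpUtils.longToIp(ipLong);          // "192.168.199.91"
        System.out.println(ipLong + " -> " + ipStr);
    }
}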

2
escheduler-common/src/main/java/cn/escheduler/common/utils/OSUtils.java

@ -220,7 +220,7 @@ public class OSUtils {
* @throws IOException
*/
public static String exeShell(String command) throws IOException {
return ShellExecutor.execCommand("groups");
return ShellExecutor.execCommand(command);
}
/**

14
escheduler-common/src/main/java/cn/escheduler/common/utils/PropertyUtils.java

@ -16,6 +16,8 @@
*/
package cn.escheduler.common.utils;
import cn.escheduler.common.Constants;
import cn.escheduler.common.enums.ResUploadType;
import org.apache.commons.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -65,11 +67,15 @@ public class PropertyUtils {
}
}
/*
public static PropertyUtils getInstance(){
return propertyUtils;
/**
* judge whether resource upload is enabled at startup
* @return
*/
public static Boolean getResUploadStartupState(){
String resUploadStartupType = PropertyUtils.getString(Constants.RES_UPLOAD_STARTUP_TYPE);
ResUploadType resUploadType = ResUploadType.valueOf(resUploadStartupType);
return resUploadType == ResUploadType.HDFS || resUploadType == ResUploadType.S3;
}
*/
/**
* get property value

20
escheduler-common/src/main/java/cn/escheduler/common/utils/dependent/DependentDateUtils.java

@ -76,6 +76,16 @@ public class DependentDateUtils {
return dateIntervals;
}
/**
* get interval between this month first day and businessDate
* @param businessDate
* @return
*/
public static List<DateInterval> getThisMonthInterval(Date businessDate) {
Date firstDay = DateUtils.getFirstDayOfMonth(businessDate);
return getDateIntervalListBetweenTwoDates(firstDay, businessDate);
}
/**
* get interval between last month first day and last day
* @param businessDate
@ -108,6 +118,16 @@ public class DependentDateUtils {
}
}
/**
* get interval between monday to businessDate of this week
* @param businessDate
* @return
*/
public static List<DateInterval> getThisWeekInterval(Date businessDate) {
Date mondayThisWeek = DateUtils.getMonday(businessDate);
return getDateIntervalListBetweenTwoDates(mondayThisWeek, businessDate);
}
/**
* get interval between monday to sunday of last week
* default set monday the first day of week

92
escheduler-common/src/main/java/cn/escheduler/common/zk/AbstractZKClient.java

@ -30,13 +30,12 @@ import org.apache.curator.framework.imps.CuratorFrameworkState;
import org.apache.curator.framework.state.ConnectionState;
import org.apache.curator.framework.state.ConnectionStateListener;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
import org.apache.zookeeper.CreateMode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.*;
import static cn.escheduler.common.Constants.*;
@ -213,9 +212,9 @@ public abstract class AbstractZKClient {
protected void initSystemZNode(){
try {
// read master node parent path from conf
masterZNodeParentPath = conf.getString(Constants.ZOOKEEPER_ESCHEDULER_MASTERS);
masterZNodeParentPath = getMasterZNodeParentPath();
// read worker node parent path from conf
workerZNodeParentPath = conf.getString(Constants.ZOOKEEPER_ESCHEDULER_WORKERS);
workerZNodeParentPath = getWorkerZNodeParentPath();
// read server node parent path from conf
deadServerZNodeParentPath = conf.getString(ZOOKEEPER_ESCHEDULER_DEAD_SERVERS);
@ -243,6 +242,7 @@ public abstract class AbstractZKClient {
}
}
public void removeDeadServerByHost(String host, String serverType) throws Exception {
List<String> deadServers = zkClient.getChildren().forPath(deadServerZNodeParentPath);
for(String serverPath : deadServers){
@ -291,6 +291,8 @@ public abstract class AbstractZKClient {
}
/**
* for stop server
* @param serverStoppable
@ -312,7 +314,10 @@ public abstract class AbstractZKClient {
childrenList = zkClient.getChildren().forPath(masterZNodeParentPath);
}
} catch (Exception e) {
logger.warn(e.getMessage(),e);
if(!e.getMessage().contains("java.lang.IllegalStateException: instance must be started")){
logger.warn(e.getMessage(),e);
}
return childrenList.size();
}
return childrenList.size();
@ -336,6 +341,81 @@ public abstract class AbstractZKClient {
return sb.toString();
}
/**
* get master/worker server list map.
* result : {host : resource info}
* @return
*/
public Map<String, String> getServerList(boolean isMaster ){
Map<String, String> masterMap = new HashMap<>();
try {
String path = isMaster ? getMasterZNodeParentPath() : getWorkerZNodeParentPath();
List<String> serverList = getZkClient().getChildren().forPath(path);
for(String server : serverList){
byte[] bytes = getZkClient().getData().forPath(path + "/" + server);
masterMap.putIfAbsent(server, new String(bytes));
}
} catch (Exception e) {
e.printStackTrace();
}
return masterMap;
}
/**
* get zkclient
* @return
*/
public CuratorFramework getZkClient() {
return zkClient;
}
/**
* get worker node parent path
* @return
*/
protected String getWorkerZNodeParentPath(){return conf.getString(Constants.ZOOKEEPER_ESCHEDULER_WORKERS);};
/**
* get master node parent path
* @return
*/
protected String getMasterZNodeParentPath(){return conf.getString(Constants.ZOOKEEPER_ESCHEDULER_MASTERS);}
/**
* get master lock path
* @return
*/
public String getMasterLockPath(){
return conf.getString(Constants.ZOOKEEPER_ESCHEDULER_LOCK_MASTERS);
}
/**
* get master start up lock path
* @return
*/
public String getMasterStartUpLockPath(){
return conf.getString(Constants.ZOOKEEPER_ESCHEDULER_LOCK_FAILOVER_STARTUP_MASTERS);
}
/**
* get master failover lock path
* @return
*/
public String getMasterFailoverLockPath(){
return conf.getString(Constants.ZOOKEEPER_ESCHEDULER_LOCK_FAILOVER_MASTERS);
}
/**
* get worker failover lock path
* @return
*/
public String getWorkerFailoverLockPath(){
return conf.getString(Constants.ZOOKEEPER_ESCHEDULER_LOCK_FAILOVER_WORKERS);
}
@Override
public String toString() {
return "AbstractZKClient{" +

21
escheduler-common/src/main/resources/common/common.properties

@ -10,11 +10,26 @@ data.download.basedir.path=/tmp/escheduler/download
# process execute directory. self configuration, please make sure the directory exists and have read write permissions
process.exec.basepath=/tmp/escheduler/exec
# Users who have permission to create directories under the HDFS root path
hdfs.root.user=hdfs
# data base dir, resource files will be stored in this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and has read/write permissions. "/escheduler" is recommended
data.store2hdfs.basepath=/escheduler
# whether hdfs starts
hdfs.startup.state=true
# resource upload startup type : HDFS,S3,NONE
res.upload.startup.type=NONE
# whether kerberos starts
hadoop.security.authentication.startup.state=false
# java.security.krb5.conf path
java.security.krb5.conf.path=/opt/krb5.conf
# loginUserFromKeytab user
login.user.keytab.username=hdfs-mycluster@ESZ.COM
# loginUserFromKeytab path
login.user.keytab.path=/opt/hdfs.headless.keytab
# system env path. self configuration, please make sure the directory and file exists and have read write execute permissions
escheduler.env.path=/opt/.escheduler_env.sh
@ -23,5 +38,5 @@ escheduler.env.path=/opt/.escheduler_env.sh
resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml
# is development state? default "false"
development.state=false
development.state=true

12
escheduler-common/src/main/resources/common/hadoop/hadoop.properties

@ -1,6 +1,16 @@
# ha or single namenode,If namenode ha needs to copy core-site.xml and hdfs-site.xml to the conf directory
# ha or single namenode; if namenode ha, core-site.xml and hdfs-site.xml need to be copied
# to the conf directory; s3 is also supported, for example: s3a://escheduler
fs.defaultFS=hdfs://mycluster:8020
# s3 need,s3 endpoint
fs.s3a.endpoint=http://192.168.199.91:9010
# s3 need,s3 access key
fs.s3a.access.key=A3DXS30FO22544RE
# s3 need,s3 secret key
fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK
#resourcemanager ha note this need ips , this empty if single
yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx
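For reference, a hypothetical minimal S3 setup would combine the switch in common.properties with the fs.* keys in hadoop.properties roughly as follows; the endpoint and credentials are placeholders, not working values:

# common.properties
res.upload.startup.type=S3

# hadoop.properties
fs.defaultFS=s3a://escheduler
fs.s3a.endpoint=http://s3.example.com:9010
fs.s3a.access.key=EXAMPLE_ACCESS_KEY
fs.s3a.secret.key=EXAMPLE_SECRET_KEY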

6
escheduler-common/src/test/java/cn/escheduler/common/os/OSUtilsTest.java

@ -37,6 +37,12 @@ public class OSUtilsTest {
// static HardwareAbstractionLayer hal = si.getHardware();
@Test
public void getHost(){
logger.info(OSUtils.getHost());
}
@Test
public void memoryUsage() {
logger.info("memoryUsage : {}", OSUtils.memoryUsage());// 0.3361799418926239

87
escheduler-common/src/test/java/cn/escheduler/common/queue/TaskQueueImplTest.java

@ -17,12 +17,15 @@
package cn.escheduler.common.queue;
import cn.escheduler.common.Constants;
import org.junit.Assert;
import cn.escheduler.common.utils.IpUtils;
import cn.escheduler.common.utils.OSUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import static org.junit.Assert.assertEquals;
@ -34,59 +37,60 @@ public class TaskQueueImplTest {
private static final Logger logger = LoggerFactory.getLogger(TaskQueueImplTest.class);
ITaskQueue tasksQueue = null;
@Test
public void testTaskQueue(){
@Before
public void before(){
tasksQueue = TaskQueueFactory.getTaskQueueInstance();
//clear all data
tasksQueue.delete();
ITaskQueue tasksQueue = TaskQueueFactory.getTaskQueueInstance();
}
@After
public void after(){
//clear all data
tasksQueue.delete();
}
@Test
public void testAdd(){
//add
tasksQueue.add(Constants.SCHEDULER_TASKS_QUEUE,"1");
tasksQueue.add(Constants.SCHEDULER_TASKS_QUEUE,"2");
tasksQueue.add(Constants.SCHEDULER_TASKS_QUEUE,"3");
tasksQueue.add(Constants.SCHEDULER_TASKS_QUEUE,"4");
tasksQueue.add(Constants.SCHEDULER_TASKS_QUEUE,"1_0_1_1_-1");
tasksQueue.add(Constants.SCHEDULER_TASKS_QUEUE,"0_1_1_1_2130706433,3232236775");
tasksQueue.add(Constants.SCHEDULER_TASKS_QUEUE,"1_1_0_1_2130706433,3232236775,"+IpUtils.ipToLong(OSUtils.getHost()));
tasksQueue.add(Constants.SCHEDULER_TASKS_QUEUE,"1_2_1_1_2130706433,3232236775");
List<String> tasks = tasksQueue.poll(Constants.SCHEDULER_TASKS_QUEUE, 1);
if(tasks.size() <= 0){
return;
}
//pop
String node1 = tasksQueue.poll(Constants.SCHEDULER_TASKS_QUEUE, false);
assertEquals(node1,"1");
String node2 = tasksQueue.poll(Constants.SCHEDULER_TASKS_QUEUE, false);
assertEquals(node2,"2");
//sadd
String task1 = "1.1.1.1-1-mr";
String task2 = "1.1.1.2-2-mr";
String task3 = "1.1.1.3-3-mr";
String task4 = "1.1.1.4-4-mr";
String task5 = "1.1.1.5-5-mr";
tasksQueue.sadd(Constants.SCHEDULER_TASKS_KILL,task1);
tasksQueue.sadd(Constants.SCHEDULER_TASKS_KILL,task2);
tasksQueue.sadd(Constants.SCHEDULER_TASKS_KILL,task3);
tasksQueue.sadd(Constants.SCHEDULER_TASKS_KILL,task4);
tasksQueue.sadd(Constants.SCHEDULER_TASKS_KILL,task5);
tasksQueue.sadd(Constants.SCHEDULER_TASKS_KILL,task5); //repeat task
Assert.assertEquals(tasksQueue.smembers(Constants.SCHEDULER_TASKS_KILL).size(),5);
logger.info(Arrays.toString(tasksQueue.smembers(Constants.SCHEDULER_TASKS_KILL).toArray()));
//srem
tasksQueue.srem(Constants.SCHEDULER_TASKS_KILL,task5);
//smembers
Assert.assertEquals(tasksQueue.smembers(Constants.SCHEDULER_TASKS_KILL).size(),4);
logger.info(Arrays.toString(tasksQueue.smembers(Constants.SCHEDULER_TASKS_KILL).toArray()));
String node1 = tasks.get(0);
assertEquals(node1,"1_0_1_1_-1");
tasks = tasksQueue.poll(Constants.SCHEDULER_TASKS_QUEUE, 1);
if(tasks.size() <= 0){
return;
}
}
/**
* test one million data from zookeeper queue
*/
@Test
public void extremeTest(){
ITaskQueue tasksQueue = TaskQueueFactory.getTaskQueueInstance();
//clear all data
tasksQueue.delete();
int total = 30 * 10000;
for(int i = 0; i < total; i++)
@ -99,14 +103,9 @@ public class TaskQueueImplTest {
}
}
String node1 = tasksQueue.poll(Constants.SCHEDULER_TASKS_QUEUE, false);
String node1 = tasksQueue.poll(Constants.SCHEDULER_TASKS_QUEUE, 1).get(0);
assertEquals(node1,"0");
//clear all data
tasksQueue.delete();
}
}

20
escheduler-common/src/test/java/cn/escheduler/common/utils/DependentUtilsTest.java

@ -80,6 +80,26 @@ public class DependentUtilsTest {
Assert.assertEquals(dateIntervals.get(0), diCur);
dateValue = "thisWeek";
Date firstWeekDay = DateUtils.getMonday(curDay);
dateIntervals = DependentUtils.getDateIntervalList(curDay, dateValue);
DateInterval weekHead = new DateInterval(DateUtils.getStartOfDay(firstWeekDay), DateUtils.getEndOfDay(firstWeekDay));
DateInterval weekThis = new DateInterval(DateUtils.getStartOfDay(curDay), DateUtils.getEndOfDay(curDay));
Assert.assertEquals(dateIntervals.get(0), weekHead);
Assert.assertEquals(dateIntervals.get(dateIntervals.size() - 1), weekThis);
dateValue = "thisMonth";
Date firstMonthDay = DateUtils.getFirstDayOfMonth(curDay);
dateIntervals = DependentUtils.getDateIntervalList(curDay, dateValue);
DateInterval monthHead = new DateInterval(DateUtils.getStartOfDay(firstMonthDay), DateUtils.getEndOfDay(firstMonthDay));
DateInterval monthThis = new DateInterval(DateUtils.getStartOfDay(curDay), DateUtils.getEndOfDay(curDay));
Assert.assertEquals(dateIntervals.get(0), monthHead);
Assert.assertEquals(dateIntervals.get(dateIntervals.size() - 1), monthThis);
}

41
escheduler-common/src/test/java/cn/escheduler/common/utils/IpUtilsTest.java

@ -0,0 +1,41 @@
package cn.escheduler.common.utils;
import org.junit.Assert;
import org.junit.Test;
import static org.junit.Assert.*;
public class IpUtilsTest {
@Test
public void ipToLong() {
String ip = "192.168.110.1";
String ip2 = "0.0.0.0";
long longNumber = IpUtils.ipToLong(ip);
long longNumber2 = IpUtils.ipToLong(ip2);
System.out.println(longNumber);
Assert.assertEquals(longNumber, 3232263681L);
Assert.assertEquals(longNumber2, 0L);
String ip3 = "255.255.255.255";
long longNumber3 = IpUtils.ipToLong(ip3);
System.out.println(longNumber3);
Assert.assertEquals(longNumber3, 4294967295L);
}
@Test
public void longToIp() {
String ip = "192.168.110.1";
String ip2 = "0.0.0.0";
long longNum = 3232263681L;
String i1 = IpUtils.longToIp(longNum);
String i2 = IpUtils.longToIp(0);
Assert.assertEquals(ip, i1);
Assert.assertEquals(ip2, i2);
}
}
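
The expected values in this test follow from treating each octet as one byte of a 32-bit number: 192.168.110.1 -> 192*2^24 + 168*2^16 + 110*2^8 + 1 = 3232263681, and 255.255.255.255 -> 2^32 - 1 = 4294967295. A minimal sketch of that conversion, not necessarily identical to cn.escheduler.common.utils.IpUtils:

public class IpConversionSketch {
    // pack a dotted-quad address into a long, one octet per byte
    static long ipToLong(String ip) {
        long result = 0;
        for (String octet : ip.split("\\.")) {
            result = (result << 8) | Long.parseLong(octet);
        }
        return result;
    }

    // unpack the long back into dotted-quad form
    static String longToIp(long value) {
        return ((value >> 24) & 0xFF) + "." + ((value >> 16) & 0xFF) + "."
                + ((value >> 8) & 0xFF) + "." + (value & 0xFF);
    }

    public static void main(String[] args) {
        System.out.println(ipToLong("192.168.110.1"));   // 3232263681
        System.out.println(longToIp(3232263681L));       // 192.168.110.1
    }
}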

6
escheduler-dao/pom.xml

@ -4,7 +4,7 @@
<parent>
<groupId>cn.analysys</groupId>
<artifactId>escheduler</artifactId>
<version>1.0.3-SNAPSHOT</version>
<version>1.1.0-SNAPSHOT</version>
</parent>
<artifactId>escheduler-dao</artifactId>
<name>escheduler-dao</name>
@ -37,6 +37,10 @@
<groupId>org.apache.tomcat</groupId>
<artifactId>tomcat-jdbc</artifactId>
</exclusion>
<exclusion>
<artifactId>log4j-to-slf4j</artifactId>
<groupId>org.apache.logging.log4j</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>

147
escheduler-dao/src/main/java/cn/escheduler/dao/ProcessDao.java

@ -25,6 +25,7 @@ import cn.escheduler.common.queue.ITaskQueue;
import cn.escheduler.common.queue.TaskQueueFactory;
import cn.escheduler.common.task.subprocess.SubProcessParameters;
import cn.escheduler.common.utils.DateUtils;
import cn.escheduler.common.utils.IpUtils;
import cn.escheduler.common.utils.JSONUtils;
import cn.escheduler.common.utils.ParameterUtils;
import cn.escheduler.dao.mapper.*;
@ -58,6 +59,7 @@ public class ProcessDao extends AbstractBaseDao {
private final int[] stateArray = new int[]{ExecutionStatus.SUBMITTED_SUCCESS.ordinal(),
ExecutionStatus.RUNNING_EXEUTION.ordinal(),
ExecutionStatus.READY_PAUSE.ordinal(),
// ExecutionStatus.NEED_FAULT_TOLERANCE.ordinal(),
ExecutionStatus.READY_STOP.ordinal()};
@Autowired
@ -96,6 +98,12 @@ public class ProcessDao extends AbstractBaseDao {
@Autowired
private ErrorCommandMapper errorCommandMapper;
@Autowired
private WorkerServerMapper workerServerMapper;
@Autowired
private TenantMapper tenantMapper;
/**
* task queue impl
*/
@ -110,7 +118,7 @@ public class ProcessDao extends AbstractBaseDao {
*/
@Override
protected void init() {
userMapper=getMapper(UserMapper.class);
userMapper = getMapper(UserMapper.class);
processDefineMapper = getMapper(ProcessDefinitionMapper.class);
processInstanceMapper = getMapper(ProcessInstanceMapper.class);
dataSourceMapper = getMapper(DataSourceMapper.class);
@ -121,7 +129,9 @@ public class ProcessDao extends AbstractBaseDao {
udfFuncMapper = getMapper(UdfFuncMapper.class);
resourceMapper = getMapper(ResourceMapper.class);
workerGroupMapper = getMapper(WorkerGroupMapper.class);
workerServerMapper = getMapper(WorkerServerMapper.class);
taskQueue = TaskQueueFactory.getTaskQueueInstance();
tenantMapper = getMapper(TenantMapper.class);
}
@ -483,11 +493,33 @@ public class ProcessDao extends AbstractBaseDao {
processInstance.setProcessInstanceJson(processDefinition.getProcessDefinitionJson());
// set process instance priority
processInstance.setProcessInstancePriority(command.getProcessInstancePriority());
processInstance.setWorkerGroupId(command.getWorkerGroupId());
int workerGroupId = command.getWorkerGroupId() == 0 ? -1 : command.getWorkerGroupId();
processInstance.setWorkerGroupId(workerGroupId);
processInstance.setTimeout(processDefinition.getTimeout());
processInstance.setTenantId(processDefinition.getTenantId());
return processInstance;
}
/**
* get process tenant
* if there is a tenant id in the definition, use the tenant of the definition;
* if there is no tenant id in the definition or the tenant does not exist,
* use the definition creator's tenant.
* @param tenantId
* @param userId
* @return
*/
public Tenant getTenantForProcess(int tenantId, int userId){
Tenant tenant = null;
if(tenantId >= 0){
tenant = tenantMapper.queryById(tenantId);
}
if(tenant == null){
User user = userMapper.queryById(userId);
tenant = tenantMapper.queryById(user.getTenantId());
}
return tenant;
}
/**
* check command parameters is valid
@ -581,6 +613,8 @@ public class ProcessDao extends AbstractBaseDao {
processInstance.setScheduleTime(command.getScheduleTime());
}
processInstance.setHost(host);
ExecutionStatus runStatus = ExecutionStatus.RUNNING_EXEUTION;
int runTime = processInstance.getRunTimes();
switch (commandType){
case START_PROCESS:
@ -610,6 +644,9 @@ public class ProcessDao extends AbstractBaseDao {
// find pause tasks and init task's state
cmdParam.remove(Constants.CMDPARAM_RECOVERY_START_NODE_STRING);
List<Integer> suspendedNodeList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.PAUSE);
List<Integer> stopNodeList = findTaskIdByInstanceState(processInstance.getId(),
ExecutionStatus.KILL);
suspendedNodeList.addAll(stopNodeList);
for(Integer taskId : suspendedNodeList){
// initialize the paused state
initTaskInstance(this.findTaskInstanceById(taskId));
@ -621,6 +658,7 @@ public class ProcessDao extends AbstractBaseDao {
case RECOVER_TOLERANCE_FAULT_PROCESS:
// recover tolerance fault process
processInstance.setRecovery(Flag.YES);
runStatus = processInstance.getState();
break;
case COMPLEMENT_DATA:
// delete all the valid tasks when complement data
@ -652,7 +690,7 @@ public class ProcessDao extends AbstractBaseDao {
default:
break;
}
processInstance.setState(ExecutionStatus.RUNNING_EXEUTION);
processInstance.setState(runStatus);
return processInstance;
}
@ -756,13 +794,16 @@ public class ProcessDao extends AbstractBaseDao {
* @param taskInstance
*/
private void initTaskInstance(TaskInstance taskInstance){
if(taskInstance.getState().typeIsFailure() && !taskInstance.isSubProcess()){
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
}else{
taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS);
updateTaskInstance(taskInstance);
if(!taskInstance.isSubProcess()){
if(taskInstance.getState().typeIsCancel() || taskInstance.getState().typeIsFailure()){
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
return;
}
}
taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS);
updateTaskInstance(taskInstance);
}
/**
@ -970,17 +1011,64 @@ public class ProcessDao extends AbstractBaseDao {
}
/**
* ${processInstancePriority}_${processInstanceId}_${taskInstancePriority}_${taskId}
* ${processInstancePriority}_${processInstanceId}_${taskInstancePriority}_${taskId}_${task executed by ip1},${ip2}...
*
* The tasks with the highest priority are selected by comparing the priorities of the above four levels from high to low.
*
* process instance priority _ process instance id _ task priority _ task id                     high <- low
* process instance priority _ process instance id _ task priority _ task id _ task executor ip1,ip2...   high <- low
*
* @param task
* @param taskInstance
* @return
*/
private String taskZkInfo(TaskInstance task) {
return String.valueOf(task.getProcessInstancePriority().ordinal()) + Constants.UNDERLINE + task.getProcessInstanceId() + Constants.UNDERLINE + task.getTaskInstancePriority().ordinal() + Constants.UNDERLINE + task.getId();
private String taskZkInfo(TaskInstance taskInstance) {
int taskWorkerGroupId = getTaskWorkerGroupId(taskInstance);
StringBuilder sb = new StringBuilder(100);
sb.append(taskInstance.getProcessInstancePriority().ordinal()).append(Constants.UNDERLINE)
.append(taskInstance.getProcessInstanceId()).append(Constants.UNDERLINE)
.append(taskInstance.getTaskInstancePriority().ordinal()).append(Constants.UNDERLINE)
.append(taskInstance.getId()).append(Constants.UNDERLINE);
if(taskWorkerGroupId > 0){
//not to find data from db
WorkerGroup workerGroup = queryWorkerGroupById(taskWorkerGroupId);
if(workerGroup == null ){
logger.info("task {} cannot find the worker group, use all worker instead.", taskInstance.getId());
sb.append(Constants.DEFAULT_WORKER_ID);
return sb.toString();
}
String ips = workerGroup.getIpList();
if(StringUtils.isBlank(ips)){
logger.error("task:{} worker group:{} parameters(ip_list) is null, this task would be running on all workers",
taskInstance.getId(), workerGroup.getId());
sb.append(Constants.DEFAULT_WORKER_ID);
return sb.toString();
}
StringBuilder ipSb = new StringBuilder(100);
String[] ipArray = ips.split(COMMA);
for (String ip : ipArray) {
long ipLong = IpUtils.ipToLong(ip);
ipSb.append(ipLong).append(COMMA);
}
if(ipSb.length() > 0) {
ipSb.deleteCharAt(ipSb.length() - 1);
}
sb.append(ipSb);
}else{
sb.append(Constants.DEFAULT_WORKER_ID);
}
return sb.toString();
}
/**
@ -1566,7 +1654,6 @@ public class ProcessDao extends AbstractBaseDao {
for (ProcessInstance processInstance:processInstanceList){
processNeedFailoverProcessInstances(processInstance);
}
}
@Transactional(value = "TransactionManager",rollbackFor = Exception.class)
@ -1633,6 +1720,36 @@ public class ProcessDao extends AbstractBaseDao {
return workerGroupMapper.queryById(workerGroupId);
}
/**
* query worker server by host
* @param host
* @return
*/
public List<WorkerServer> queryWorkerServerByHost(String host){
return workerServerMapper.queryWorkerByHost(host);
}
/**
* get task worker group id
*
* @param taskInstance
* @return
*/
public int getTaskWorkerGroupId(TaskInstance taskInstance) {
int taskWorkerGroupId = taskInstance.getWorkerGroupId();
ProcessInstance processInstance = findProcessInstanceByTaskId(taskInstance.getId());
if(processInstance == null){
logger.error("cannot find the task:{} process instance", taskInstance.getId());
return Constants.DEFAULT_WORKER_ID;
}
int processWorkerGroupId = processInstance.getWorkerGroupId();
taskWorkerGroupId = (taskWorkerGroupId <= 0 ? processWorkerGroupId : taskWorkerGroupId);
return taskWorkerGroupId;
}
}
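
To make the queue key built by taskZkInfo above concrete: a task with process instance priority ordinal 2, process instance id 10072, task priority ordinal 3 and task id 20113, bound to a worker group whose ip_list is 127.0.0.1,192.168.4.231, would be enqueued as 2_10072_3_20113_2130706433,3232236775 (the last segment being the long-encoded IPs), while a task with no worker group ends with Constants.DEFAULT_WORKER_ID. The ids and priorities here are invented for illustration. A small sketch of splitting such a key back into its fields:

public class TaskZkInfoSketch {
    public static void main(String[] args) {
        // illustrative key in the format described by the taskZkInfo comment above
        String zkInfo = "2_10072_3_20113_2130706433,3232236775";
        String[] parts = zkInfo.split("_");
        int processInstancePriority = Integer.parseInt(parts[0]);
        int processInstanceId       = Integer.parseInt(parts[1]);
        int taskInstancePriority    = Integer.parseInt(parts[2]);
        int taskId                  = Integer.parseInt(parts[3]);
        // the trailing segment lists allowed worker IPs as longs; "-1" means any worker
        String[] workerIps = parts[4].split(",");
        System.out.println(processInstancePriority + "_" + processInstanceId + "_"
                + taskInstancePriority + "_" + taskId + " -> " + workerIps.length + " worker ip(s)");
    }
}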

46
escheduler-dao/src/main/java/cn/escheduler/dao/TaskRecordDao.java

@ -17,6 +17,8 @@
package cn.escheduler.dao;
import cn.escheduler.common.Constants;
import cn.escheduler.common.enums.TaskRecordStatus;
import cn.escheduler.common.utils.CollectionUtils;
import cn.escheduler.common.utils.DateUtils;
import cn.escheduler.dao.model.TaskRecord;
import org.apache.commons.configuration.Configuration;
@ -28,6 +30,7 @@ import org.slf4j.LoggerFactory;
import java.sql.*;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
@ -43,7 +46,7 @@ public class TaskRecordDao {
/**
* load configuration file
* load conf file
*/
private static Configuration conf;
@ -56,6 +59,14 @@ public class TaskRecordDao {
}
}
/**
* get task record flag
* @return
*/
public static boolean getTaskRecordFlag(){
return conf.getBoolean(Constants.TASK_RECORD_FLAG);
}
/**
* create connection
* @return
@ -253,4 +264,37 @@ public class TaskRecordDao {
}
return recordList;
}
/**
* query the task record by procName and procDate
* @param procName
* @param procDate
* @return
*/
public static TaskRecordStatus getTaskRecordState(String procName,String procDate){
String sql = String.format("SELECT * FROM eamp_hive_log_hd WHERE PROC_NAME='%s' and PROC_DATE like '%s'"
,procName,procDate + "%");
List<TaskRecord> taskRecordList = getQueryResult(sql);
// no record found, or a sql exception occurred
if (CollectionUtils.isEmpty(taskRecordList)){
// exception
return TaskRecordStatus.EXCEPTION;
}else if (taskRecordList.size() > 1){
return TaskRecordStatus.EXCEPTION;
}else {
TaskRecord taskRecord = taskRecordList.get(0);
if (taskRecord == null){
return TaskRecordStatus.EXCEPTION;
}
Long targetRowCount = taskRecord.getTargetRowCount();
if (targetRowCount <= 0){
return TaskRecordStatus.FAILURE;
}else {
return TaskRecordStatus.SUCCESS;
}
}
}
}
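
getTaskRecordState above interpolates procName and procDate into the SQL with String.format; the same lookup can equivalently be written with a PreparedStatement, which avoids any quoting issues in the process name. A minimal sketch, assuming a java.sql.Connection obtained the same way the surrounding DAO already does (the countTaskRecords helper is hypothetical):

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;

public class TaskRecordQuerySketch {
    // count rows in eamp_hive_log_hd matching a proc name and a proc date prefix
    static int countTaskRecords(Connection conn, String procName, String procDate) throws Exception {
        String sql = "SELECT COUNT(*) FROM eamp_hive_log_hd WHERE PROC_NAME = ? AND PROC_DATE LIKE ?";
        try (PreparedStatement ps = conn.prepareStatement(sql)) {
            ps.setString(1, procName);
            ps.setString(2, procDate + "%");
            try (ResultSet rs = ps.executeQuery()) {
                return rs.next() ? rs.getInt(1) : 0;
            }
        }
    }
}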

13
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/DataSourceMapper.java

@ -216,4 +216,17 @@ public interface DataSourceMapper {
@SelectProvider(type = DataSourceMapperProvider.class, method = "queryDatasourceExceptUserId")
List<DataSource> queryDatasourceExceptUserId(@Param("userId") int userId);
@Results(value = {
@Result(property = "id", column = "id", id = true, javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "name", column = "name", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "note", column = "note", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "type", column = "type", typeHandler = EnumOrdinalTypeHandler.class, javaType = DbType.class, jdbcType = JdbcType.INTEGER),
@Result(property = "userId", column = "user_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "connectionParams", column = "connection_params", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "createTime", column = "create_time", javaType = Timestamp.class, jdbcType = JdbcType.DATE),
@Result(property = "updateTime", column = "update_time", javaType = Timestamp.class, jdbcType = JdbcType.DATE)
})
@SelectProvider(type = DataSourceMapperProvider.class, method = "listAllDataSourceByType")
List<DataSource> listAllDataSourceByType(@Param("type") Integer type);
}

19
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/DataSourceMapperProvider.java

@ -175,8 +175,7 @@ public class DataSourceMapperProvider {
}
/**
* query the total number of data sources
*
* Query the total number of data sources
* @param parameter
* @return
*/
@ -228,4 +227,20 @@ public class DataSourceMapperProvider {
WHERE("user_id <> #{userId}");
}}.toString();
}
/**
* list all data source by type
*
* @param parameter
* @return
*/
public String listAllDataSourceByType(Map<String, Object> parameter) {
return new SQL() {{
SELECT("*");
FROM(TABLE_NAME);
WHERE("type = #{type}");
}}.toString();
}
}

5
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessDefinitionMapper.java

@ -95,6 +95,7 @@ public interface ProcessDefinitionMapper {
@Result(property = "connects", column = "connects", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "projectName", column = "project_name", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "receivers", column = "receivers", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "receiversCc", column = "receivers_cc", javaType = String.class, jdbcType = JdbcType.VARCHAR)
@ -123,6 +124,7 @@ public interface ProcessDefinitionMapper {
@Result(property = "locations", column = "locations", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "connects", column = "connects", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "projectName", column = "project_name", javaType = String.class, jdbcType = JdbcType.VARCHAR)
})
@SelectProvider(type = ProcessDefinitionMapperProvider.class, method = "queryByDefineName")
@ -160,6 +162,7 @@ public interface ProcessDefinitionMapper {
@Result(property = "flag", column = "flag", typeHandler = EnumOrdinalTypeHandler.class, javaType = Flag.class, jdbcType = JdbcType.TINYINT),
@Result(property = "userName", column = "user_name", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "projectName", column = "project_name", javaType = String.class, jdbcType = JdbcType.VARCHAR)
})
@SelectProvider(type = ProcessDefinitionMapperProvider.class, method = "queryAllDefinitionList")
@ -187,6 +190,7 @@ public interface ProcessDefinitionMapper {
@Result(property = "userName", column = "user_name", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "scheduleReleaseState", column = "schedule_release_state", typeHandler = EnumOrdinalTypeHandler.class, javaType = ReleaseState.class, jdbcType = JdbcType.TINYINT),
@Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "projectName", column = "project_name", javaType = String.class, jdbcType = JdbcType.VARCHAR)
})
@SelectProvider(type = ProcessDefinitionMapperProvider.class, method = "queryDefineListPaging")
@ -216,6 +220,7 @@ public interface ProcessDefinitionMapper {
@Result(property = "connects", column = "connects", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "userName", column = "user_name", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "projectName", column = "project_name", javaType = String.class, jdbcType = JdbcType.VARCHAR)
})
@SelectProvider(type = ProcessDefinitionMapperProvider.class, method = "queryDefinitionListByIdList")

4
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessDefinitionMapperProvider.java

@ -56,6 +56,7 @@ public class ProcessDefinitionMapperProvider {
VALUES("`create_time`", "#{processDefinition.createTime}");
VALUES("`update_time`", "#{processDefinition.updateTime}");
VALUES("`timeout`", "#{processDefinition.timeout}");
VALUES("`tenant_id`", "#{processDefinition.tenantId}");
VALUES("`flag`", EnumFieldUtil.genFieldStr("processDefinition.flag", ReleaseState.class));
VALUES("`user_id`", "#{processDefinition.userId}");
@ -102,6 +103,7 @@ public class ProcessDefinitionMapperProvider {
SET("`create_time`=#{processDefinition.createTime}");
SET("`update_time`=#{processDefinition.updateTime}");
SET("`timeout`=#{processDefinition.timeout}");
SET("`tenant_id`=#{processDefinition.tenantId}");
SET("`flag`="+EnumFieldUtil.genFieldStr("processDefinition.flag", Flag.class));
SET("`user_id`=#{processDefinition.userId}");
@ -189,7 +191,7 @@ public class ProcessDefinitionMapperProvider {
if(userId != null && 0 != Integer.parseInt(userId.toString())){
WHERE("td.user_id = #{userId}");
}
ORDER_BY(" td.update_time desc limit #{offset},#{pageSize} ");
ORDER_BY(" sc.schedule_release_state desc,td.update_time desc limit #{offset},#{pageSize} ");
}}.toString();
}
/**

11
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessInstanceMapper.java

@ -97,6 +97,7 @@ public interface ProcessInstanceMapper {
@Result(property = "queue", column = "queue", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "workerGroupId", column = "worker_group_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "processInstancePriority", column = "process_instance_priority", javaType = Priority.class, typeHandler = EnumOrdinalTypeHandler.class, jdbcType = JdbcType.TINYINT)
})
@SelectProvider(type = ProcessInstanceMapperProvider.class, method = "queryDetailById")
@ -136,6 +137,7 @@ public interface ProcessInstanceMapper {
@Result(property = "dependenceScheduleTimes", column = "dependence_schedule_times", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "workerGroupId", column = "worker_group_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "processInstancePriority", column = "process_instance_priority", javaType = Priority.class, typeHandler = EnumOrdinalTypeHandler.class, jdbcType = JdbcType.TINYINT)
})
@SelectProvider(type = ProcessInstanceMapperProvider.class, method = "queryById")
@ -175,6 +177,7 @@ public interface ProcessInstanceMapper {
@Result(property = "processInstanceJson", column = "process_instance_json", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "workerGroupId", column = "worker_group_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "processInstancePriority", column = "process_instance_priority", javaType = Priority.class, typeHandler = EnumOrdinalTypeHandler.class, jdbcType = JdbcType.TINYINT)
})
@ -214,6 +217,7 @@ public interface ProcessInstanceMapper {
@Result(property = "processInstanceJson", column = "process_instance_json", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "workerGroupId", column = "worker_group_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "processInstancePriority", column = "process_instance_priority", javaType = Priority.class, typeHandler = EnumOrdinalTypeHandler.class, jdbcType = JdbcType.TINYINT)
})
@ -262,6 +266,7 @@ public interface ProcessInstanceMapper {
@Result(property = "processInstanceJson", column = "process_instance_json", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "workerGroupId", column = "worker_group_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "processInstancePriority", column = "process_instance_priority", javaType = Priority.class, typeHandler = EnumOrdinalTypeHandler.class, jdbcType = JdbcType.TINYINT)
})
@ -359,6 +364,7 @@ public interface ProcessInstanceMapper {
@Result(property = "processInstanceJson", column = "process_instance_json", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "workerGroupId", column = "worker_group_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "processInstancePriority", column = "process_instance_priority", javaType = Priority.class, typeHandler = EnumOrdinalTypeHandler.class, jdbcType = JdbcType.TINYINT)
})
@ -452,6 +458,7 @@ public interface ProcessInstanceMapper {
@Result(property = "processInstanceJson", column = "process_instance_json", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "workerGroupId", column = "worker_group_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "processInstancePriority", column = "process_instance_priority", javaType = Priority.class, typeHandler = EnumOrdinalTypeHandler.class, jdbcType = JdbcType.TINYINT)
})
@ -497,6 +504,7 @@ public interface ProcessInstanceMapper {
@Result(property = "processInstanceJson", column = "process_instance_json", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "workerGroupId", column = "worker_group_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "processInstancePriority", column = "process_instance_priority", javaType = Priority.class, typeHandler = EnumOrdinalTypeHandler.class, jdbcType = JdbcType.TINYINT)
})
@ -542,6 +550,7 @@ public interface ProcessInstanceMapper {
@Result(property = "processInstanceJson", column = "process_instance_json", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "workerGroupId", column = "worker_group_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "processInstancePriority", column = "process_instance_priority", javaType = Priority.class, typeHandler = EnumOrdinalTypeHandler.class, jdbcType = JdbcType.TINYINT)
})
@ -585,6 +594,7 @@ public interface ProcessInstanceMapper {
@Result(property = "processInstanceJson", column = "process_instance_json", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "workerGroupId", column = "worker_group_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "processInstancePriority", column = "process_instance_priority", javaType = Priority.class, typeHandler = EnumOrdinalTypeHandler.class, jdbcType = JdbcType.TINYINT)
})
@SelectProvider(type = ProcessInstanceMapperProvider.class, method = "queryLastRunningProcess")
@ -628,6 +638,7 @@ public interface ProcessInstanceMapper {
@Result(property = "processInstanceJson", column = "process_instance_json", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "workerGroupId", column = "worker_group_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "processInstancePriority", column = "process_instance_priority", javaType = Priority.class, typeHandler = EnumOrdinalTypeHandler.class, jdbcType = JdbcType.TINYINT)
})
@SelectProvider(type = ProcessInstanceMapperProvider.class, method = "queryLastManualProcess")

15
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessInstanceMapperProvider.java

@ -69,6 +69,7 @@ public class ProcessInstanceMapperProvider {
VALUES("`executor_id`", "#{processInstance.executorId}");
VALUES("`worker_group_id`", "#{processInstance.workerGroupId}");
VALUES("`timeout`", "#{processInstance.timeout}");
VALUES("`tenant_id`", "#{processInstance.tenantId}");
VALUES("`process_instance_priority`", EnumFieldUtil.genFieldStr("processInstance.processInstancePriority", Priority.class));
}
}.toString();
@ -141,6 +142,7 @@ public class ProcessInstanceMapperProvider {
SET("`dependence_schedule_times`=#{processInstance.dependenceScheduleTimes}");
SET("`is_sub_process`="+EnumFieldUtil.genFieldStr("processInstance.isSubProcess", Flag.class));
SET("`executor_id`=#{processInstance.executorId}");
SET("`tenant_id`=#{processInstance.tenantId}");
SET("`worker_group_id`=#{processInstance.workerGroupId}");
SET("`timeout`=#{processInstance.timeout}");
@ -220,11 +222,11 @@ public class ProcessInstanceMapperProvider {
public String queryDetailById(Map<String, Object> parameter) {
return new SQL() {
{
SELECT("inst.*,q.queue_name as queue,t.tenant_code,UNIX_TIMESTAMP(inst.end_time)-UNIX_TIMESTAMP(inst.start_time) as duration");
SELECT("inst.*,UNIX_TIMESTAMP(inst.end_time)-UNIX_TIMESTAMP(inst.start_time) as duration");
FROM(TABLE_NAME + " inst, t_escheduler_user u,t_escheduler_tenant t,t_escheduler_queue q");
FROM(TABLE_NAME + " inst");
WHERE("inst.executor_id = u.id AND u.tenant_id = t.id AND t.queue_id = q.id AND inst.id = #{processId}");
WHERE("inst.id = #{processId}");
}
}.toString();
}
@ -402,7 +404,12 @@ public class ProcessInstanceMapperProvider {
FROM(TABLE_NAME);
WHERE("`host` = #{host} and `state` in (" + strStates.toString() +")");
Object host = parameter.get("host");
if(host != null && StringUtils.isNotEmpty(host.toString())){
WHERE("`host` = #{host} ");
}
WHERE("`state` in (" + strStates.toString() +")");
ORDER_BY("`id` asc");

18
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ResourceMapper.java

@ -274,5 +274,21 @@ public interface ResourceMapper {
@SelectProvider(type = ResourceMapperProvider.class, method = "queryTenantCodeByResourceName")
String queryTenantCodeByResourceName(@Param("resName") String resName);
/**
* list all resources of the appointed type
* @param type
* @return
*/
@Results(value = {@Result(property = "id", column = "id", id = true, javaType = int.class, jdbcType = JdbcType.INTEGER),
@Result(property = "alias", column = "alias", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "fileName", column = "file_name", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "desc", column = "desc", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "userId", column = "user_id", javaType = int.class, jdbcType = JdbcType.INTEGER),
@Result(property = "type", column = "type", typeHandler = EnumOrdinalTypeHandler.class, javaType = ResourceType.class, jdbcType = JdbcType.TINYINT),
@Result(property = "size", column = "size", javaType = Long.class, jdbcType = JdbcType.BIGINT),
@Result(property = "createTime", column = "create_time", javaType = Timestamp.class, jdbcType = JdbcType.DATE),
@Result(property = "updateTime", column = "update_time", javaType = Timestamp.class, jdbcType = JdbcType.DATE)
})
@SelectProvider(type = ResourceMapperProvider.class, method = "listAllResourceByType")
List<Resource> listAllResourceByType(@Param("type") Integer type);
}

16
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ResourceMapperProvider.java

@ -118,6 +118,7 @@ public class ResourceMapperProvider {
SET("`alias` = #{resource.alias}");
SET("`desc` = #{resource.desc}");
SET("`update_time` = #{resource.updateTime}");
SET("`size` = #{resource.size}");
WHERE("`id` = #{resource.id}");
}}.toString();
}
@ -294,4 +295,19 @@ public class ResourceMapperProvider {
WHERE("type = #{type} and user_id = #{userId}");
}}.toString();
}
/**
* list all resource by type
*
* @param parameter
* @return
*/
public String listAllResourceByType(Map<String, Object> parameter) {
return new SQL() {{
SELECT("*");
FROM(TABLE_NAME);
WHERE("type = #{type}");
}}.toString();
}
}

7
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/TaskInstanceMapperProvider.java

@ -228,7 +228,12 @@ public class TaskInstanceMapperProvider {
SELECT("*, UNIX_TIMESTAMP(end_time)-UNIX_TIMESTAMP(start_time) as duration");
FROM(TABLE_NAME);
WHERE("`host` = #{host} and `state` in (" + strStates.toString() +")");
Object host = parameter.get("host");
if(host != null && StringUtils.isNotEmpty(host.toString())){
WHERE("`host` = #{host} ");
}
WHERE("`state` in (" + strStates.toString() +")");
ORDER_BY("`id` asc");
}
}.toString();

2
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/UserMapperProvider.java

@ -187,7 +187,6 @@ public class UserMapperProvider {
return new SQL() {{
SELECT("count(0)");
FROM(TABLE_NAME);
WHERE("user_type = 1");
Object searchVal = parameter.get("searchVal");
if(searchVal != null && StringUtils.isNotEmpty(searchVal.toString())){
WHERE( " user_name like concat('%', #{searchVal}, '%') ");
@ -209,7 +208,6 @@ public class UserMapperProvider {
FROM(TABLE_NAME + " u ");
LEFT_OUTER_JOIN("t_escheduler_tenant t on u.tenant_id = t.id");
LEFT_OUTER_JOIN("t_escheduler_queue q on t.queue_id = q.id");
WHERE("u.user_type = 1");
Object searchVal = parameter.get("searchVal");
if(searchVal != null && StringUtils.isNotEmpty(searchVal.toString())){
WHERE( " u.user_name like concat('%', #{searchVal}, '%') ");

17
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/WorkerServerMapper.java

@ -42,6 +42,23 @@ public interface WorkerServerMapper {
@SelectProvider(type = WorkerServerMapperProvider.class, method = "queryAllWorker")
List<WorkerServer> queryAllWorker();
/**
* query worker list by host
*
* @return
*/
@Results(value = {
@Result(property = "id", column = "id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "host", column = "host", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "port", column = "port", javaType = int.class, jdbcType = JdbcType.INTEGER),
@Result(property = "zkDirectory", column = "zk_directory", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "resInfo", column = "res_info", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "createTime", column = "create_time", javaType = Date.class, jdbcType = JdbcType.TIMESTAMP),
@Result(property = "lastHeartbeatTime", column = "last_heartbeat_time", javaType = Date.class, jdbcType = JdbcType.TIMESTAMP)
})
@SelectProvider(type = WorkerServerMapperProvider.class, method = "queryWorkerByHost")
List<WorkerServer> queryWorkerByHost(@Param("host") String host);
/**
* insert worker server
*

15
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/WorkerServerMapperProvider.java

@ -37,6 +37,21 @@ public class WorkerServerMapperProvider {
}}.toString();
}
/**
* query worker list by host
* @return
*/
public String queryWorkerByHost(Map<String, Object> parameter) {
return new SQL() {{
SELECT("*");
FROM(TABLE_NAME);
WHERE("host = #{host}");
}}.toString();
}
/**
* insert worker server
* @param parameter

10
escheduler-dao/src/main/java/cn/escheduler/dao/model/ProcessData.java

@ -39,6 +39,8 @@ public class ProcessData {
private int timeout;
private int tenantId;
public ProcessData() {
}
@ -92,4 +94,12 @@ public class ProcessData {
public void setTimeout(int timeout) {
this.timeout = timeout;
}
public int getTenantId() {
return tenantId;
}
public void setTenantId(int tenantId) {
this.tenantId = tenantId;
}
}

13
escheduler-dao/src/main/java/cn/escheduler/dao/model/ProcessDefinition.java

@ -141,6 +141,11 @@ public class ProcessDefinition {
*/
private int timeout;
/**
* tenant id
*/
private int tenantId;
public String getName() {
return name;
@ -354,7 +359,15 @@ public class ProcessDefinition {
", receiversCc='" + receiversCc + '\'' +
", scheduleReleaseState=" + scheduleReleaseState +
", timeout=" + timeout +
", tenantId=" + tenantId +
'}';
}
public int getTenantId() {
return tenantId;
}
public void setTenantId(int tenantId) {
this.tenantId = tenantId;
}
}

