Browse Source

Merge remote-tracking branch 'upstream/dev' into dev

pull/2/head
lidongdai 5 years ago
parent
commit
83ca4b9c6e
  1. 41
      Dockerfile
  2. 2
      README.md
  3. 53
      docs/zh_CN/1.0.3-release.md
  4. 96
      docs/zh_CN/EasyScheduler-FAQ.md
  5. 1
      docs/zh_CN/README.md
  6. 13
      docs/zh_CN/SUMMARY.md
  7. BIN
      docs/zh_CN/images/complement.png
  8. BIN
      docs/zh_CN/images/create-queue.png
  9. BIN
      docs/zh_CN/images/dag1.png
  10. BIN
      docs/zh_CN/images/dag2.png
  11. BIN
      docs/zh_CN/images/dag3.png
  12. BIN
      docs/zh_CN/images/dag4.png
  13. BIN
      docs/zh_CN/images/depend-node.png
  14. BIN
      docs/zh_CN/images/depend-node2.png
  15. BIN
      docs/zh_CN/images/depend-node3.png
  16. BIN
      docs/zh_CN/images/file-manage.png
  17. BIN
      docs/zh_CN/images/gant-pic.png
  18. BIN
      docs/zh_CN/images/global_parameter.png
  19. BIN
      docs/zh_CN/images/hive_edit.png
  20. BIN
      docs/zh_CN/images/hive_edit2.png
  21. BIN
      docs/zh_CN/images/instance-detail.png
  22. BIN
      docs/zh_CN/images/instance-list.png
  23. BIN
      docs/zh_CN/images/local_parameter.png
  24. BIN
      docs/zh_CN/images/master-jk.png
  25. BIN
      docs/zh_CN/images/mysql-jk.png
  26. BIN
      docs/zh_CN/images/mysql_edit.png
  27. BIN
      docs/zh_CN/images/postgressql_edit.png
  28. BIN
      docs/zh_CN/images/project.png
  29. BIN
      docs/zh_CN/images/run-work.png
  30. BIN
      docs/zh_CN/images/sql-node.png
  31. BIN
      docs/zh_CN/images/sql-node2.png
  32. BIN
      docs/zh_CN/images/task-list.png
  33. BIN
      docs/zh_CN/images/task-log.png
  34. BIN
      docs/zh_CN/images/task-log2.png
  35. BIN
      docs/zh_CN/images/time-schedule.png
  36. BIN
      docs/zh_CN/images/time-schedule2.png
  37. BIN
      docs/zh_CN/images/worker-jk.png
  38. BIN
      docs/zh_CN/images/worker1.png
  39. BIN
      docs/zh_CN/images/zk-jk.png
  40. 4
      docs/zh_CN/前端部署文档.md
  41. 10
      docs/zh_CN/后端部署文档.md
  42. 50
      docs/zh_CN/快速上手.md
  43. 707
      docs/zh_CN/系统使用手册.md
  44. 2
      escheduler-alert/pom.xml
  45. 14
      escheduler-alert/src/main/java/cn/escheduler/alert/utils/Constants.java
  46. 167
      escheduler-alert/src/main/java/cn/escheduler/alert/utils/EnterpriseWeChatUtils.java
  47. 34
      escheduler-alert/src/main/java/cn/escheduler/alert/utils/FuncUtils.java
  48. 22
      escheduler-alert/src/main/java/cn/escheduler/alert/utils/MailUtils.java
  49. 14
      escheduler-alert/src/main/resources/alert.properties
  50. 39
      escheduler-alert/src/main/resources/mail_templates/alert_mail_template.ftl
  51. 110
      escheduler-alert/src/test/java/cn/escheduler/alert/utils/EnterpriseWeChatUtilsTest.java
  52. 6
      escheduler-api/pom.xml
  53. 2
      escheduler-api/src/main/java/cn/escheduler/api/controller/AccessTokenController.java
  54. 2
      escheduler-api/src/main/java/cn/escheduler/api/controller/AlertGroupController.java
  55. 2
      escheduler-api/src/main/java/cn/escheduler/api/controller/DataSourceController.java
  56. 52
      escheduler-api/src/main/java/cn/escheduler/api/controller/ProcessDefinitionController.java
  57. 34
      escheduler-api/src/main/java/cn/escheduler/api/controller/ProcessInstanceController.java
  58. 2
      escheduler-api/src/main/java/cn/escheduler/api/controller/ProjectController.java
  59. 2
      escheduler-api/src/main/java/cn/escheduler/api/controller/QueueController.java
  60. 6
      escheduler-api/src/main/java/cn/escheduler/api/controller/ResourcesController.java
  61. 31
      escheduler-api/src/main/java/cn/escheduler/api/controller/SchedulerController.java
  62. 2
      escheduler-api/src/main/java/cn/escheduler/api/controller/TaskInstanceController.java
  63. 3
      escheduler-api/src/main/java/cn/escheduler/api/controller/TenantController.java
  64. 23
      escheduler-api/src/main/java/cn/escheduler/api/controller/UsersController.java
  65. 2
      escheduler-api/src/main/java/cn/escheduler/api/controller/WorkerGroupController.java
  66. 12
      escheduler-api/src/main/java/cn/escheduler/api/enums/Status.java
  67. 2
      escheduler-api/src/main/java/cn/escheduler/api/service/ExecutorService.java
  68. 123
      escheduler-api/src/main/java/cn/escheduler/api/service/ProcessDefinitionService.java
  69. 80
      escheduler-api/src/main/java/cn/escheduler/api/service/ProcessInstanceService.java
  70. 11
      escheduler-api/src/main/java/cn/escheduler/api/service/ProjectService.java
  71. 73
      escheduler-api/src/main/java/cn/escheduler/api/service/ResourcesService.java
  72. 49
      escheduler-api/src/main/java/cn/escheduler/api/service/SchedulerService.java
  73. 25
      escheduler-api/src/main/java/cn/escheduler/api/service/TenantService.java
  74. 24
      escheduler-api/src/main/java/cn/escheduler/api/service/UsersService.java
  75. 1
      escheduler-api/src/main/resources/i18n/messages.properties
  76. 1
      escheduler-api/src/main/resources/i18n/messages_en_US.properties
  77. 1
      escheduler-api/src/main/resources/i18n/messages_zh_CN.properties
  78. 24
      escheduler-api/src/test/java/cn/escheduler/api/controller/ResourcesControllerTest.java
  79. 23
      escheduler-api/src/test/java/cn/escheduler/api/service/ProcessDefinitionServiceTest.java
  80. 12
      escheduler-api/src/test/java/cn/escheduler/api/service/ProcessInstanceServiceTest.java
  81. 45
      escheduler-common/pom.xml
  82. 10
      escheduler-common/src/main/java/cn/escheduler/common/Constants.java
  83. 35
      escheduler-common/src/main/java/cn/escheduler/common/enums/TaskRecordStatus.java
  84. 8
      escheduler-common/src/main/java/cn/escheduler/common/enums/TaskType.java
  85. 3
      escheduler-common/src/main/java/cn/escheduler/common/queue/TaskQueueZkImpl.java
  86. 14
      escheduler-common/src/main/java/cn/escheduler/common/task/sql/SqlParameters.java
  87. 9
      escheduler-common/src/main/java/cn/escheduler/common/utils/DependentUtils.java
  88. 31
      escheduler-common/src/main/java/cn/escheduler/common/utils/HadoopUtils.java
  89. 70
      escheduler-common/src/main/java/cn/escheduler/common/utils/IpUtils.java
  90. 9
      escheduler-common/src/main/java/cn/escheduler/common/utils/JSONUtils.java
  91. 2
      escheduler-common/src/main/java/cn/escheduler/common/utils/OSUtils.java
  92. 14
      escheduler-common/src/main/java/cn/escheduler/common/utils/ParameterUtils.java
  93. 17
      escheduler-common/src/main/java/cn/escheduler/common/utils/PropertyUtils.java
  94. 35
      escheduler-common/src/main/java/cn/escheduler/common/utils/dependent/DependentDateUtils.java
  95. 7
      escheduler-common/src/main/java/cn/escheduler/common/utils/placeholder/BusinessTimeUtils.java
  96. 1
      escheduler-common/src/main/resources/zookeeper.properties
  97. 31
      escheduler-common/src/test/java/cn/escheduler/common/utils/DependentUtilsTest.java
  98. 8
      escheduler-dao/pom.xml
  99. 67
      escheduler-dao/src/main/java/cn/escheduler/dao/ProcessDao.java
  100. 46
      escheduler-dao/src/main/java/cn/escheduler/dao/TaskRecordDao.java
  101. Some files were not shown because too many files have changed in this diff Show More

41
Dockerfile

@ -1,41 +0,0 @@
#Maintin by jimmy
#Email: zhengge2012@gmail.com
FROM anapsix/alpine-java:8_jdk
WORKDIR /tmp
RUN wget http://archive.apache.org/dist/maven/maven-3/3.6.1/binaries/apache-maven-3.6.1-bin.tar.gz
RUN tar -zxvf apache-maven-3.6.1-bin.tar.gz && rm apache-maven-3.6.1-bin.tar.gz
RUN mv apache-maven-3.6.1 /usr/lib/mvn
RUN chown -R root:root /usr/lib/mvn
RUN ln -s /usr/lib/mvn/bin/mvn /usr/bin/mvn
RUN wget https://archive.apache.org/dist/zookeeper/zookeeper-3.4.6/zookeeper-3.4.6.tar.gz
RUN tar -zxvf zookeeper-3.4.6.tar.gz
RUN mv zookeeper-3.4.6 /opt/zookeeper
RUN rm -rf zookeeper-3.4.6.tar.gz
RUN echo "export ZOOKEEPER_HOME=/opt/zookeeper" >>/etc/profile
RUN echo "export PATH=$PATH:$ZOOKEEPER_HOME/bin" >>/etc/profile
ADD conf/zoo.cfg /opt/zookeeper/conf/zoo.cfg
#RUN source /etc/profile
#RUN zkServer.sh start
RUN apk add --no-cache git npm nginx mariadb mariadb-client mariadb-server-utils pwgen
WORKDIR /opt
RUN git clone https://github.com/analysys/EasyScheduler.git
WORKDIR /opt/EasyScheduler
RUN mvn -U clean package assembly:assembly -Dmaven.test.skip=true
RUN mv /opt/EasyScheduler/target/escheduler-1.0.0-SNAPSHOT /opt/easyscheduler
WORKDIR /opt/EasyScheduler/escheduler-ui
RUN npm install
RUN npm audit fix
RUN npm run build
RUN mkdir -p /opt/escheduler/front/server
RUN cp -rfv dist/* /opt/escheduler/front/server
WORKDIR /
RUN rm -rf /opt/EasyScheduler
#configure mysql server https://github.com/yobasystems/alpine-mariadb/tree/master/alpine-mariadb-amd64
ADD conf/run.sh /scripts/run.sh
RUN mkdir /docker-entrypoint-initdb.d && \
mkdir /scripts/pre-exec.d && \
mkdir /scripts/pre-init.d && \
chmod -R 755 /scripts
RUN rm -rf /var/cache/apk/*
EXPOSE 8888
ENTRYPOINT ["/scripts/run.sh"]

2
README.md

@ -50,7 +50,7 @@ Easy Scheduler
### 近期研发计划
EasyScheduler的工作计划:<a href="https://github.com/analysys/EasyScheduler/projects/1" target="_blank">研发计划</a> ,其中 In Develop卡片下是1.0.2版本的功能,TODO卡片是待做事项(包括 feature ideas)
EasyScheduler的工作计划:<a href="https://github.com/analysys/EasyScheduler/projects/1" target="_blank">研发计划</a> ,其中 In Develop卡片下是1.1.0版本的功能,TODO卡片是待做事项(包括 feature ideas)
### 贡献代码

53
docs/zh_CN/1.0.3-release.md

@ -0,0 +1,53 @@
Easy Scheduler Release 1.0.3
===
Easy Scheduler 1.0.3是1.x系列中的第四个版本。
新特性:
===
- [[EasyScheduler-254](https://github.com/analysys/EasyScheduler/issues/254)] 流程定义删除和批量删除
- [[EasyScheduler-347](https://github.com/analysys/EasyScheduler/issues/347)] 任务依赖增加“今日”
- [[EasyScheduler-273](https://github.com/analysys/EasyScheduler/issues/273)]sql任务添加title
- [[EasyScheduler-247](https://github.com/analysys/EasyScheduler/issues/247)]API在线文档
- [[EasyScheduler-319](https://github.com/analysys/EasyScheduler/issues/319)] 单机容错
- [[EasyScheduler-253](https://github.com/analysys/EasyScheduler/issues/253)] 项目增加流程定义统计和运行流程实例统计
- [[EasyScheduler-292](https://github.com/analysys/EasyScheduler/issues/292)] 启用SSL的邮箱发送邮件
- [[EasyScheduler-77](https://github.com/analysys/EasyScheduler/issues/77)] 定时管理、工作流定义添加删除功能
- [[EasyScheduler-380](https://github.com/analysys/EasyScheduler/issues/380)] 服务监控功能
- [[EasyScheduler-382](https://github.com/analysys/EasyScheduler/issues/382)] 项目增加流程定义统计和运行流程实例统计
增强:
===
- [[EasyScheduler-192](https://github.com/analysys/EasyScheduler/issues/192)] 租户删除前可以考虑校验租户和资源
- [[EasyScheduler-294](https://github.com/analysys/EasyScheduler/issues/294)] 删除实例时候,没有删除对应zookeeper队列里的任务
- [[EasyScheduler-185](https://github.com/analysys/EasyScheduler/issues/185)] 项目删除工作流定义还存在
- [[EasyScheduler-206](https://github.com/analysys/EasyScheduler/issues/206)] 优化部署,完善docker化支持
- [[EasyScheduler-381](https://github.com/analysys/EasyScheduler/issues/381)] 前端一键部署脚本支持ubuntu
修复:
===
- [[EasyScheduler-255](https://github.com/analysys/EasyScheduler/issues/255)]子父流程全局变量覆盖,子流程继承父流程全局变量并可以重写
- [[EasyScheduler-256](https://github.com/analysys/EasyScheduler/issues/256)]子父流程参数显示异常
- [[EasyScheduler-186](https://github.com/analysys/EasyScheduler/issues/186)]所有查询中只要输入%会返回所有数据
- [[EasyScheduler-185](https://github.com/analysys/EasyScheduler/issues/185)]项目删除工作流定义还存在
- [[EasyScheduler-266](https://github.com/analysys/EasyScheduler/issues/266)]Stop process return: process definition 1 not on line
- [[EasyScheduler-300](https://github.com/analysys/EasyScheduler/issues/300)] 超时告警时间单位
- [[EasyScheduler-235](https://github.com/analysys/EasyScheduler/issues/235)]nginx超时连接问题修复
- [[EasyScheduler-272](https://github.com/analysys/EasyScheduler/issues/272)]管理员不能生成token
- [[EasyScheduler-277](https://github.com/analysys/EasyScheduler/issues/277)]save global parameters error
- [[EasyScheduler-183](https://github.com/analysys/EasyScheduler/issues/183)]创建中文名称的Worker分组报错
- [[EasyScheduler-377](https://github.com/analysys/EasyScheduler/issues/377)]资源文件重命名只修改描述时会报名称已存在错误
- [[EasyScheduler-235](https://github.com/analysys/EasyScheduler/issues/235)]创建spark数据源,点击“测试连接”,系统回退回到登入页面
- [[EasyScheduler-83](https://github.com/analysys/EasyScheduler/issues/83)]1.0.1版本启动api server报错
- [[EasyScheduler-379](https://github.com/analysys/EasyScheduler/issues/379)]跨天恢复执行定时任务时,时间参数不对
- [[EasyScheduler-383](https://github.com/analysys/EasyScheduler/issues/383)]sql邮件不显示前面的空行
感谢:
===
最后但最重要的是,没有以下伙伴的贡献就没有新版本的诞生:
Baoqi, jimmy201602, samz406, petersear, millionfor, hyperknob, fanguanqun, yangqinlong, qq389401879,
feloxx, coding-now, hymzcn, nysyxxg, chgxtony
以及微信群里众多的热心伙伴!在此非常感谢!

96
docs/zh_CN/EasyScheduler-FAQ.md

@ -0,0 +1,96 @@
Q:单机运行服务老挂,应该是内存不够,测试机器4核8G。生产环境需要分布式,如果单机的话建议的配置是?
A: Easy Scheduler有5个服务组成,这些服务本身需要的内存和cpu不多,
| 服务 | 内存 | cpu核数 |
| ------------ | ---- | ------- |
| MasterServer | 2G | 2核 |
| WorkerServer | 2G | 2核 |
| ApiServer | 512M | 1核 |
| AlertServer | 512M | 1核 |
| LoggerServer | 512M | 1核 |
注意:由于如果任务较多,WorkerServer所在机器建议物理内存在16G以上
---
Q: 管理员为什么不能创建项目?
A: 管理员目前属于"纯管理", 没有租户,即没有linux上对应的用户,所以没有执行权限, 但是有所有的查看权限。如果需要创建项目等业务操作,请使用管理员创建租户和普通用户,然后使用普通用户登录进行操作
---
Q: 系统支持哪些邮箱?
A: 支持绝大多数邮箱,qq、163、126、139、outlook、aliyun等皆可支持
---
Q:常用的系统变量时间参数有哪些,如何使用?
A: 请参考使用手册中的系统参数
---
Q:pip install kazoo 这个安装报错。是必须安装的吗?
A: 这个是python连接zookeeper需要使用到的
---
Q: 如果alert、api、logger服务任意一个宕机,任务还会正常执行吧
A: 不影响,影响正在运行中的任务的服务有Master和Worker服务
---
Q: 这个怎么指定机器运行任务的啊
A: 通过worker分组: 这个流程只能在指定的机器组里执行。默认是Default,可以在任一worker上执行。
---
Q: 跨用户的任务依赖怎么实现呢, 比如A用户写了一个任务,B用户需要依赖这个任务
就比如说 我们数仓组 写了一个 中间宽表的任务, 其他业务部门想要使用这个中间表的时候,他们应该是另外一个用户,怎么依赖这个中间表呢
A: 有两种情况,一个是要运行这个宽表任务,可以使用子工作流把宽表任务放到自己的工作流里面。另一个是检查这个宽表任务有没有完成,可以使用依赖节点来检查这个宽表任务在指定的时间周期有没有完成。
---
Q: 启动WorkerServer服务时不能正常启动,报以下信息是什么原因?
```
[INFO] 2019-05-06 16:39:31.492 cn.escheduler.server.zk.ZKWorkerClient:[155] - register failure , worker already started on : 127.0.0.1, please wait for a moment and try again
```
A:Worker/Master Server在启动时,会向Zookeeper注册自己的启动信息,是Zookeeper的临时节点,如果两次启动时间间隔较短的情况,上次启动的Worker/Master Server在Zookeeper的会话还未过期,会出现上述信息,处理办法是等待session过期,一般是1分钟左右
----
Q: 编译时escheduler-grpc模块一直报错:Information:java: Errors occurred while compiling module 'escheduler-rpc', 找不到LogParameter、RetStrInfo、RetByteInfo等class类
A: 这是因为rpc源码包是google Grpc实现的,需要使用maven进行编译,在根目录下执行:mvn -U clean package assembly:assembly -Dmaven.test.skip=true , 然后刷新下整个项目
----
Q:EasyScheduler支持windows上运行么?
A: 建议在Ubuntu、Centos上运行,暂不支持windows上运行,不过windows上可以进行编译。开发调试的话建议Ubuntu或者mac上进行。
-----
Q:任务为什么不执行?
A: 不执行的原因:
查看command表里有没有内容?
查看Master server的运行日志:
查看Worker Server的运行日志

1
docs/zh_CN/README.md

@ -63,3 +63,4 @@ The fastest way to get response from our developers is to submit issues, or ad

13
docs/zh_CN/SUMMARY.md

@ -8,7 +8,14 @@
* 后端部署文档
* [准备工作](后端部署文档.md#1、准备工作)
* [部署](后端部署文档.md#2、部署)
* [系统使用手册](系统使用手册.md#使用手册)
* [快速上手](快速上手.md#快速上手)
* 系统使用手册
* [快速上手](系统使用手册.md#快速上手)
* [操作指南](系统使用手册.md#操作指南)
* [安全中心(权限系统)](系统使用手册.md#安全中心(权限系统))
* [监控中心](系统使用手册.md#监控中心)
* [任务节点类型和参数设置](系统使用手册.md#任务节点类型和参数设置)
* [系统参数](系统使用手册.md#系统参数)
* [系统架构设计](系统架构设计.md#系统架构设计)
* 前端开发文档
* [开发环境搭建](前端开发文档.md#开发环境搭建)
@ -22,9 +29,13 @@
* [开发环境搭建](后端开发文档.md#项目编译)
* [自定义任务插件文档](任务插件开发.md#任务插件开发)
* [接口文档](http://52.82.13.76:8888/easyscheduler/doc.html?language=zh_CN&lang=cn)
* FAQ
* [FAQ](EasyScheduler-FAQ.md)
* 系统版本升级文档
* [版本升级](升级文档.md)
* 历次版本发布内容
* [1.0.3 release](1.0.3-release.md)
* [1.0.2 release](1.0.2-release.md)
* [1.0.1 release](1.0.1-release.md)
* [1.0.0 release 正式开源]

BIN
docs/zh_CN/images/complement.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 57 KiB

BIN
docs/zh_CN/images/create-queue.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 83 KiB

BIN
docs/zh_CN/images/dag1.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 278 KiB

BIN
docs/zh_CN/images/dag2.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 243 KiB

BIN
docs/zh_CN/images/dag3.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 267 KiB

BIN
docs/zh_CN/images/dag4.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 24 KiB

BIN
docs/zh_CN/images/depend-node.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 242 KiB

BIN
docs/zh_CN/images/depend-node2.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 243 KiB

BIN
docs/zh_CN/images/depend-node3.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 215 KiB

BIN
docs/zh_CN/images/file-manage.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 116 KiB

BIN
docs/zh_CN/images/gant-pic.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 179 KiB

BIN
docs/zh_CN/images/global_parameter.png

Binary file not shown.

Before

Width:  |  Height:  |  Size: 101 KiB

After

Width:  |  Height:  |  Size: 113 KiB

BIN
docs/zh_CN/images/hive_edit.png

Binary file not shown.

Before

Width:  |  Height:  |  Size: 29 KiB

After

Width:  |  Height:  |  Size: 46 KiB

BIN
docs/zh_CN/images/hive_edit2.png

Binary file not shown.

Before

Width:  |  Height:  |  Size: 33 KiB

After

Width:  |  Height:  |  Size: 47 KiB

BIN
docs/zh_CN/images/instance-detail.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 245 KiB

BIN
docs/zh_CN/images/instance-list.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 193 KiB

BIN
docs/zh_CN/images/local_parameter.png

Binary file not shown.

Before

Width:  |  Height:  |  Size: 23 KiB

After

Width:  |  Height:  |  Size: 25 KiB

BIN
docs/zh_CN/images/master-jk.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 114 KiB

BIN
docs/zh_CN/images/mysql-jk.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 85 KiB

BIN
docs/zh_CN/images/mysql_edit.png

Binary file not shown.

Before

Width:  |  Height:  |  Size: 100 KiB

After

Width:  |  Height:  |  Size: 47 KiB

BIN
docs/zh_CN/images/postgressql_edit.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 44 KiB

BIN
docs/zh_CN/images/project.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 136 KiB

BIN
docs/zh_CN/images/run-work.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 42 KiB

BIN
docs/zh_CN/images/sql-node.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 280 KiB

BIN
docs/zh_CN/images/sql-node2.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 292 KiB

BIN
docs/zh_CN/images/task-list.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 143 KiB

BIN
docs/zh_CN/images/task-log.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 181 KiB

BIN
docs/zh_CN/images/task-log2.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 134 KiB

BIN
docs/zh_CN/images/time-schedule.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 54 KiB

BIN
docs/zh_CN/images/time-schedule2.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 44 KiB

BIN
docs/zh_CN/images/worker-jk.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 119 KiB

BIN
docs/zh_CN/images/worker1.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 88 KiB

BIN
docs/zh_CN/images/zk-jk.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 85 KiB

4
docs/zh_CN/前端部署文档.md

@ -5,9 +5,9 @@
## 1、准备工作
#### 下载安装包
目前最新安装包版本是1.0.2,下载地址: [码云下载](https://gitee.com/easyscheduler/EasyScheduler/attach_files/)
请下载最新版本的安装包,下载地址: [码云下载](https://gitee.com/easyscheduler/EasyScheduler/attach_files/)
下载 escheduler-ui-1.0.2.tar.gz 后,解压`tar -zxvf escheduler-ui-1.0.2.tar.gz ./`后,进入`escheduler-ui`目录
下载 escheduler-ui-x.x.x.tar.gz 后,解压`tar -zxvf escheduler-ui-x.x.x.tar.gz ./`后,进入`escheduler-ui`目录

10
docs/zh_CN/后端部署文档.md

@ -4,7 +4,7 @@
## 1、准备工作
目前最新安装包版本是1.0.2,下载地址: [码云下载](https://gitee.com/easyscheduler/EasyScheduler/attach_files/) ,下载escheduler-backend-1.0.2.tar.gz(后端简称escheduler-backend),escheduler-ui-1.0.2.tar.gz(前端简称escheduler-ui)
请下载最新版本的安装包,下载地址: [码云下载](https://gitee.com/easyscheduler/EasyScheduler/attach_files/) ,下载escheduler-backend-x.x.x.tar.gz(后端简称escheduler-backend),escheduler-ui-x.x.x.tar.gz(前端简称escheduler-ui)
#### 准备一: 基础软件安装(必装项请自行安装)
@ -101,6 +101,12 @@ install.sh : 一键部署脚本
- 修改部署参数(根据自己服务器及业务情况):
- 修改 **install.sh**中的各参数,替换成自身业务所需的值
- monitorServerState 开关变量,在1.0.3版本中增加,控制是否启动自启动脚本(监控master,worker状态,如果掉线会自动启动)
默认值为"false"表示不启动自启动脚本,如果需要启动改为"true"
- hdfsStartupSate 开关变量,控制是否启动hdfs
默认值为"false"表示不启动hdfs
如果需要启动改为"true",启动hdfs需要自行创建hdfs根路径,也就是install.sh中的 hdfsPath
- 如果使用hdfs相关功能,需要拷贝**hdfs-site.xml**和**core-site.xml**到conf目录下
@ -143,7 +149,7 @@ install.sh : 一键部署脚本
### 2.2 编译源码来部署
将源码包release版本1.0.2下载后,解压进入根目录
将源码包release版本下载后,解压进入根目录
* 执行编译命令:

50
docs/zh_CN/快速上手.md

@ -0,0 +1,50 @@
# 快速上手
* 管理员用户登录
>地址:192.168.xx.xx:8888 用户名密码:admin/escheduler123
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/login.jpg" width="60%" />
</p>
* 创建队列
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/create-queue.png" width="60%" />
</p>
* 创建租户
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/addtenant.png" width="60%" />
</p>
* 创建普通用户
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/useredit2.png" width="60%" />
</p>
* 创建告警组
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/mail_edit.png" width="60%" />
</p>
* 使用普通用户登录
> 点击右上角用户名“退出”,重新使用普通用户登录。
* 项目管理->创建项目->点击项目名称
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/project.png" width="60%" />
</p>
* 点击工作流定义->创建工作流定义->上线流程定义
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/dag1.png" width="60%" />
</p>
* 运行流程定义->点击工作流实例->点击流程实例名称->双击任务节点->查看任务执行日志
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/task-log.png" width="60%" />
</p>

707
docs/zh_CN/系统使用手册.md

@ -1,213 +1,160 @@
# 使用手册
# 系统使用手册
## 登录
- 输入http://192.168.xx.xx:8888/view/login/index.html 网址,输入用户名:admin,密码:escheduler123 登录
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/login.jpg" width="60%" />
</p>
## 快速上手
- 登录之后每个页面的右上角都有用户的身份标识。点击下拉箭头包含用户信息和退出两个按钮
> 请参照[快速上手](快速上手.md)
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/logout.png" width="60%" />
</p>
## 操作指南
- 点击“用户信息”按钮,如下图:
- 管理员账号只能在权限上进行管理,不参与具体的业务,不能创建项目,不能对流程定义执行相关操作
- 以下操作需要使用普通用户登录系统才能进行。
### 创建项目
- 点击“项目管理->创建项目”,输入项目名称,项目描述,点击“提交”,创建新的项目。
- 点击项目名称,进入项目首页。
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/userinfo.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/project.png" width="60%" />
</p>
- 点击”修改”按钮,修改用户信息
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/useredit.png" width="60%" />
</p>
- 点击退出按钮则退出系统,返回登录页面
## 安全中心
> 项目首页其中包含任务状态统计,流程状态统计、流程定义统计、队列统计、命令统计
- 只有管理员才有安全中心,安全中心的主要功能是给管理员提供管理普通用户的功能。
- 管理员可以有多个,管理员是功能上的管理,不参与具体的业务。也就是说管理员是不能执行具体任务的。
### 租户管理
> 租户是Linux上的用户,用于作业的提交。
- 创建、编辑租户
- 任务状态统计:是指在指定时间范围内,统计任务实例中的待运行、失败、运行中、完成、成功的个数
- 流程状态统计:是指在指定时间范围内,统计流程实例中的待运行、失败、运行中、完成、成功的个数
- 流程定义统计:是统计该用户创建的流程定义及管理员授予该用户的流程定义
- 队列统计: worker执行队列统计,待执行的任务和待杀掉的任务个数
- 命令统计: 执行命令个数统计
### 创建工作流定义
- 进入项目首页,点击“工作流定义”,进入流程定义列表页。
- 点击“创建工作流”,创建新的流程定义。
- 拖拽“SHELL"节点到画布,新增一个Shell任务。
- 填写”节点名称“,”描述“,”脚本“字段。
- 选择“任务优先级”,级别高的任务在执行队列中会优先执行,相同优先级的任务按照先进先出的顺序执行。
- 超时告警, 填写”超时时长“,当任务执行时间超过**超时时长**可以告警并且超时失败。
- 填写"自定义参数",参考[自定义参数](#用户自定义参数)
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/addtenant.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/dag1.png" width="60%" />
</p>
- 租户编码:租户编码是Linux上的用户,唯一,不能重复
- 租户名称:租户的名称
- 队列:租户对应的YARN上的队列,在数据库 t_escheduler_queue 中设置
- 描述:租户的描述信息
### 用户管理
- 增加节点之间执行的先后顺序: 点击”线条连接“;如图示,任务1和任务3并行执行,当任务1执行完,任务2、3会同时执行。
> 用户是EasyScheduler上的用户,用于EasyScheduler上的功能操作。
- 创建、编辑用户
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/useredit2.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/dag2.png" width="60%" />
</p>
- 用户名称:用户的名称,唯一,不能重复
- 租户:设置该用户所属的租户
- 邮箱:输入用户的邮箱,用来邮件发送和任务告警
- 手机:输入用户的手机号
**注意:如果该用户切换了租户,则该用户所在租户下所有资源将复制到切换的新租户下**
- 授权
> 管理员可以对普通用户进行非其创建的项目、资源、数据源和UDF函数进行授权。因为项目、资源、数据源和UDF函数授权方式都是一样的,所以以项目授权为例介绍。
- 1.点击指定人的授权按钮,如下图:
- 删除依赖关系: 点击箭头图标”拖动节点和选中项“,选中连接线,点击删除图标,删除节点间依赖关系。
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/auth_user.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/dag3.png" width="60%" />
</p>
- 2.选中项目按钮,进行项目授权
- 点击”保存“,输入流程定义名称,流程定义描述,设置全局参数。
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/auth_project.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/dag4.png" width="60%" />
</p>
- 项目列表:是该用户未授权的项目
- 已选项目:是该用户已授权的项目。
- 特别注意:对于用户自己创建的项目,该用户拥有所有的权限。则项目列表和已选项目列表中不会体现。
### 告警组管理
- 其他类型节点,请参考 [任务节点类型和参数设置](#任务节点类型和参数设置)
> 告警组是告警用户抽象出来的组,使用告警组来管理用户。
### 执行流程定义
- **未上线状态的流程定义可以编辑,但是不可以运行**,所以先上线工作流
> 点击工作流定义,返回流程定义列表,点击”上线“图标,上线工作流定义。
- 新建、编辑邮件组
> "下线"工作流之前,要先将定时管理的定时下线,才能成功下线工作流定义
- 点击”运行“,执行工作流。运行参数说明:
* 失败策略:**当某一个任务节点执行失败时,其他并行的任务节点需要执行的策略**。”继续“表示:其他任务节点正常执行,”结束“表示:终止所有正在执行的任务,并终止整个流程。
* 通知策略:当流程结束,根据流程状态发送流程执行信息通知邮件。
* 流程优先级:流程运行的优先级,分五个等级:最高(HIGHEST),高(HIGH),中(MEDIUM),低(LOW),最低(LOWEST)。级别高的流程在执行队列中会优先执行,相同优先级的流程按照先进先出的顺序执行。
* worker分组: 这个流程只能在指定的机器组里执行。默认是Default,可以在任一worker上执行。
* 通知组: 当流程结束,或者发生容错时,会发送流程信息邮件到通知组里所有成员。
* 收件人:输入邮箱后按回车键保存。当流程结束、发生容错时,会发送告警邮件到收件人列表。
* 抄送人:输入邮箱后按回车键保存。当流程结束、发生容错时,会抄送告警邮件到抄送人列表。
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/mail_edit.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/run-work.png" width="60%" />
</p>
- 组名称:输入组的名称
- 组类型:支持邮件/短信两种
- 备注:输入告警组的备注信息
- 管理用户
* 补数: 执行指定日期的工作流定义,可以选择补数时间范围(目前只支持针对连续的天进行补数),比如要补5月1号到5月10号的数据,如图示:
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/user_manager.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/complement.png" width="60%" />
</p>
- 管理用户列表:是未添加到该组的用户列表
- 已选管理用户:是已添加到该组的用户列表
> 补数执行模式有**串行执行、并行执行**,串行模式下,补数会从5月1号到5月10号依次执行;并行模式下,会同时执行5月1号到5月10号的任务。
### 服务管理
> 服务管理是对EasyScheduler的Master、Worker的任务监控
#### Master
### 定时工作流定义
- 创建定时:"工作流定义->定时”
- 选择起止时间,在起止时间范围内,定时正常工作,超过范围,就不会再继续产生定时工作流实例了。
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/master.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/time-schedule.png" width="60%" />
</p>
#### Worker
- 添加一个每天凌晨5点执行一次的定时,如图示:
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/worker.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/time-schedule2.png" width="60%" />
</p>
## 资源中心
- 定时上线,**新创建的定时是下线状态,需要点击“定时管理->上线”,定时才能正常工作**。
> 资源中心主要分为文件管理和UDF函数管理。
文件管理:主要是用户的程序,脚本和配置文件需要上传到HDFS进行统一管理
UDF函数管理:对用户创建的UDF进行管理
### 查看流程实例
> 点击“工作流实例”,查看流程实例列表。
### 文件管理
#### 创建文件
> 点击工作流名称,查看任务执行状态。
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/file_create.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/instance-detail.png" width="60%" />
</p>
- 文件格式支持以下几种类型:txt、log、sh、conf、cfg、py、java、sql、xml、hql
#### 上传文件
> 点击任务节点,点击“查看日志”,查看任务执行日志。
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/file_upload.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/task-log.png" width="60%" />
</p>
- 文件名:输入文件的名称
- 描述:输入文件的描述信息
- 上传文件:点击上传按钮进行上传,将文件拖拽到上传区域,文件名会自动以上传的文件名称补全
#### 文件查看
> 对可查看的文件类型,点击 文件名称 可以查看文件详情
> 点击任务实例节点,点击**查看历史**,可以查看该流程实例运行的该任务实例列表
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/file_detail.png" width="60%" />
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/task_history.png" width="60%" />
</p>
#### 下载文件
> 可以在 文件详情 中点击右上角下载按钮下载文件,或者在文件列表后的下载按钮下载文件
#### 文件重命名
> 对工作流实例的操作:
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/file_rename.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/instance-list.png" width="60%" />
</p>
#### 删除
- 文件列表,点击 删除 按钮,删除文件
### UDF管理
#### 资源管理
> 资源管理和文件管理功能类似,不同之处是资源管理是上传的UDF函数,文件管理上传的是用户程序,脚本及配置文件
#### 函数管理
* 编辑:可以对已经终止的流程进行编辑,编辑后保存的时候,可以选择是否更新到流程定义。
* 重跑:可以对已经终止的流程进行重新执行。
* 恢复失败:针对失败的流程,可以执行恢复失败操作,从失败的节点开始执行。
* 停止:对正在运行的流程进行**停止**操作,后台会先对worker进程`kill`,再执行`kill -9`操作
* 暂停:可以对正在运行的流程进行**暂停**操作,系统状态变为**等待执行**,会等待正在执行的任务结束,暂停下一个要执行的任务。
* 恢复暂停:可以对暂停的流程恢复,直接从**暂停的节点**开始运行
* 删除:删除流程实例及流程实例下的任务实例
* 甘特图:Gantt图纵轴是某个流程实例下的任务实例的拓扑排序,横轴是任务实例的运行时间,如图示:
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/gant-pic.png" width="60%" />
</p>
##### 创建、编辑UDF函数
### 查看任务实例
> 点击“任务实例”,进入任务列表页,查询任务执行情况
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/udf_edit.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/task-list.png" width="60%" />
</p>
目前只支持HIVE的临时UDF函数
- UDF函数名称:输入UDF函数时的名称
- 包名类名:输入UDF函数的全路径
- 参数:用来标注函数的输入参数
- 数据库名:预留字段,用于创建永久UDF函数
- UDF资源:设置创建的UDF对应的资源文件
> 点击操作列中的“查看日志”,可以查看任务执行的日志情况。
## 数据源中心
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/task-log2.png" width="60%" />
</p>
> 数据源中心支持MySQL、POSTGRESQL、HIVE及Spark数据源
### 创建数据源
> 数据源中心支持MySQL、POSTGRESQL、HIVE及Spark等数据源
### 创建、编辑MySQL数据源
#### 创建、编辑MySQL数据源
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/mysql_edit.png" width="60%" />
</p>
- 点击“数据源中心->创建数据源”,根据需求创建不同类型的数据源。
- 数据源:选择MYSQL
- 数据源名称:输入数据源的名称
@ -219,12 +166,14 @@ UDF函数管理:对用户创建的UDF进行管理
- 数据库名:输入连接MySQL的数据库名称
- Jdbc连接参数:用于MySQL连接的参数设置,以JSON形式填写
### 创建、编辑POSTGRESQL数据源
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/postgresql_edit.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/mysql_edit.png" width="60%" />
</p>
> 点击“测试连接”,测试数据源是否可以连接成功。
#### 创建、编辑POSTGRESQL数据源
- 数据源:选择POSTGRESQL
- 数据源名称:输入数据源的名称
- 描述:输入数据源的描述
@ -235,13 +184,16 @@ UDF函数管理:对用户创建的UDF进行管理
- 数据库名:输入连接POSTGRESQL的数据库名称
- Jdbc连接参数:用于POSTGRESQL连接的参数设置,以JSON形式填写
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/postgresql_edit.png" width="60%" />
</p>
### 创建、编辑HIVE数据源
#### 创建、编辑HIVE数据源
1.使用HiveServer2方式连接
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/hive_edit.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/hive_edit.png" width="60%" />
</p>
- 数据源:选择HIVE
@ -257,24 +209,14 @@ UDF函数管理:对用户创建的UDF进行管理
2.使用HiveServer2 HA Zookeeper方式连接
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/hive_edit2.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/hive_edit2.png" width="60%" />
</p>
- 数据源:选择HIVE
- 数据源名称:输入数据源的名称
- 描述:输入数据源的描述
- IP/主机名:输入连接Zookeeper的集群
- 端口:输入连接Zookeeper的端口
- 用户名:设置连接HIVE的用户名
- 密码:设置连接HIVE的密码
- 数据库名:输入连接HIVE的数据库名称
- Jdbc连接参数:用于Zookeeper连接的参数设置,以JSON形式填写
### 创建、编辑Spark数据源
#### 创建、编辑Spark数据源
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/spark_datesource.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/spark_datesource.png" width="60%" />
</p>
- 数据源:选择Spark
@ -287,361 +229,383 @@ UDF函数管理:对用户创建的UDF进行管理
- 数据库名:输入连接Spark的数据库名称
- Jdbc连接参数:用于Spark连接的参数设置,以JSON形式填写
## 首页
### 上传资源
- 上传资源文件和udf函数,所有上传的文件和资源都会被存储到hdfs上,所以需要以下配置项:
```
conf/common/common.properties
-- hdfs.startup.state=true
conf/common/hadoop.properties
-- fs.defaultFS=hdfs://xxxx:8020
-- yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx
-- yarn.application.status.address=http://xxxx:8088/ws/v1/cluster/apps/%s
```
#### 文件管理
> 是对各种资源文件的管理,包括创建基本的txt/log/sh/conf等文件、上传jar包等各种类型文件,以及编辑、下载、删除等操作。
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/project_index.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/file-manage.png" width="60%" />
</p>
> 首页是对所有项目在指定时间范围内的任务状态、流程状态和流程定义的统计。
* 创建文件
> 文件格式支持以下几种类型:txt、log、sh、conf、cfg、py、java、sql、xml、hql
首页和项目首页的主要区别在于:
- 首页中的图表是没有链接的,项目首页中图表是有链接的
- 首页统计的是所有的项目,项目首页统计的是某一个项目
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/file_create.png" width="60%" />
</p>
## 项目管理
> 项目是调度对用户流程定义DAG分组的一个抽象
* 上传文件
### 创建、编辑项目
> 上传文件:点击上传按钮进行上传,将文件拖拽到上传区域,文件名会自动以上传的文件名称补全
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/project_edit.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/file_upload.png" width="60%" />
</p>
- 项目名称:输入项目的名称
- 描述:输入项目的描述
### 项目首页
* 文件查看
> 点击项目列表中的项目名称,可以跳转到指定的项目首页,如下图:
> 对可查看的文件类型,点击 文件名称 可以查看文件详情
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/project_index.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/file_detail.png" width="60%" />
</p>
- 项目首页其中包含四个部分,任务状态统计,流程状态统计、流程定义统计及统计的时间范围
- 任务状态统计:是指在指定时间范围内,统计任务实例中的待运行、失败、运行中、完成、成功的个数
- 流程状态统计:是指在指定时间范围内,统计流程实例中的待运行、失败、运行中、完成、成功的个数
- 流程定义统计:是统计该用户创建的流程定义及管理员授予该用户的流程定义
* 下载文件
> 可以在 文件详情 中点击右上角下载按钮下载文件,或者在文件列表后的下载按钮下载文件
* 文件重命名
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/file_rename.png" width="60%" />
</p>
注意:可以点击图,或者数量跳转到相应的任务实例,流程实例和流程定义列表
#### 删除
> 文件列表->点击"删除"按钮,删除指定文件
### 工作流
#### 资源管理
> 资源管理和文件管理功能类似,不同之处是资源管理是上传的UDF函数,文件管理上传的是用户程序,脚本及配置文件
> 工作流分为流程定义、流程实例和任务实例三个功能模块
* 上传udf资源
> 和上传文件相同。
- 流程定义:是可视化拖拽成的DAG的统称,它是静态的,没有状态
- 流程实例:对流程定义的每次实例化会生成一个流程实例,是动态的,是有状态的
- 任务实例:流程实例DAG中每个Task称为任务实例,是动态的,是有状态的
#### 函数管理
* 创建udf函数
> 点击“创建UDF函数”,输入udf函数参数,选择udf资源,点击“提交”,创建udf函数。
#### 流程定义
> 目前只支持HIVE的临时UDF函数
##### 创建工作流
- UDF函数名称:输入UDF函数时的名称
- 包名类名:输入UDF函数的全路径
- 参数:用来标注函数的输入参数
- 数据库名:预留字段,用于创建永久UDF函数
- UDF资源:设置创建的UDF对应的资源文件
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/definition_create.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/udf_edit.png" width="60%" />
</p>
- 左侧工具栏 => 是目前调度支持的任务类型,当前调度支持SHELL、子流程、存储过程、SQL、MR、Spark和Python七种任务类型
- 右上角图标 => 分别是拖动节点和选中项、选择线条连线、删除选中的线或节点、全屏和流程定义保持,其主要功能是DAG的绘制所用
##### 创建 SHELL节点
## 安全中心(权限系统)
> 拖动工具栏中的![PNG](https://analysys.github.io/EasyScheduler/zh_CN/images/toolbar_SHELL.png)任务节点到画板中,双击任务节点,如下图:
- 安全中心是只有管理员账户才有权限的功能,有队列管理、租户管理、用户管理、告警组管理、worker分组、令牌管理等功能,还可以对资源、数据源、项目等授权
- 管理员登录,默认用户名密码:admin/escheduler123
### 创建队列
- 队列是在执行spark、mapreduce等程序,需要用到“队列”参数时使用的。
- “安全中心”->“队列管理”->“创建队列”
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/shell_edit.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/create-queue.png" width="60%" />
</p>
- 节点名称:一个流程定义中的节点名称是唯一的
- 运行标志:标识这个节点是否能正常调度
- 描述信息:描述该节点的功能
- 失败重试次数:任务失败重新提交的次数,支持下拉和手填
- 失败重试间隔:任务失败重新提交任务的时间间隔,支持下拉和手填
- 脚本:用户开发的SHELL程序
- 资源:是指脚本中需要调用的资源文件列表
- 自定义参数:是SHELL局部的用户自定义参数,会替换脚本中以${变量}的内容
##### 创建 子流程 节点
### 添加租户
- 租户对应的是Linux的用户,用于worker提交作业所使用的用户。如果linux没有这个用户,worker会在执行脚本的时候创建这个用户。
- 租户编码:**租户编码是Linux上的用户,唯一,不能重复**
> 拖动工具栏中的![PNG](https://analysys.github.io/EasyScheduler/zh_CN/images/toolbar_SUB_PROCESS.png)任务节点到画板中,双击任务节点,如下图:
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/addtenant.png" width="60%" />
</p>
### 创建普通用户
- 用户分为**管理员用户**和**普通用户**
* 管理员只有**授权和用户管理**等权限,没有**创建项目和流程定义**的操作的权限
* 普通用户可以**创建项目和对流程定义的创建,编辑,执行**等操作。
* 注意:**如果该用户切换了租户,则该用户所在租户下所有资源将复制到切换的新租户下**
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/subprocess_edit.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/useredit2.png" width="60%" />
</p>
- 节点名称:一个流程定义中的节点名称是唯一的
- 运行标志:标识这个节点是否能正常调度
- 描述信息:描述该节点的功能
- 子节点:是选择子流程的流程定义,右上角进入该子节点可以跳转到所选子流程的流程定义
### 创建告警组
* 告警组是在启动时设置的参数,在流程结束以后会将流程的状态和其他信息以邮件形式发送给告警组。
- 新建、编辑告警组
##### 创建 存储过程 节点
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/mail_edit.png" width="60%" />
</p>
> 拖动工具栏中的![PNG](https://analysys.github.io/EasyScheduler/zh_CN/images/toolbar_PROCEDURE.png)任务节点到画板中,双击任务节点,如下图:
### 创建worker分组
- worker分组,提供了一种让任务在指定的worker上运行的机制。管理员设置worker分组,每个任务节点可以设置该任务运行的worker分组,如果任务指定的分组被删除或者没有指定分组,则该任务会在流程实例指定的worker上运行。
- worker分组内多个ip地址(**不能写别名**),以**英文逗号**分隔
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/procedure_edit.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/worker1.png" width="60%" />
</p>
- 节点名称:一个流程定义中的节点名称是唯一的
- 运行标志:标识这个节点是否能正常调度
- 描述信息:描述该节点的功能
- 失败重试次数:任务失败重新提交的次数,支持下拉和手填
- 失败重试间隔:任务失败重新提交任务的时间间隔,支持下拉和手填
- 数据源:存储过程的数据源类型支持MySQL和POSTGRESQL两种,选择对应的数据源
- 方法:是存储过程的方法名称
- 自定义参数:存储过程的自定义参数类型支持IN、OUT两种,数据类型支持VARCHAR、INTEGER、LONG、FLOAT、DOUBLE、DATE、TIME、TIMESTAMP、BOOLEAN九种数据类型
### 令牌管理
- 由于后端接口有登录检查,令牌管理,提供了一种可以通过调用接口的方式对系统进行各种操作。
- 调用示例:
```令牌调用示例
/**
* test token
*/
public void doPOSTParam()throws Exception{
// create HttpClient
CloseableHttpClient httpclient = HttpClients.createDefault();
// create http post request
HttpPost httpPost = new HttpPost("http://127.0.0.1:12345/escheduler/projects/create");
httpPost.setHeader("token", "123");
// set parameters
List<NameValuePair> parameters = new ArrayList<NameValuePair>();
parameters.add(new BasicNameValuePair("projectName", "qzw"));
parameters.add(new BasicNameValuePair("desc", "qzw"));
UrlEncodedFormEntity formEntity = new UrlEncodedFormEntity(parameters);
httpPost.setEntity(formEntity);
CloseableHttpResponse response = null;
try {
// execute
response = httpclient.execute(httpPost);
// response status code 200
if (response.getStatusLine().getStatusCode() == 200) {
String content = EntityUtils.toString(response.getEntity(), "UTF-8");
System.out.println(content);
}
} finally {
if (response != null) {
response.close();
}
httpclient.close();
}
}
```
##### 创建 SQL 节点
### 授予权限
- 授予权限包括项目权限,资源权限,数据源权限,UDF函数权限。
> 管理员可以对普通用户进行非其创建的项目、资源、数据源和UDF函数进行授权。因为项目、资源、数据源和UDF函数授权方式都是一样的,所以以项目授权为例介绍。
> 拖动工具栏中的![PNG](https://analysys.github.io/EasyScheduler/zh_CN/images/toolbar_SQL.png)任务节点到画板中,双击任务节点,如下图:
> 注意:**对于用户自己创建的项目,该用户拥有所有的权限。则项目列表和已选项目列表中不会体现**
- 1.点击指定人的授权按钮,如下图:
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/sql_edit.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/auth_user.png" width="60%" />
</p>
- 节点名称:一个流程定义中的节点名称是唯一的
- 运行标志:标识这个节点是否能正常调度
- 描述信息:描述该节点的功能
- 失败重试次数:任务失败重新提交的次数,支持下拉和手填
- 失败重试间隔:任务失败重新提交任务的时间间隔,支持下拉和手填
- 数据源:SQL数据源支持MySQL、POSTGRESQL、HIVE和Spark四中数据源类型,选择对应的数据源
- sql类型:支持查询和非查询两种,查询是select类型的查询,是有结果集返回的,可以指定邮件通知为表格、附件或表格附件三种模板。非查询是没有结果集返回的,是针对update、delete、insert三种类型的操作
- sql参数:输入参数格式为key1=value1;key2=value2…
- sql语句:SQL语句
- UDF函数:对于HIVE类型的数据源,可以引用资源中心中创建的UDF函数,其他类型的数据源暂不支持UDF函数
- 自定义参数:SQL任务类型自定义参数类型和数据类型同存储过程任务类型一样。区别在于SQL任务类型自定义参数会替换sql语句中${变量},而存储过程是自定义参数顺序的给方法设置值
##### 创建 MR 节点
> 拖动工具栏中的![PNG](https://analysys.github.io/EasyScheduler/zh_CN/images/toolbar_MR.png)任务节点到画板中,双击任务节点,如下图:
(1) JAVA程序
- 2.选中项目按钮,进行项目授权
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/mr_java.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/auth_project.png" width="60%" />
</p>
- 节点名称:一个流程定义中的节点名称是唯一的
- 运行标志:标识这个节点是否能正常调度
- 描述信息:描述该节点的功能
- 失败重试次数:任务失败重新提交的次数,支持下拉和手填
- 失败重试间隔:任务失败重新提交任务的时间间隔,支持下拉和手填
- 主函数的class:是MR程序的入口Main Class的全路径
- 程序类型:选择JAVA语言
- 主jar包:是MR的jar包
- 命令行参数:是设置MR程序的输入参数,支持自定义参数变量的替换
- 其他参数:支持 –D、-files、-libjars、-archives格式
- 资源: 如果其他参数中引用了资源文件,需要在资源中选择指定
- 自定义参数:是MR局部的用户自定义参数,会替换脚本中以${变量}的内容
(2) Python程序
## 监控中心
### 服务管理
- 服务管理主要是对系统中的各个服务的健康状况和基本信息的监控和显示
#### master监控
- 主要是master的相关信息。
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/mr_edit.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/master-jk.png" width="60%" />
</p>
- 节点名称:一个流程定义中的节点名称是唯一的
- 运行标志:标识这个节点是否能正常调度
- 描述信息:描述该节点的功能
- 失败重试次数:任务失败重新提交的次数,支持下拉和手填
- 失败重试间隔:任务失败重新提交任务的时间间隔,支持下拉和手填
- 程序类型:选择Python语言
- 主jar包:是运行MR的Python jar包
- 其他参数:支持 –D、-mapper、-reducer、-input -output格式,这里可以设置用户自定义参数的输入,比如:
- -mapper "mapper.py 1" -file mapper.py -reducer reducer.py -file reducer.py –input /journey/words.txt -output /journey/out/mr/${currentTimeMillis}
- 其中 -mapper 后的 mapper.py 1是两个参数,第一个参数是mapper.py,第二个参数是1
- 资源: 如果其他参数中引用了资源文件,需要在资源中选择指定
- 自定义参数:是MR局部的用户自定义参数,会替换脚本中以${变量}的内容
#### worker监控
- 主要是worker的相关信息。
##### 创建 Spark 节点
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/worker-jk.png" width="60%" />
</p>
> 拖动工具栏中的![PNG](https://analysys.github.io/EasyScheduler/zh_CN/images/toolbar_SPARK.png)任务节点到画板中,双击任务节点,如下图:
#### Zookeeper监控
- 主要是zookeeper中各个worker和master的相关配置信息。
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/spark_edit.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/zk-jk.png" width="60%" />
</p>
- 节点名称:一个流程定义中的节点名称是唯一的
- 运行标志:标识这个节点是否能正常调度
- 描述信息:描述该节点的功能
- 失败重试次数:任务失败重新提交的次数,支持下拉和手填
- 失败重试间隔:任务失败重新提交任务的时间间隔,支持下拉和手填
- 程序类型:支持JAVA、Scala和Python三种语言
- 主函数的class:是Spark程序的入口Main Class的全路径
- 主jar包:是Spark的jar包
- 部署方式:支持yarn-cluster、yarn-client、和local三种模式
- Driver内核数:可以设置Driver内核数及内存数
- Executor数量:可以设置Executor数量、Executor内存数和Executor内核数
- 命令行参数:是设置Spark程序的输入参数,支持自定义参数变量的替换。
- 其他参数:支持 --jars、--files、--archives、--conf格式
- 资源:如果其他参数中引用了资源文件,需要在资源中选择指定
- 自定义参数:是MR局部的用户自定义参数,会替换脚本中以${变量}的内容
#### Mysql监控
- 主要是mysql的健康状况
注意:JAVA和Scala只是用来标识,没有区别,如果是Python开发的Spark则没有主函数的class,其他都是一样
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/mysql-jk.png" width="60%" />
</p>
##### 创建 Python 节点
## 任务节点类型和参数设置
> 拖动工具栏中的![PNG](https://analysys.github.io/EasyScheduler/zh_CN/images/toolbar_PYTHON.png)任务节点到画板中,双击任务节点,如下图:
### Shell节点
- shell节点,在worker执行的时候,会生成一个临时shell脚本,使用租户同名的linux用户执行这个脚本。
> 拖动工具栏中的![PNG](https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_SHELL.png)任务节点到画板中,双击任务节点,如下图:
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/python_edit.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/shell_edit.png" width="60%" />
</p>
- 节点名称:一个流程定义中的节点名称是唯一的
- 运行标志:标识这个节点是否能正常调度
- 运行标志:标识这个节点是否能正常调度,如果不需要执行,可以打开禁止执行开关。
- 描述信息:描述该节点的功能
- 失败重试次数:任务失败重新提交的次数,支持下拉和手填
- 失败重试间隔:任务失败重新提交任务的时间间隔,支持下拉和手填
- 脚本:用户开发的Python程序
- 脚本:用户开发的SHELL程序
- 资源:是指脚本中需要调用的资源文件列表
- 自定义参数:是Python局部的用户自定义参数,会替换脚本中以${变量}的内容
##### 创建 依赖 节点
> 任务依赖分为水平依赖和垂直依赖
- 水平依赖就是指DAG图的有向依赖,是同一个流程实例任务节点的前驱,后继之间的依赖关系
- 垂直依赖是流程实例之间的任务依赖,基于定时的依赖。
- 自定义参数:是SHELL局部的用户自定义参数,会替换脚本中以${变量}的内容
> 拖动工具栏中的![PNG](https://analysys.github.io/EasyScheduler/zh_CN/images/toolbar_DEPENDENT.png)任务节点到画板中,双击任务节点,如下图:
### 子流程节点
- 子流程节点,就是把外部的某个工作流定义当做自己的一个任务节点去执行。
> 拖动工具栏中的![PNG](https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_SUB_PROCESS.png)任务节点到画板中,双击任务节点,如下图:
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/dependent_edit.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/subprocess_edit.png" width="60%" />
</p>
- 节点名称:一个流程定义中的节点名称是唯一的
- 运行标志:标识这个节点是否能正常调度
- 描述信息:描述该节点的功能
- 失败重试次数:任务失败重新提交的次数,支持下拉和手填
- 失败重试间隔:任务失败重新提交任务的时间间隔,支持下拉和手填
- 任务依赖:增加依赖条件,选择依赖流程定义、节点名称(默认为全部节点)、依赖周期、依赖时间点
- 子节点:是选择子流程的流程定义,右上角进入该子节点可以跳转到所选子流程的流程定义
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/dependent_edit3.png" width="60%" />
</p>
### 依赖(DEPENDENT)节点
- 依赖节点,就是**依赖检查节点**。比如A流程依赖昨天的B流程执行成功,依赖节点会去检查B流程在昨天是否有执行成功的实例。
- 选择多个依赖条件之间的关系:或、且
> 拖动工具栏中的![PNG](https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_DEPENDENT.png)任务节点到画板中,双击任务节点,如下图:
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/dependent_edit4.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/dependent_edit.png" width="60%" />
</p>
#### 流程实例列表
> 流程实例列表页是可以显示所有本项目下所有流程实例的列表,并有对流程实例进行名称、状态、时间等字段的筛选功能。
> 通过列表页可以直接对某一个流程实例进行编辑、重跑、恢复失败、暂停、停止、恢复暂停、删除、查看甘特图等操作.
> 依赖节点提供了逻辑判断功能,比如检查昨天的B流程是否成功,或者C流程是否执行成功。
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/process-list.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/depend-node.png" width="80%" />
</p>
- 编辑功能: 对已经完成的流程实例,点击编辑按钮,可以对其编辑,如图
> 例如,A流程为周报任务,B、C流程为天任务,A任务需要B、C任务在上周的每一天都执行成功,如图示
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/process-list2.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/depend-node2.png" width="80%" />
</p>
- 查看流程实例运行变量
> 假如,周报A同时还需要自身在上周二执行成功:
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/variable_view.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/depend-node3.png" width="80%" />
</p>
- 点击隐藏按钮,查看流程实例运行变量。如下图:
### 存储过程节点
- 根据选择的数据源,执行存储过程。
> 拖动工具栏中的![PNG](https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_PROCEDURE.png)任务节点到画板中,双击任务节点,如下图:
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/variable_view2.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/procedure_edit.png" width="60%" />
</p>
- 点击变量是对变量的复制
- 点击"重跑",可以对已经完成的流程实例进行重新运行操作,如图:
- 数据源:存储过程的数据源类型支持MySQL和POSTGRESQL两种,选择对应的数据源
- 方法:是存储过程的方法名称
- 自定义参数:存储过程的自定义参数类型支持IN、OUT两种,数据类型支持VARCHAR、INTEGER、LONG、FLOAT、DOUBLE、DATE、TIME、TIMESTAMP、BOOLEAN九种数据类型
### SQL节点
- 执行非查询SQL功能
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/process-list3.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/sql-node.png" width="60%" />
</p>
- 点击"恢复失败", 可以对失败的流程进行恢复,直接从失败的任务节点开始运行。如图:
- 执行查询SQL功能,可以选择通过表格和附件形式发送邮件到指定的收件人。
> 拖动工具栏中的![PNG](https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_SQL.png)任务节点到画板中,双击任务节点,如下图:
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/process-list4.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/sql-node2.png" width="60%" />
</p>
- 点击"暂停", 可以对正在运行的流程进行**暂停**操作,如图:
- 数据源:选择对应的数据源
- sql类型:支持查询和非查询两种,查询是select类型的查询,是有结果集返回的,可以指定邮件通知为表格、附件或表格附件三种模板。非查询是没有结果集返回的,是针对update、delete、insert三种类型的操作
- sql参数:输入参数格式为key1=value1;key2=value2…
- sql语句:SQL语句
- UDF函数:对于HIVE类型的数据源,可以引用资源中心中创建的UDF函数,其他类型的数据源暂不支持UDF函数
- 自定义参数:SQL任务类型自定义参数类型和数据类型同存储过程任务类型一样。区别在于SQL任务类型自定义参数会替换sql语句中${变量},而存储过程是自定义参数顺序的给方法设置值
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/process-list-pause.png" width="60%" />
</p>
### SPARK节点
- 通过SPARK节点,可以直接执行SPARK程序,对于spark节点,worker会使用`spark-submit`方式提交任务
- 点击"停止",可以对正在运行的流程进行**停止**操作,如图:
> 拖动工具栏中的![PNG](https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_SPARK.png)任务节点到画板中,双击任务节点,如下图:
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/process-list-stop.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/spark_edit.png" width="60%" />
</p>
- 点击"恢复暂停",可以对暂停的流程恢复,直接从**暂停的节点**开始运行,如图:
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/process-list-recovery-pause.png" width="60%" />
</p>
- 程序类型:支持JAVA、Scala和Python三种语言
- 主函数的class:是Spark程序的入口Main Class的全路径
- 主jar包:是Spark的jar包
- 部署方式:支持yarn-cluster、yarn-client、和local三种模式
- Driver内核数:可以设置Driver内核数及内存数
- Executor数量:可以设置Executor数量、Executor内存数和Executor内核数
- 命令行参数:是设置Spark程序的输入参数,支持自定义参数变量的替换。
- 其他参数:支持 --jars、--files、--archives、--conf格式
- 资源:如果其他参数中引用了资源文件,需要在资源中选择指定
- 自定义参数:是MR局部的用户自定义参数,会替换脚本中以${变量}的内容
- 删除
> 删除流程实例及流程实例下的任务实例
注意:JAVA和Scala只是用来标识,没有区别,如果是Python开发的Spark则没有主函数的class,其他都是一样
- Gantt
### MapReduce(MR)节点
- 使用MR节点,可以直接执行MR程序。对于mr节点,worker会使用`hadoop jar`方式提交任务
> Gantt图纵轴是某个流程实例下的任务实例的拓扑排序,横轴是任务实例的运行时间
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/gantt.png" width="60%" />
</p>
> 拖动工具栏中的![PNG](https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_MR.png)任务节点到画板中,双击任务节点,如下图:
#### 任务实例列表页
1. JAVA程序
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/task_history.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/mr_java.png" width="60%" />
</p>
- 点击任务实例节点,点击 查看历史,可以查看该流程实例运行的该任务实例列表
- 主函数的class:是MR程序的入口Main Class的全路径
- 程序类型:选择JAVA语言
- 主jar包:是MR的jar包
- 命令行参数:是设置MR程序的输入参数,支持自定义参数变量的替换
- 其他参数:支持 –D、-files、-libjars、-archives格式
- 资源: 如果其他参数中引用了资源文件,需要在资源中选择指定
- 自定义参数:是MR局部的用户自定义参数,会替换脚本中以${变量}的内容
##### 查看日志
2. Python程序
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/task_log.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/mr_edit.png" width="60%" />
</p>
- 点击任务实例节点,点击 查看日志,可以查看该任务实例运行的日志,如下图:
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/task_log2.png" width="60%" />
</p>
- 程序类型:选择Python语言
- 主jar包:是运行MR的Python jar包
- 其他参数:支持 –D、-mapper、-reducer、-input -output格式,这里可以设置用户自定义参数的输入,比如:
- -mapper "mapper.py 1" -file mapper.py -reducer reducer.py -file reducer.py –input /journey/words.txt -output /journey/out/mr/${currentTimeMillis}
- 其中 -mapper 后的 mapper.py 1是两个参数,第一个参数是mapper.py,第二个参数是1
- 资源: 如果其他参数中引用了资源文件,需要在资源中选择指定
- 自定义参数:是MR局部的用户自定义参数,会替换脚本中以${变量}的内容
- 右上角是下载日志、刷新日志和放大/缩小按钮
- 注意:日志查看是分片的查看,上下滚动查看
### Python节点
- 使用python节点,可以直接执行python脚本,对于python节点,worker会使用`python **`方式提交任务。
### 任务实例
> 任务实例是流程实例任务节点的列表
> 拖动工具栏中的![PNG](https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_PYTHON.png)任务节点到画板中,双击任务节点,如下图:
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/task_list.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/python_edit.png" width="60%" />
</p>
两种方式查看任务实例:
- 第一种是通过流程实例任务节点 查看历史,这时查看的是此流程实例的任务实例 重跑的列表
- 第二种是通过点击 流程实例 导航栏,调转到流程实例列表,这时查看的是所有流程实例的任务实例列表
> 查看日志:点击 查看日志 按钮,可下载和查看日志
- 脚本:用户开发的Python程序
- 资源:是指脚本中需要调用的资源文件列表
- 自定义参数:是Python局部的用户自定义参数,会替换脚本中以${变量}的内容
## 系统参数
### 系统参数
<table>
<tr><th>变量</th><th>含义</th></tr>
<tr>
@ -680,7 +644,6 @@ UDF函数管理:对用户创建的UDF进行管理
- 后 N 分钟:$[HHmmss+N/24/60]
- 前 N 分钟:$[HHmmss-N/24/60]
### 用户自定义参数
> 用户自定义参数分为全局参数和局部参数。全局参数是保存流程定义和流程实例的时候传递的全局参数,全局参数可以在整个流程中的任何一个任务节点的局部参数引用。
@ -688,13 +651,13 @@ UDF函数管理:对用户创建的UDF进行管理
> 例如:
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/local_parameter.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/local_parameter.png" width="60%" />
</p>
- global_bizdate为全局参数,引用的是系统参数。
> global_bizdate为全局参数,引用的是系统参数。
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/global_parameter.png" width="60%" />
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/global_parameter.png" width="60%" />
</p>
- 任务中local_param_bizdate通过${global_bizdate}来引用全局参数,对于脚本可以通过${local_param_bizdate}来引用变量local_param_bizdate的值,或通过JDBC直接将local_param_bizdate的值set进去
> 任务中local_param_bizdate通过${global_bizdate}来引用全局参数,对于脚本可以通过${local_param_bizdate}来引用变量local_param_bizdate的值,或通过JDBC直接将local_param_bizdate的值set进去

2
escheduler-alert/pom.xml

@ -4,7 +4,7 @@
<parent>
<groupId>cn.analysys</groupId>
<artifactId>escheduler</artifactId>
<version>1.0.2-SNAPSHOT</version>
<version>1.0.4-SNAPSHOT</version>
</parent>
<artifactId>escheduler-alert</artifactId>
<packaging>jar</packaging>

14
escheduler-alert/src/main/java/cn/escheduler/alert/utils/Constants.java

@ -59,6 +59,8 @@ public class Constants {
public static final String MAIL_SMTP_STARTTLS_ENABLE = "mail.smtp.starttls.enable";
public static final String MAIL_SMTP_SSL_ENABLE = "mail.smtp.ssl.enable";
public static final String TEXT_HTML_CHARSET_UTF_8 = "text/html;charset=utf-8";
public static final String STRING_TRUE = "true";
@ -126,4 +128,16 @@ public class Constants {
public static final String TH_END = "</th>";
public static final int ALERT_SCAN_INTERVEL = 5000;
public static final String ENTERPRISE_WECHAT_CORP_ID = "enterprise.wechat.corp.id";
public static final String ENTERPRISE_WECHAT_SECRET = "enterprise.wechat.secret";
public static final String ENTERPRISE_WECHAT_TOKEN_URL = "enterprise.wechat.token.url";
public static final String ENTERPRISE_WECHAT_PUSH_URL = "enterprise.wechat.push.url";
public static final String ENTERPRISE_WECHAT_TEAM_SEND_MSG = "enterprise.wechat.team.send.msg";
public static final String ENTERPRISE_WECHAT_USER_SEND_MSG = "enterprise.wechat.user.send.msg";
}

167
escheduler-alert/src/main/java/cn/escheduler/alert/utils/EnterpriseWeChatUtils.java

@ -0,0 +1,167 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.alert.utils;
import com.alibaba.fastjson.JSON;
import com.google.common.reflect.TypeToken;
import org.apache.http.HttpEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Collection;
import java.util.Map;
import static cn.escheduler.alert.utils.PropertyUtils.getString;
/**
 * Utilities for sending alert messages through the Enterprise WeChat
 * (WeChat Work) HTTP API: fetch an access token, build the JSON message
 * payloads from configured templates, and POST them to the push endpoint.
 *
 * Configuration is read once at class-load time from alert.properties via
 * {@code PropertyUtils.getString} (corp id, secret, token/push URLs and the
 * team/user message templates).
 */
public class EnterpriseWeChatUtils {

    public static final Logger logger = LoggerFactory.getLogger(EnterpriseWeChatUtils.class);

    private static final String enterpriseWeChatCorpId = getString(Constants.ENTERPRISE_WECHAT_CORP_ID);

    private static final String enterpriseWeChatSecret = getString(Constants.ENTERPRISE_WECHAT_SECRET);

    private static final String enterpriseWeChatTokenUrl = getString(Constants.ENTERPRISE_WECHAT_TOKEN_URL);
    // Token URL with $corpId/$secret placeholders resolved from configuration.
    // String.replace (literal) instead of replaceAll: the configured values are
    // not regexes, and a '$' or '\' in them would break a regex replacement.
    private String enterpriseWeChatTokenUrlReplace = enterpriseWeChatTokenUrl
            .replace("$corpId", enterpriseWeChatCorpId)
            .replace("$secret", enterpriseWeChatSecret);

    private static final String enterpriseWeChatPushUrl = getString(Constants.ENTERPRISE_WECHAT_PUSH_URL);

    private static final String enterpriseWeChatTeamSendMsg = getString(Constants.ENTERPRISE_WECHAT_TEAM_SEND_MSG);

    private static final String enterpriseWeChatUserSendMsg = getString(Constants.ENTERPRISE_WECHAT_USER_SEND_MSG);

    /**
     * Fetches an Enterprise WeChat access token from the configured token URL.
     *
     * @return the {@code access_token} field of the JSON response
     * @throws IOException if the HTTP request fails
     */
    public String getToken() throws IOException {
        String resp;

        // try-with-resources: close both the client and the response
        // (the original code leaked the HttpClient).
        try (CloseableHttpClient httpClient = HttpClients.createDefault()) {
            HttpGet httpGet = new HttpGet(enterpriseWeChatTokenUrlReplace);
            try (CloseableHttpResponse response = httpClient.execute(httpGet)) {
                HttpEntity entity = response.getEntity();
                resp = EntityUtils.toString(entity, "utf-8");
                EntityUtils.consume(entity);
            }
        }

        Map<String, Object> map = JSON.parseObject(resp,
                new TypeToken<Map<String, Object>>() {
                }.getType());
        return map.get("access_token").toString();
    }

    /**
     * Builds a message payload addressed to a single party (team).
     *
     * @param toParty target party id
     * @param agentId application agent id
     * @param msg     message text
     * @return the JSON payload built from the team message template
     */
    public String makeTeamSendMsg(String toParty, String agentId, String msg) {
        // Literal replacement: msg may legitimately contain '$' or '\',
        // which would corrupt a regex-based replaceAll.
        return enterpriseWeChatTeamSendMsg.replace("$toParty", toParty)
                .replace("$agentId", agentId)
                .replace("$msg", msg);
    }

    /**
     * Builds a message payload addressed to multiple parties (teams),
     * joined with '|' as the API expects.
     *
     * @param toParty target party ids
     * @param agentId application agent id
     * @param msg     message text
     * @return the JSON payload built from the team message template
     */
    public String makeTeamSendMsg(Collection<String> toParty, String agentId, String msg) {
        String listParty = FuncUtils.mkString(toParty, "|");
        return enterpriseWeChatTeamSendMsg.replace("$toParty", listParty)
                .replace("$agentId", agentId)
                .replace("$msg", msg);
    }

    /**
     * Builds a message payload addressed to a single user.
     *
     * @param toUser  target user id
     * @param agentId application agent id
     * @param msg     message text
     * @return the JSON payload built from the user message template
     */
    public String makeUserSendMsg(String toUser, String agentId, String msg) {
        return enterpriseWeChatUserSendMsg.replace("$toUser", toUser)
                .replace("$agentId", agentId)
                .replace("$msg", msg);
    }

    /**
     * Builds a message payload addressed to multiple users,
     * joined with '|' as the API expects.
     *
     * @param toUser  target user ids
     * @param agentId application agent id
     * @param msg     message text
     * @return the JSON payload built from the user message template
     */
    public String makeUserSendMsg(Collection<String> toUser, String agentId, String msg) {
        String listUser = FuncUtils.mkString(toUser, "|");
        return enterpriseWeChatUserSendMsg.replace("$toUser", listUser)
                .replace("$agentId", agentId)
                .replace("$msg", msg);
    }

    /**
     * POSTs a prepared message payload to the Enterprise WeChat push endpoint.
     *
     * @param charset charset used for both the request body and the response
     * @param data    JSON payload (from one of the make*SendMsg methods)
     * @param token   access token (from {@link #getToken()})
     * @return the raw API response, e.g. {"errcode":0,"errmsg":"ok","invaliduser":""}
     * @throws IOException if the HTTP request fails
     */
    public String sendQiyeWeixin(String charset, String data, String token) throws IOException {
        String enterpriseWeChatPushUrlReplace = enterpriseWeChatPushUrl.replace("$token", token);
        String resp;

        // try-with-resources: close both the client and the response
        // (the original code leaked the HttpClient).
        try (CloseableHttpClient httpclient = HttpClients.createDefault()) {
            HttpPost httpPost = new HttpPost(enterpriseWeChatPushUrlReplace);
            httpPost.setEntity(new StringEntity(data, charset));
            try (CloseableHttpResponse response = httpclient.execute(httpPost)) {
                HttpEntity entity = response.getEntity();
                resp = EntityUtils.toString(entity, charset);
                EntityUtils.consume(entity);
            }
        }

        logger.info("qiye weixin send [{}], param:{}, resp:{}", enterpriseWeChatPushUrl, data, resp);
        return resp;
    }
}

34
escheduler-alert/src/main/java/cn/escheduler/alert/utils/FuncUtils.java

@ -0,0 +1,34 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.alert.utils;
/**
 * Small string helpers shared by the alert utilities.
 */
public class FuncUtils {

    /**
     * Joins the given strings with the given separator,
     * e.g. ["a","b","c"] + "|" -&gt; "a|b|c" (Scala-style mkString).
     *
     * Delegates to {@link String#join(CharSequence, Iterable)}, which matches
     * the previous hand-rolled StringBuilder loop exactly, including rendering
     * null elements as the text "null".
     *
     * @param list  the strings to join
     * @param split the separator placed between consecutive elements
     * @return the joined string; empty string for an empty iterable
     */
    static public String mkString(Iterable<String> list, String split) {
        return String.join(split, list);
    }
}

22
escheduler-alert/src/main/java/cn/escheduler/alert/utils/MailUtils.java

@ -33,8 +33,10 @@ import org.springframework.util.ResourceUtils;
import javax.mail.*;
import javax.mail.internet.*;
import java.io.*;
import java.security.Security;
import java.util.*;
import static cn.escheduler.alert.utils.PropertyUtils.getBoolean;
import static cn.escheduler.alert.utils.PropertyUtils.getInt;
import static cn.escheduler.alert.utils.PropertyUtils.getString;
@ -56,8 +58,16 @@ public class MailUtils {
public static final String mailPasswd = getString(Constants.MAIL_PASSWD);
public static final Boolean mailUseStartTLS = getBoolean(Constants.MAIL_SMTP_STARTTLS_ENABLE);
public static final Boolean mailUseSSL = getBoolean(Constants.MAIL_SMTP_SSL_ENABLE);
public static final String xlsFilePath = getString(Constants.XLS_FILE_PATH);
public static final String starttlsEnable = getString(Constants.MAIL_SMTP_STARTTLS_ENABLE);
public static final String sslEnable = getString(Constants.MAIL_SMTP_SSL_ENABLE);
private static Template MAIL_TEMPLATE;
static {
@ -122,7 +132,10 @@ public class MailUtils {
//set charset
email.setCharset(Constants.UTF_8);
// TLS verification
email.setTLS(true);
email.setTLS(Boolean.valueOf(starttlsEnable));
// SSL verification
email.setSSL(Boolean.valueOf(sslEnable));
if (CollectionUtils.isNotEmpty(receivers)){
// receivers mail
for (String receiver : receivers) {
@ -152,6 +165,7 @@ public class MailUtils {
return retMap;
}catch (Exception e){
handleException(receivers, retMap, e);
return retMap;
}
}
return retMap;
@ -269,11 +283,15 @@ public class MailUtils {
* @throws MessagingException
*/
private static MimeMessage getMimeMessage(Collection<String> receivers) throws MessagingException {
// Security.addProvider(new com.sun.net.ssl.internal.ssl.Provider());
// final String SSL_FACTORY = "javax.net.ssl.SSLSocketFactory";
Properties props = new Properties();
props.setProperty(Constants.MAIL_HOST, mailServerHost);
props.setProperty(Constants.MAIL_SMTP_AUTH, Constants.STRING_TRUE);
props.setProperty(Constants.MAIL_TRANSPORT_PROTOCOL, mailProtocol);
props.setProperty(Constants.MAIL_SMTP_STARTTLS_ENABLE, Constants.STRING_TRUE);
props.setProperty(Constants.MAIL_SMTP_STARTTLS_ENABLE, starttlsEnable);
props.setProperty("mail.smtp.ssl.enable", sslEnable);
Authenticator auth = new Authenticator() {
@Override
protected PasswordAuthentication getPasswordAuthentication() {

14
escheduler-alert/src/main/resources/alert.properties

@ -8,9 +8,21 @@ mail.server.port=25
mail.sender=xxxxxxx
mail.passwd=xxxxxxx
# TLS
mail.smtp.starttls.enable=false
# SSL
mail.smtp.ssl.enable=true
#xls file path,need create if not exist
xls.file.path=/opt/xls
xls.file.path=/tmp/xls
# Enterprise WeChat configuration
enterprise.wechat.corp.id=xxxxxxx
enterprise.wechat.secret=xxxxxxx
enterprise.wechat.token.url=https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret
enterprise.wechat.push.url=https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token
enterprise.wechat.team.send.msg={\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}
enterprise.wechat.user.send.msg={\"touser\":\"$toUser\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}

39
escheduler-alert/src/main/resources/mail_templates/alert_mail_template.ftl

@ -1,38 +1 @@
<!DOCTYPE HTML PUBLIC '-//W3C//DTD HTML 4.01 Transitional//EN' 'http://www.w3.org/TR/html4/loose.dtd'>
<html>
<head><title> easyscheduler </title>
<meta name='Keywords' content=''>
<meta name='Description' content=''>
<style type="text/css">table {
font-size: 14px;
color: #333333;
border-width: 1px;
border-color: #666666;
border-collapse: collapse;
}
table th {
border-width: 1px;
padding: 8px;
border-style: solid;
border-color: #666666;
background-color: #dedede;
}
table td {
border-width: 1px;
padding: 8px;
border-style: solid;
border-color: #666666;
background-color: #ffffff;
}</style>
</head>
<body>
<table>
<thead>
<#if title??> ${title} </#if>
</thead>
<#if content??> ${content} </#if>
</table>
</body>
</html>
<!DOCTYPE HTML PUBLIC '-//W3C//DTD HTML 4.01 Transitional//EN' 'http://www.w3.org/TR/html4/loose.dtd'><html><head><title> easyscheduler</title><meta name='Keywords' content=''><meta name='Description' content=''><style type="text/css">table { margin-top:0px; padding-top:0px; border:1px solid; font-size: 14px; color: #333333; border-width: 1px; border-color: #666666; border-collapse: collapse; } table th { border-width: 1px; padding: 8px; border-style: solid; border-color: #666666; background-color: #dedede; } table td { border-width: 1px; padding: 8px; border-style: solid; border-color: #666666; background-color: #ffffff; }</style></head><body style="margin:0;padding:0"><table border="1px" cellpadding="5px" cellspacing="-10px"><thead><#if title??> ${title}</#if></thead><#if content??> ${content}</#if></table></body></html>

110
escheduler-alert/src/test/java/cn/escheduler/alert/utils/EnterpriseWeChatUtilsTest.java

@ -0,0 +1,110 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.alert.utils;
import org.junit.Assert;
import org.junit.Test;
import com.alibaba.fastjson.JSON;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
/**
* Please manually modify the configuration file before testing.
* file: alert.properties
* enterprise.wechat.corp.id
* enterprise.wechat.secret
* enterprise.wechat.token.url
* enterprise.wechat.push.url
* enterprise.wechat.send.msg
*/
public class EnterpriseWeChatUtilsTest {

    // Please change these ids to match your enterprise WeChat tenant before running.
    private String agentId = "1000002"; // app id
    private String partyId = "2";
    private Collection<String> listPartyId = Arrays.asList("2","4");
    private String userId = "test1";
    private Collection<String> listUserId = Arrays.asList("test1","test2");

    /**
     * Sends {@code msg} through the enterprise WeChat API and asserts the
     * server replied with errmsg "ok".
     *
     * <p>Declared to throw {@link IOException} so transport failures surface
     * as test errors; the previous version only called printStackTrace(),
     * which let a broken send pass silently. Also fixes the JUnit argument
     * order: assertEquals takes (expected, actual).
     */
    private void assertSendOk(EnterpriseWeChatUtils wx, String msg) throws IOException {
        String token = wx.getToken();
        String resp = wx.sendQiyeWeixin("utf-8", msg, token);
        String errmsg = JSON.parseObject(resp).getString("errmsg");
        Assert.assertEquals("ok", errmsg);
    }

    /** Send a text message to a single party (team). */
    @Test
    public void testSendSingleTeamWeChat() throws IOException {
        EnterpriseWeChatUtils wx = new EnterpriseWeChatUtils();
        assertSendOk(wx, wx.makeTeamSendMsg(partyId, agentId, "hello world"));
    }

    /** Send a text message to multiple parties (teams). */
    @Test
    public void testSendMultiTeamWeChat() throws IOException {
        EnterpriseWeChatUtils wx = new EnterpriseWeChatUtils();
        assertSendOk(wx, wx.makeTeamSendMsg(listPartyId, agentId, "hello world"));
    }

    /** Send a text message to a single user. */
    @Test
    public void testSendSingleUserWeChat() throws IOException {
        EnterpriseWeChatUtils wx = new EnterpriseWeChatUtils();
        assertSendOk(wx, wx.makeUserSendMsg(userId, agentId, "hello world"));
    }

    /** Send a text message to multiple users. */
    @Test
    public void testSendMultiUserWeChat() throws IOException {
        EnterpriseWeChatUtils wx = new EnterpriseWeChatUtils();
        assertSendOk(wx, wx.makeUserSendMsg(listUserId, agentId, "hello world"));
    }
}

6
escheduler-api/pom.xml

@ -4,7 +4,7 @@
<parent>
<groupId>cn.analysys</groupId>
<artifactId>escheduler</artifactId>
<version>1.0.2-SNAPSHOT</version>
<version>1.0.4-SNAPSHOT</version>
</parent>
<artifactId>escheduler-api</artifactId>
<packaging>jar</packaging>
@ -32,6 +32,10 @@
<artifactId>leveldbjni-all</artifactId>
<groupId>org.fusesource.leveldbjni</groupId>
</exclusion>
<exclusion>
<artifactId>protobuf-java</artifactId>
<groupId>com.google.protobuf</groupId>
</exclusion>
</exclusions>
</dependency>

2
escheduler-api/src/main/java/cn/escheduler/api/controller/AccessTokenController.java

@ -22,6 +22,7 @@ import cn.escheduler.api.service.AccessTokenService;
import cn.escheduler.api.service.UsersService;
import cn.escheduler.api.utils.Constants;
import cn.escheduler.api.utils.Result;
import cn.escheduler.common.utils.ParameterUtils;
import cn.escheduler.dao.model.User;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiImplicitParam;
@ -127,6 +128,7 @@ public class AccessTokenController extends BaseController{
if(result.get(Constants.STATUS) != Status.SUCCESS){
return returnDataListPaging(result);
}
searchVal = ParameterUtils.handleEscapes(searchVal);
result = accessTokenService.queryAccessTokenList(loginUser, searchVal, pageNo, pageSize);
return returnDataListPaging(result);
}catch (Exception e){

2
escheduler-api/src/main/java/cn/escheduler/api/controller/AlertGroupController.java

@ -20,6 +20,7 @@ import cn.escheduler.api.service.AlertGroupService;
import cn.escheduler.api.utils.Constants;
import cn.escheduler.api.utils.Result;
import cn.escheduler.common.enums.AlertType;
import cn.escheduler.common.utils.ParameterUtils;
import cn.escheduler.dao.model.User;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiImplicitParam;
@ -131,6 +132,7 @@ public class AlertGroupController extends BaseController{
return returnDataListPaging(result);
}
searchVal = ParameterUtils.handleEscapes(searchVal);
result = alertGroupService.listPaging(loginUser, searchVal, pageNo, pageSize);
return returnDataListPaging(result);
}catch (Exception e){

2
escheduler-api/src/main/java/cn/escheduler/api/controller/DataSourceController.java

@ -21,6 +21,7 @@ import cn.escheduler.api.service.DataSourceService;
import cn.escheduler.api.utils.Constants;
import cn.escheduler.api.utils.Result;
import cn.escheduler.common.enums.DbType;
import cn.escheduler.common.utils.ParameterUtils;
import cn.escheduler.dao.model.User;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiImplicitParam;
@ -228,6 +229,7 @@ public class DataSourceController extends BaseController {
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return returnDataListPaging(result);
}
searchVal = ParameterUtils.handleEscapes(searchVal);
result = dataSourceService.queryDataSourceListPaging(loginUser, searchVal, pageNo, pageSize);
return returnDataListPaging(result);
} catch (Exception e) {

52
escheduler-api/src/main/java/cn/escheduler/api/controller/ProcessDefinitionController.java

@ -20,6 +20,7 @@ import cn.escheduler.api.enums.Status;
import cn.escheduler.api.service.ProcessDefinitionService;
import cn.escheduler.api.utils.Constants;
import cn.escheduler.api.utils.Result;
import cn.escheduler.common.utils.ParameterUtils;
import cn.escheduler.dao.model.User;
import io.swagger.annotations.*;
import org.slf4j.Logger;
@ -277,6 +278,7 @@ public class ProcessDefinitionController extends BaseController{
if(result.get(Constants.STATUS) != Status.SUCCESS){
return returnDataListPaging(result);
}
searchVal = ParameterUtils.handleEscapes(searchVal);
result = processDefinitionService.queryProcessDefinitionListPaging(loginUser, projectName, searchVal, pageNo, pageSize, userId);
return returnDataListPaging(result);
}catch (Exception e){
@ -378,4 +380,54 @@ public class ProcessDefinitionController extends BaseController{
}
}
/**
 * Delete a single process definition by id.
 *
 * @param loginUser           current session user
 * @param projectName         project the definition belongs to
 * @param processDefinitionId id of the definition to delete
 * @return deletion result wrapped in a Result
 */
@GetMapping(value="/delete")
@ResponseStatus(HttpStatus.OK)
public Result deleteProcessDefinitionById(@RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                          @PathVariable String projectName,
                                          @RequestParam("processDefinitionId") Integer processDefinitionId
){
    try {
        logger.info("delete process definition by id, login user:{}, project name:{}, process definition id:{}",
                loginUser.getUserName(), projectName, processDefinitionId);
        // delegate straight to the service layer and wrap its status map
        return returnDataList(
                processDefinitionService.deleteProcessDefinitionById(loginUser, projectName, processDefinitionId));
    } catch (Exception e) {
        logger.error(DELETE_PROCESS_DEFINE_BY_ID_ERROR.getMsg(), e);
        return error(Status.DELETE_PROCESS_DEFINE_BY_ID_ERROR.getCode(), Status.DELETE_PROCESS_DEFINE_BY_ID_ERROR.getMsg());
    }
}
/**
 * Batch delete process definitions by a comma separated id list.
 *
 * @param loginUser            current session user
 * @param projectName          project the definitions belong to
 * @param processDefinitionIds comma separated process definition ids
 * @return batch deletion result wrapped in a Result
 */
@GetMapping(value="/batch-delete")
@ResponseStatus(HttpStatus.OK)
public Result batchDeleteProcessDefinitionByIds(@RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                                @PathVariable String projectName,
                                                @RequestParam("processDefinitionIds") String processDefinitionIds
){
    try {
        logger.info("delete process definition by ids, login user:{}, project name:{}, process definition ids:{}",
                loginUser.getUserName(), projectName, processDefinitionIds);
        // the service handles splitting the id list and collecting per-id failures
        return returnDataList(
                processDefinitionService.batchDeleteProcessDefinitionByIds(loginUser, projectName, processDefinitionIds));
    } catch (Exception e) {
        logger.error(BATCH_DELETE_PROCESS_DEFINE_BY_IDS_ERROR.getMsg(), e);
        return error(Status.BATCH_DELETE_PROCESS_DEFINE_BY_IDS_ERROR.getCode(), Status.BATCH_DELETE_PROCESS_DEFINE_BY_IDS_ERROR.getMsg());
    }
}
}

34
escheduler-api/src/main/java/cn/escheduler/api/controller/ProcessInstanceController.java

@ -22,6 +22,9 @@ import cn.escheduler.api.utils.Constants;
import cn.escheduler.api.utils.Result;
import cn.escheduler.common.enums.ExecutionStatus;
import cn.escheduler.common.enums.Flag;
import cn.escheduler.common.queue.ITaskQueue;
import cn.escheduler.common.queue.TaskQueueFactory;
import cn.escheduler.common.utils.ParameterUtils;
import cn.escheduler.dao.model.User;
import io.swagger.annotations.*;
import org.slf4j.Logger;
@ -86,6 +89,7 @@ public class ProcessInstanceController extends BaseController{
"search value:{},state type:{},host:{},start time:{}, end time:{},page number:{}, page size:{}",
loginUser.getUserName(), projectName, processDefinitionId, searchVal, stateType,host,
startTime, endTime, pageNo, pageSize);
searchVal = ParameterUtils.handleEscapes(searchVal);
Map<String, Object> result = processInstanceService.queryProcessInstanceList(
loginUser, projectName, processDefinitionId, startTime, endTime, searchVal, stateType, host, pageNo, pageSize);
return returnDataListPaging(result);
@ -223,7 +227,9 @@ public class ProcessInstanceController extends BaseController{
try{
logger.info("delete process instance by id, login user:{}, project name:{}, process instance id:{}",
loginUser.getUserName(), projectName, processInstanceId);
Map<String, Object> result = processInstanceService.deleteProcessInstanceById(loginUser, projectName, processInstanceId);
// task queue
ITaskQueue tasksQueue = TaskQueueFactory.getTaskQueueInstance();
Map<String, Object> result = processInstanceService.deleteProcessInstanceById(loginUser, projectName, processInstanceId,tasksQueue);
return returnDataList(result);
}catch (Exception e){
logger.error(DELETE_PROCESS_INSTANCE_BY_ID_ERROR.getMsg(),e);
@ -332,4 +338,30 @@ public class ProcessInstanceController extends BaseController{
return error(Status.ENCAPSULATION_PROCESS_INSTANCE_GANTT_STRUCTURE_ERROR.getCode(),ENCAPSULATION_PROCESS_INSTANCE_GANTT_STRUCTURE_ERROR.getMsg());
}
}
/**
 * Batch delete process instances by a comma separated id list; the service
 * also removes each instance's task instances and their mapping relation data.
 *
 * @param loginUser          current session user
 * @param projectName        project the instances belong to
 * @param processInstanceIds comma separated process instance ids
 * @return batch deletion result wrapped in a Result
 */
@GetMapping(value="/batch-delete")
@ResponseStatus(HttpStatus.OK)
public Result batchDeleteProcessInstanceByIds(@RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                              @PathVariable String projectName,
                                              @RequestParam("processInstanceIds") String processInstanceIds
){
    try {
        logger.info("delete process instance by ids, login user:{}, project name:{}, process instance ids :{}",
                loginUser.getUserName(), projectName, processInstanceIds);
        // delegate to the service, which iterates the ids and aggregates failures
        return returnDataList(
                processInstanceService.batchDeleteProcessInstanceByIds(loginUser, projectName, processInstanceIds));
    } catch (Exception e) {
        logger.error(BATCH_DELETE_PROCESS_INSTANCE_BY_IDS_ERROR.getMsg(), e);
        return error(Status.BATCH_DELETE_PROCESS_INSTANCE_BY_IDS_ERROR.getCode(), Status.BATCH_DELETE_PROCESS_INSTANCE_BY_IDS_ERROR.getMsg());
    }
}
}

2
escheduler-api/src/main/java/cn/escheduler/api/controller/ProjectController.java

@ -21,6 +21,7 @@ import cn.escheduler.api.enums.Status;
import cn.escheduler.api.service.ProjectService;
import cn.escheduler.api.utils.Constants;
import cn.escheduler.api.utils.Result;
import cn.escheduler.common.utils.ParameterUtils;
import cn.escheduler.dao.model.User;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiImplicitParam;
@ -161,6 +162,7 @@ public class ProjectController extends BaseController {
try {
logger.info("login user {}, query project list paging", loginUser.getUserName());
searchVal = ParameterUtils.handleEscapes(searchVal);
Map<String, Object> result = projectService.queryProjectListPaging(loginUser, pageSize, pageNo, searchVal);
return returnDataListPaging(result);
} catch (Exception e) {

2
escheduler-api/src/main/java/cn/escheduler/api/controller/QueueController.java

@ -21,6 +21,7 @@ import cn.escheduler.api.enums.Status;
import cn.escheduler.api.service.QueueService;
import cn.escheduler.api.utils.Constants;
import cn.escheduler.api.utils.Result;
import cn.escheduler.common.utils.ParameterUtils;
import cn.escheduler.dao.model.User;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiImplicitParam;
@ -95,6 +96,7 @@ public class QueueController extends BaseController{
return returnDataListPaging(result);
}
searchVal = ParameterUtils.handleEscapes(searchVal);
result = queueService.queryList(loginUser,searchVal,pageNo,pageSize);
return returnDataListPaging(result);
}catch (Exception e){

6
escheduler-api/src/main/java/cn/escheduler/api/controller/ResourcesController.java

@ -23,6 +23,7 @@ import cn.escheduler.api.utils.Constants;
import cn.escheduler.api.utils.Result;
import cn.escheduler.common.enums.ResourceType;
import cn.escheduler.common.enums.UdfType;
import cn.escheduler.common.utils.ParameterUtils;
import cn.escheduler.dao.model.User;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiImplicitParam;
@ -178,6 +179,7 @@ public class ResourcesController extends BaseController{
return returnDataListPaging(result);
}
searchVal = ParameterUtils.handleEscapes(searchVal);
result = resourceService.queryResourceListPaging(loginUser,type,searchVal,pageNo, pageSize);
return returnDataListPaging(result);
}catch (Exception e){
@ -234,9 +236,9 @@ public class ResourcesController extends BaseController{
) {
try {
logger.info("login user {}, verfiy resource alias: {},resource type: {}",
loginUser.getUserName(), alias);
loginUser.getUserName(), alias,type);
return resourceService.verifyResourceName(alias, type);
return resourceService.verifyResourceName(alias,type,loginUser);
} catch (Exception e) {
logger.error(VERIFY_RESOURCE_BY_NAME_AND_TYPE_ERROR.getMsg(), e);
return error(Status.VERIFY_RESOURCE_BY_NAME_AND_TYPE_ERROR.getCode(), Status.VERIFY_RESOURCE_BY_NAME_AND_TYPE_ERROR.getMsg());

31
escheduler-api/src/main/java/cn/escheduler/api/controller/SchedulerController.java

@ -24,6 +24,7 @@ import cn.escheduler.common.enums.FailureStrategy;
import cn.escheduler.common.enums.Priority;
import cn.escheduler.common.enums.ReleaseState;
import cn.escheduler.common.enums.WarningType;
import cn.escheduler.common.utils.ParameterUtils;
import cn.escheduler.dao.model.User;
import io.swagger.annotations.*;
import org.slf4j.Logger;
@ -228,7 +229,6 @@ public class SchedulerController extends BaseController {
*/
@ApiOperation(value = "queryScheduleListPaging", notes= "QUERY_SCHEDULE_LIST_PAGING_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "id", value = "SCHEDULE_ID", required = true, dataType = "Int", example = "100"),
@ApiImplicitParam(name = "processDefinitionId", value = "PROCESS_DEFINITION_ID", required = true,dataType = "Int", example = "100"),
@ApiImplicitParam(name = "searchVal", value = "SEARCH_VAL", type = "String"),
@ApiImplicitParam(name = "pageNo", value = "PAGE_NO", dataType = "Int", example = "100"),
@ -245,6 +245,7 @@ public class SchedulerController extends BaseController {
logger.info("login user {}, query schedule, project name: {}, process definition id: {}",
loginUser.getUserName(), projectName, processDefinitionId);
try {
searchVal = ParameterUtils.handleEscapes(searchVal);
Map<String, Object> result = schedulerService.querySchedule(loginUser, projectName, processDefinitionId, searchVal, pageNo, pageSize);
return returnDataListPaging(result);
}catch (Exception e){
@ -254,6 +255,34 @@ public class SchedulerController extends BaseController {
}
/**
 * Delete a schedule by id.
 *
 * @param loginUser   current session user
 * @param projectName project the schedule belongs to
 * @param scheduleId  id of the schedule to delete
 * @return deletion result wrapped in a Result
 */
// NOTE(review): the notes key "OFFLINE_SCHEDULE_NOTES" looks copy-pasted from
// the offline endpoint — confirm whether a dedicated delete notes key exists.
@ApiOperation(value = "deleteScheduleById", notes= "OFFLINE_SCHEDULE_NOTES")
@ApiImplicitParams({
        @ApiImplicitParam(name = "scheduleId", value = "SCHEDULE_ID", required = true, dataType = "Int", example = "100")
})
@GetMapping(value="/delete")
@ResponseStatus(HttpStatus.OK)
public Result deleteScheduleById(@RequestAttribute(value = SESSION_USER) User loginUser,
                                 @PathVariable String projectName,
                                 @RequestParam("scheduleId") Integer scheduleId
){
    try {
        logger.info("delete schedule by id, login user:{}, project name:{}, schedule id:{}",
                loginUser.getUserName(), projectName, scheduleId);
        return returnDataList(schedulerService.deleteScheduleById(loginUser, projectName, scheduleId));
    } catch (Exception e) {
        logger.error(DELETE_SCHEDULE_CRON_BY_ID_ERROR.getMsg(), e);
        return error(Status.DELETE_SCHEDULE_CRON_BY_ID_ERROR.getCode(), Status.DELETE_SCHEDULE_CRON_BY_ID_ERROR.getMsg());
    }
}
/**
* query schedule list
*

2
escheduler-api/src/main/java/cn/escheduler/api/controller/TaskInstanceController.java

@ -21,6 +21,7 @@ import cn.escheduler.api.service.TaskInstanceService;
import cn.escheduler.api.utils.Constants;
import cn.escheduler.api.utils.Result;
import cn.escheduler.common.enums.ExecutionStatus;
import cn.escheduler.common.utils.ParameterUtils;
import cn.escheduler.dao.model.User;
import io.swagger.annotations.*;
import org.slf4j.Logger;
@ -83,6 +84,7 @@ public class TaskInstanceController extends BaseController{
try{
logger.info("query task instance list, project name:{},process instance:{}, search value:{},task name:{}, state type:{}, host:{}, start:{}, end:{}",
projectName, processInstanceId, searchVal, taskName, stateType, host, startTime, endTime);
searchVal = ParameterUtils.handleEscapes(searchVal);
Map<String, Object> result = taskInstanceService.queryTaskListPaging(
loginUser, projectName, processInstanceId, taskName, startTime, endTime, searchVal, stateType, host, pageNo, pageSize);
return returnDataListPaging(result);

3
escheduler-api/src/main/java/cn/escheduler/api/controller/TenantController.java

@ -21,7 +21,9 @@ import cn.escheduler.api.enums.Status;
import cn.escheduler.api.service.TenantService;
import cn.escheduler.api.utils.Constants;
import cn.escheduler.api.utils.Result;
import cn.escheduler.common.utils.ParameterUtils;
import cn.escheduler.dao.model.User;
import org.apache.commons.lang3.StringUtils;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiImplicitParam;
import io.swagger.annotations.ApiImplicitParams;
@ -118,6 +120,7 @@ public class TenantController extends BaseController{
if(result.get(Constants.STATUS) != Status.SUCCESS){
return returnDataListPaging(result);
}
searchVal = ParameterUtils.handleEscapes(searchVal);
result = tenantService.queryTenantList(loginUser, searchVal, pageNo, pageSize);
return returnDataListPaging(result);
}catch (Exception e){

23
escheduler-api/src/main/java/cn/escheduler/api/controller/UsersController.java

@ -21,6 +21,7 @@ import cn.escheduler.api.enums.Status;
import cn.escheduler.api.service.UsersService;
import cn.escheduler.api.utils.Constants;
import cn.escheduler.api.utils.Result;
import cn.escheduler.common.utils.ParameterUtils;
import cn.escheduler.dao.model.User;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiImplicitParam;
@ -120,6 +121,7 @@ public class UsersController extends BaseController{
if(result.get(Constants.STATUS) != Status.SUCCESS){
return returnDataListPaging(result);
}
searchVal = ParameterUtils.handleEscapes(searchVal);
result = usersService.queryUserList(loginUser, searchVal, pageNo, pageSize);
return returnDataListPaging(result);
}catch (Exception e){
@ -338,6 +340,26 @@ public class UsersController extends BaseController{
/**
 * List all general users visible to the login user (no paging).
 *
 * @param loginUser current session user
 * @return general user list wrapped in a Result
 */
@GetMapping(value="/list")
@ResponseStatus(HttpStatus.OK)
public Result listUser(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser){
    // fixed: the log pattern had a {} placeholder but no argument was supplied
    logger.info("login user {}, user list", loginUser.getUserName());
    try{
        Map<String, Object> result = usersService.queryAllGeneralUsers(loginUser);
        return returnDataList(result);
    }catch (Exception e){
        logger.error(USER_LIST_ERROR.getMsg(),e);
        return error(Status.USER_LIST_ERROR.getCode(), Status.USER_LIST_ERROR.getMsg());
    }
}
/**
* user list no paging
*
* @param loginUser
* @return
*/
@GetMapping(value="/list-all")
@ResponseStatus(HttpStatus.OK)
public Result listAll(@RequestAttribute(value = Constants.SESSION_USER) User loginUser){
logger.info("login user {}, user list");
try{
Map<String, Object> result = usersService.queryUserList(loginUser);
@ -348,6 +370,7 @@ public class UsersController extends BaseController{
}
}
/**
* verify username
*

2
escheduler-api/src/main/java/cn/escheduler/api/controller/WorkerGroupController.java

@ -20,6 +20,7 @@ package cn.escheduler.api.controller;
import cn.escheduler.api.enums.Status;
import cn.escheduler.api.service.WorkerGroupService;
import cn.escheduler.api.utils.Result;
import cn.escheduler.common.utils.ParameterUtils;
import cn.escheduler.dao.model.User;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiImplicitParam;
@ -109,6 +110,7 @@ public class WorkerGroupController extends BaseController{
loginUser.getUserName() , pageNo, pageSize, searchVal);
try {
searchVal = ParameterUtils.handleEscapes(searchVal);
Map<String, Object> result = workerGroupService.queryAllGroupPaging(pageNo, pageSize, searchVal);
return returnDataListPaging(result);
}catch (Exception e){

12
escheduler-api/src/main/java/cn/escheduler/api/enums/Status.java

@ -159,6 +159,8 @@ public enum Status {
NAME_NULL(10134,"name must be not null"),
NAME_EXIST(10135, "name {0} already exists"),
SAVE_ERROR(10136, "save error"),
DELETE_PROJECT_ERROR_DEFINES_NOT_NULL(10137, "please delete the process definitions in project first!"),
BATCH_DELETE_PROCESS_INSTANCE_BY_IDS_ERROR(10117,"batch delete process instance by ids {0} error"),
@ -172,6 +174,8 @@ public enum Status {
RESOURCE_SUFFIX_FORBID_CHANGE(20008, "resource suffix not allowed to be modified"),
UDF_RESOURCE_SUFFIX_NOT_JAR(20009, "UDF resource suffix name must be jar"),
HDFS_COPY_FAIL(20009, "hdfs copy {0} -> {1} fail"),
RESOURCE_FILE_EXIST(20010, "resource file {0} already exists in hdfs,please delete it or change name!"),
RESOURCE_FILE_NOT_EXIST(20011, "resource file {0} not exists in hdfs!"),
@ -200,8 +204,16 @@ public enum Status {
DATA_IS_NULL(50018,"data %s is null"),
PROCESS_NODE_HAS_CYCLE(50019,"process node has cycle"),
PROCESS_NODE_S_PARAMETER_INVALID(50020,"process node %s parameter invalid"),
PROCESS_DEFINE_STATE_ONLINE(50021, "process definition {0} is already on line"),
DELETE_PROCESS_DEFINE_BY_ID_ERROR(50022,"delete process definition by id error"),
SCHEDULE_CRON_STATE_ONLINE(50023,"the status of schedule {0} is already on line"),
DELETE_SCHEDULE_CRON_BY_ID_ERROR(50024,"delete schedule by id error"),
BATCH_DELETE_PROCESS_DEFINE_ERROR(50025,"batch delete process definition error"),
BATCH_DELETE_PROCESS_DEFINE_BY_IDS_ERROR(50026,"batch delete process definition by ids {0} error"),
HDFS_NOT_STARTUP(60001,"hdfs not startup"),
HDFS_TERANT_RESOURCES_FILE_EXISTS(60002,"resource file exists,please delete resource first"),
HDFS_TERANT_UDFS_FILE_EXISTS(60003,"udf file exists,please delete resource first"),
/**
* for monitor

2
escheduler-api/src/main/java/cn/escheduler/api/service/ExecutorService.java

@ -178,10 +178,12 @@ public class ExecutorService extends BaseService{
}
ProcessDefinition processDefinition = processDao.findProcessDefineById(processInstance.getProcessDefinitionId());
if(executeType != ExecuteType.STOP && executeType != ExecuteType.PAUSE){
result = checkProcessDefinitionValid(processDefinition, processInstance.getProcessDefinitionId());
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return result;
}
}
checkResult = checkExecuteType(processInstance, executeType);
Status status = (Status) checkResult.get(Constants.STATUS);

123
escheduler-api/src/main/java/cn/escheduler/api/service/ProcessDefinitionService.java

@ -38,6 +38,7 @@ import cn.escheduler.dao.mapper.*;
import cn.escheduler.dao.model.*;
import com.alibaba.fastjson.JSON;
import com.fasterxml.jackson.core.JsonProcessingException;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
@ -292,12 +293,12 @@ public class ProcessDefinitionService extends BaseDAGService {
processDefine.setTimeout(processData.getTimeout());
//custom global params
List<Property> globalParamsList = processData.getGlobalParams();
if (globalParamsList != null && globalParamsList.size() > 0) {
Set<Property> userDefParamsSet = new HashSet<>(globalParamsList);
List<Property> globalParamsList = new ArrayList<>();
if (processData.getGlobalParams() != null && processData.getGlobalParams().size() > 0) {
Set<Property> userDefParamsSet = new HashSet<>(processData.getGlobalParams());
globalParamsList = new ArrayList<>(userDefParamsSet);
processDefine.setGlobalParamList(globalParamsList);
}
processDefine.setGlobalParamList(globalParamsList);
processDefine.setUpdateTime(now);
processDefine.setFlag(Flag.YES);
if (processDefineMapper.update(processDefine) > 0) {
@ -336,6 +337,120 @@ public class ProcessDefinitionService extends BaseDAGService {
return result;
}
/**
 * Delete a process definition by id.
 *
 * <p>Deletion is refused when the definition does not exist, the login user is
 * not the owner, the definition is still online, or an attached schedule is
 * still online. A single attached offline schedule is removed together with
 * the definition.
 *
 * @param loginUser           current session user; must own the definition
 *                            (NOTE(review): this check also blocks admin users — confirm intended)
 * @param projectName         project the definition belongs to
 * @param processDefinitionId id of the definition to delete
 * @return status map: SUCCESS on deletion, otherwise the failure reason
 */
@Transactional(value = "TransactionManager", rollbackFor = Exception.class)
public Map<String, Object> deleteProcessDefinitionById(User loginUser, String projectName, Integer processDefinitionId) {

    Map<String, Object> result = new HashMap<>(5);
    Project project = projectMapper.queryByName(projectName);

    Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
    Status resultEnum = (Status) checkResult.get(Constants.STATUS);
    if (resultEnum != Status.SUCCESS) {
        return checkResult;
    }

    ProcessDefinition processDefinition = processDefineMapper.queryByDefineId(processDefinitionId);

    if (processDefinition == null) {
        putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, processDefinitionId);
        return result;
    }

    // Determine if the login user is the owner of the process definition
    if (loginUser.getId() != processDefinition.getUserId()) {
        putMsg(result, Status.USER_NO_OPERATION_PERM);
        return result;
    }

    // check process definition is already online
    if (processDefinition.getReleaseState() == ReleaseState.ONLINE) {
        putMsg(result, Status.PROCESS_DEFINE_STATE_ONLINE, processDefinitionId);
        return result;
    }

    // get the timing according to the process definition
    List<Schedule> schedules = scheduleMapper.queryByProcessDefinitionId(processDefinitionId);
    // fixed: dropped redundant "!schedules.isEmpty() &&" — size() > 1 already implies non-empty
    if (schedules.size() > 1) {
        logger.warn("scheduler num is {},Greater than 1",schedules.size());
        putMsg(result, Status.DELETE_PROCESS_DEFINE_BY_ID_ERROR);
        return result;
    } else if (schedules.size() == 1) {
        Schedule schedule = schedules.get(0);
        if (schedule.getReleaseState() == ReleaseState.OFFLINE) {
            // an offline schedule is deleted along with its definition
            scheduleMapper.delete(schedule.getId());
        } else if (schedule.getReleaseState() == ReleaseState.ONLINE) {
            putMsg(result, Status.SCHEDULE_CRON_STATE_ONLINE, schedule.getId());
            return result;
        }
    }

    int delete = processDefineMapper.delete(processDefinitionId);

    if (delete > 0) {
        putMsg(result, Status.SUCCESS);
    } else {
        putMsg(result, Status.DELETE_PROCESS_DEFINE_BY_ID_ERROR);
    }
    return result;
}
/**
 * Batch delete process definitions by a comma separated id list.
 *
 * <p>Each id is deleted independently via
 * {@code deleteProcessDefinitionById}; ids that fail to delete — including
 * unparseable ids — are collected and reported together.
 *
 * @param loginUser            current session user
 * @param projectName          project the definitions belong to
 * @param processDefinitionIds comma separated process definition ids
 * @return SUCCESS when every id was deleted, otherwise
 *         BATCH_DELETE_PROCESS_DEFINE_BY_IDS_ERROR listing the failed ids
 */
public Map<String, Object> batchDeleteProcessDefinitionByIds(User loginUser, String projectName, String processDefinitionIds) {

    Map<String, Object> result = new HashMap<>(5);
    List<String> deleteFailedIdList = new ArrayList<>();

    Project project = projectMapper.queryByName(projectName);
    Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
    Status resultEnum = (Status) checkResult.get(Constants.STATUS);
    if (resultEnum != Status.SUCCESS) {
        return checkResult;
    }

    if (StringUtils.isNotEmpty(processDefinitionIds)) {
        for (String strProcessDefinitionId : processDefinitionIds.split(",")) {
            try {
                // fixed: parse INSIDE the try — a malformed id is now recorded as a
                // failed id instead of escaping as an uncaught NumberFormatException
                // that aborted the whole batch
                int processDefinitionId = Integer.parseInt(strProcessDefinitionId);
                Map<String, Object> deleteResult = deleteProcessDefinitionById(loginUser, projectName, processDefinitionId);
                if (!Status.SUCCESS.equals(deleteResult.get(Constants.STATUS))) {
                    deleteFailedIdList.add(strProcessDefinitionId);
                    logger.error((String) deleteResult.get(Constants.MSG));
                }
            } catch (Exception e) {
                deleteFailedIdList.add(strProcessDefinitionId);
            }
        }
    }

    if (deleteFailedIdList.isEmpty()) {
        putMsg(result, Status.SUCCESS);
    } else {
        putMsg(result, Status.BATCH_DELETE_PROCESS_DEFINE_BY_IDS_ERROR, StringUtils.join(deleteFailedIdList, ","));
    }
    return result;
}
/**
* release process definition: online / offline
*

80
escheduler-api/src/main/java/cn/escheduler/api/service/ProcessInstanceService.java

@ -30,6 +30,8 @@ import cn.escheduler.common.graph.DAG;
import cn.escheduler.common.model.TaskNode;
import cn.escheduler.common.model.TaskNodeRelation;
import cn.escheduler.common.process.Property;
import cn.escheduler.common.queue.ITaskQueue;
import cn.escheduler.common.queue.TaskQueueFactory;
import cn.escheduler.common.utils.CollectionUtils;
import cn.escheduler.common.utils.DateUtils;
import cn.escheduler.common.utils.JSONUtils;
@ -446,13 +448,13 @@ public class ProcessInstanceService extends BaseDAGService {
/**
* delete process instance by id; at the same time delete task instances and their mapping relation data
*
* @param loginUser
* @param projectName
* @param workflowId
* @param processInstanceId
* @param tasksQueue
* @return
*/
public Map<String, Object> deleteProcessInstanceById(User loginUser, String projectName, Integer workflowId) {
public Map<String, Object> deleteProcessInstanceById(User loginUser, String projectName, Integer processInstanceId,ITaskQueue tasksQueue) {
Map<String, Object> result = new HashMap<>(5);
Project project = projectMapper.queryByName(projectName);
@ -462,16 +464,34 @@ public class ProcessInstanceService extends BaseDAGService {
if (resultEnum != Status.SUCCESS) {
return checkResult;
}
ProcessInstance processInstance = processDao.findProcessInstanceDetailById(workflowId);
ProcessInstance processInstance = processDao.findProcessInstanceDetailById(processInstanceId);
List<TaskInstance> taskInstanceList = processDao.findValidTaskListByProcessId(processInstanceId);
//process instance priority
int processInstancePriority = processInstance.getProcessInstancePriority().ordinal();
if (processInstance == null) {
putMsg(result, Status.PROCESS_INSTANCE_NOT_EXIST, workflowId);
putMsg(result, Status.PROCESS_INSTANCE_NOT_EXIST, processInstanceId);
return result;
}
int delete = processDao.deleteWorkProcessInstanceById(workflowId);
processDao.deleteAllSubWorkProcessByParentId(workflowId);
processDao.deleteWorkProcessMapByParentId(workflowId);
int delete = processDao.deleteWorkProcessInstanceById(processInstanceId);
processDao.deleteAllSubWorkProcessByParentId(processInstanceId);
processDao.deleteWorkProcessMapByParentId(processInstanceId);
if (delete > 0) {
if (CollectionUtils.isNotEmpty(taskInstanceList)){
for (TaskInstance taskInstance : taskInstanceList){
// task instance priority
int taskInstancePriority = taskInstance.getTaskInstancePriority().ordinal();
String nodeValue=processInstancePriority + "_" + processInstanceId + "_" +taskInstancePriority + "_" + taskInstance.getId();
try {
logger.info("delete task queue node : {}",nodeValue);
tasksQueue.removeNode(cn.escheduler.common.Constants.SCHEDULER_TASKS_QUEUE, nodeValue);
}catch (Exception e){
logger.error("delete task queue node : {}", nodeValue);
}
}
}
putMsg(result, Status.SUCCESS);
} else {
putMsg(result, Status.DELETE_PROCESS_INSTANCE_BY_ID_ERROR);
@ -479,6 +499,50 @@ public class ProcessInstanceService extends BaseDAGService {
return result;
}
/**
* batch delete process instance by ids, at the same timedelete task instance and their mapping relation data
*
* @param loginUser
* @param projectName
* @param processInstanceIds
* @return
*/
public Map<String, Object> batchDeleteProcessInstanceByIds(User loginUser, String projectName, String processInstanceIds) {
// task queue
ITaskQueue tasksQueue = TaskQueueFactory.getTaskQueueInstance();
Map<String, Object> result = new HashMap<>(5);
List<Integer> deleteFailedIdList = new ArrayList<Integer>();
Project project = projectMapper.queryByName(projectName);
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultEnum = (Status) checkResult.get(Constants.STATUS);
if (resultEnum != Status.SUCCESS) {
return checkResult;
}
if(StringUtils.isNotEmpty(processInstanceIds)){
String[] processInstanceIdArray = processInstanceIds.split(",");
for (String strProcessInstanceId:processInstanceIdArray) {
int processInstanceId = Integer.parseInt(strProcessInstanceId);
try {
deleteProcessInstanceById(loginUser, projectName, processInstanceId,tasksQueue);
} catch (Exception e) {
deleteFailedIdList.add(processInstanceId);
}
}
}
if(deleteFailedIdList.size() > 0){
putMsg(result, Status.BATCH_DELETE_PROCESS_INSTANCE_BY_IDS_ERROR,StringUtils.join(deleteFailedIdList.toArray(),","));
}else{
putMsg(result, Status.SUCCESS);
}
return result;
}
/**
* view process instance variables
*

11
escheduler-api/src/main/java/cn/escheduler/api/service/ProjectService.java

@ -20,9 +20,11 @@ import cn.escheduler.api.enums.Status;
import cn.escheduler.api.utils.Constants;
import cn.escheduler.api.utils.PageInfo;
import cn.escheduler.common.enums.UserType;
import cn.escheduler.dao.mapper.ProcessDefinitionMapper;
import cn.escheduler.dao.mapper.ProjectMapper;
import cn.escheduler.dao.mapper.ProjectUserMapper;
import cn.escheduler.dao.mapper.UserMapper;
import cn.escheduler.dao.model.ProcessDefinition;
import cn.escheduler.dao.model.Project;
import cn.escheduler.dao.model.ProjectUser;
import cn.escheduler.dao.model.User;
@ -55,6 +57,9 @@ public class ProjectService extends BaseService{
@Autowired
private ProjectUserMapper projectUserMapper;
@Autowired
private ProcessDefinitionMapper processDefinitionMapper;
/**
* create project
*
@ -199,6 +204,12 @@ public class ProjectService extends BaseService{
if (checkResult != null) {
return checkResult;
}
List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryAllDefinitionList(projectId);
if(processDefinitionList.size() > 0){
putMsg(result, Status.DELETE_PROJECT_ERROR_DEFINES_NOT_NULL);
return result;
}
int delete = projectMapper.delete(projectId);
if (delete > 0) {

73
escheduler-api/src/main/java/cn/escheduler/api/service/ResourcesService.java

@ -420,6 +420,41 @@ public class ResourcesService extends BaseService {
return result;
}
/**
 * verify that a resource with the given name and type does not yet exist,
 * neither in the database nor on hdfs
 *
 * @param name      resource name
 * @param type      resource type (FILE or UDF)
 * @param loginUser login user; its tenant determines the hdfs path
 * @return SUCCESS when the name is free, otherwise RESOURCE_EXIST,
 *         RESOURCE_FILE_EXIST or HDFS_OPERATION_ERROR
 */
public Result verifyResourceName(String name, ResourceType type, User loginUser) {
    Result result = new Result();
    putMsg(result, Status.SUCCESS);
    Resource resource = resourcesMapper.queryResourceByNameAndType(name, type.ordinal());
    if (resource != null) {
        logger.error("resource type:{} name:{} has exist, can't create again.", type, name);
        putMsg(result, Status.RESOURCE_EXIST);
    } else {
        try {
            // query tenant inside the try block: a missing tenant record used to
            // throw an uncaught NullPointerException from getTenantCode()
            String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
            String hdfsFilename = getHdfsFileName(type, tenantCode, name);
            if (HadoopUtils.getInstance().exists(hdfsFilename)) {
                logger.error("resource type:{} name:{} has exist in hdfs {}, can't create again.", type, name, hdfsFilename);
                putMsg(result, Status.RESOURCE_FILE_EXIST, hdfsFilename);
            }
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
            putMsg(result, Status.HDFS_OPERATION_ERROR);
        }
    }
    return result;
}
/**
* verify resource by name and type
*
@ -480,6 +515,7 @@ public class ResourcesService extends BaseService {
String hdfsFileName = HadoopUtils.getHdfsFilename(tenantCode, resource.getAlias());
logger.info("resource hdfs path is {} ", hdfsFileName);
try {
if(HadoopUtils.getInstance().exists(hdfsFileName)){
List<String> content = HadoopUtils.getInstance().catFile(hdfsFileName, skipLineNum, limit);
putMsg(result, Status.SUCCESS);
@ -487,6 +523,11 @@ public class ResourcesService extends BaseService {
map.put(ALIAS, resource.getAlias());
map.put(CONTENT, StringUtils.join(content.toArray(), "\n"));
result.setData(map);
}else{
logger.error("read file {} not exist in hdfs", hdfsFileName);
putMsg(result, Status.RESOURCE_FILE_NOT_EXIST,hdfsFileName);
}
} catch (Exception e) {
logger.error(String.format("Resource %s read failed", hdfsFileName), e);
putMsg(result, Status.HDFS_OPERATION_ERROR);
@ -530,17 +571,14 @@ public class ResourcesService extends BaseService {
String name = fileName.trim() + "." + nameSuffix;
//check file already exists
Resource resource = resourcesMapper.queryResourceByNameAndType(name, type.ordinal());
if (resource != null) {
logger.error("resource {} has exist, can't recreate .", name);
putMsg(result, Status.RESOURCE_EXIST);
result = verifyResourceName(name,type,loginUser);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
// save data
Date now = new Date();
resource = new Resource(name,name,desc,loginUser.getId(),type,content.getBytes().length,now,now);
Resource resource = new Resource(name,name,desc,loginUser.getId(),type,content.getBytes().length,now,now);
resourcesMapper.insert(resource);
@ -569,6 +607,7 @@ public class ResourcesService extends BaseService {
* @param resourceId
* @return
*/
@Transactional(value = "TransactionManager",rollbackFor = Exception.class)
public Result updateResourceContent(int resourceId, String content) {
Result result = new Result();
@ -597,6 +636,10 @@ public class ResourcesService extends BaseService {
}
}
resource.setSize(content.getBytes().length);
resource.setUpdateTime(new Date());
resourcesMapper.update(resource);
User user = userMapper.queryDetailsById(resource.getUserId());
String tenantCode = tenantMapper.queryById(user.getTenantId()).getTenantCode();
@ -643,6 +686,7 @@ public class ResourcesService extends BaseService {
logger.error("{} is not exist", resourcePath);
result.setCode(Status.HDFS_OPERATION_ERROR.getCode());
result.setMsg(String.format("%s is not exist", resourcePath));
return result;
}
} catch (Exception e) {
logger.error(e.getMessage(), e);
@ -809,6 +853,23 @@ public class ResourcesService extends BaseService {
return hdfsFileName;
}
/**
 * Resolve the full hdfs file name for a resource according to its type.
 *
 * @param resourceType resource type (FILE or UDF)
 * @param tenantCode   tenant code owning the resource
 * @param hdfsFileName bare resource name
 * @return the qualified hdfs file name; returned unchanged for any other type
 */
private String getHdfsFileName(ResourceType resourceType, String tenantCode, String hdfsFileName) {
    switch (resourceType) {
        case FILE:
            return HadoopUtils.getHdfsFilename(tenantCode, hdfsFileName);
        case UDF:
            return HadoopUtils.getHdfsUdfFilename(tenantCode, hdfsFileName);
        default:
            return hdfsFileName;
    }
}
/**
* get authorized resource list
*

49
escheduler-api/src/main/java/cn/escheduler/api/service/SchedulerService.java

@ -488,4 +488,53 @@ public class SchedulerService extends BaseService {
}
return null;
}
/**
 * delete schedule by id
 *
 * Preconditions checked in order: project accessible to the login user,
 * schedule exists, login user owns it, and it is not online.
 *
 * @param loginUser   login user
 * @param projectName project name
 * @param scheduleId  schedule id
 * @return result map with status (and a message on failure)
 */
public Map<String, Object> deleteScheduleById(User loginUser, String projectName, Integer scheduleId) {
    Map<String, Object> result = new HashMap<>(5);

    Project project = projectMapper.queryByName(projectName);
    Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
    if (checkResult.get(Constants.STATUS) != Status.SUCCESS) {
        return checkResult;
    }

    Schedule schedule = scheduleMapper.queryById(scheduleId);
    if (schedule == null) {
        putMsg(result, Status.SCHEDULE_CRON_NOT_EXISTS, scheduleId);
        return result;
    }

    // only the owner of the schedule may delete it
    if (schedule.getUserId() != loginUser.getId()) {
        putMsg(result, Status.USER_NO_OPERATION_PERM);
        return result;
    }

    // an online schedule must be taken offline before it can be deleted
    if (ReleaseState.ONLINE == schedule.getReleaseState()) {
        putMsg(result, Status.SCHEDULE_CRON_STATE_ONLINE, schedule.getId());
        return result;
    }

    if (scheduleMapper.delete(scheduleId) > 0) {
        putMsg(result, Status.SUCCESS);
    } else {
        putMsg(result, Status.DELETE_SCHEDULE_CRON_BY_ID_ERROR);
    }
    return result;
}
}

25
escheduler-api/src/main/java/cn/escheduler/api/service/TenantService.java

@ -26,6 +26,7 @@ import cn.escheduler.dao.mapper.TenantMapper;
import cn.escheduler.dao.model.Tenant;
import cn.escheduler.dao.model.User;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.FileStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
@ -165,7 +166,7 @@ public class TenantService extends BaseService{
Tenant tenant = tenantMapper.queryById(id);
if (tenant == null){
putMsg(result, Status.USER_NOT_EXIST, id);
putMsg(result, Status.TENANT_NOT_EXIST);
return result;
}
@ -219,6 +220,7 @@ public class TenantService extends BaseService{
* @param id
* @return
*/
@Transactional(value = "TransactionManager", rollbackFor = Exception.class)
public Map<String, Object> deleteTenantById(User loginUser, int id) throws Exception {
Map<String, Object> result = new HashMap<>(5);
@ -228,8 +230,29 @@ public class TenantService extends BaseService{
Tenant tenant = tenantMapper.queryById(id);
if (tenant == null){
putMsg(result, Status.TENANT_NOT_EXIST);
return result;
}
// if hdfs startup
if (PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
String tenantPath = HadoopUtils.getHdfsDataBasePath() + "/" + tenant.getTenantCode();
String resourcePath = HadoopUtils.getHdfsDir(tenant.getTenantCode());
FileStatus[] fileStatus = HadoopUtils.getInstance().listFileStatus(resourcePath);
if (fileStatus.length > 0) {
putMsg(result, Status.HDFS_TERANT_RESOURCES_FILE_EXISTS);
return result;
}
fileStatus = HadoopUtils.getInstance().listFileStatus(HadoopUtils.getHdfsUdfDir(tenant.getTenantCode()));
if (fileStatus.length > 0) {
putMsg(result, Status.HDFS_TERANT_UDFS_FILE_EXISTS);
return result;
}
HadoopUtils.getInstance().delete(tenantPath, true);
}
tenantMapper.deleteById(id);
putMsg(result, Status.SUCCESS);

24
escheduler-api/src/main/java/cn/escheduler/api/service/UsersService.java

@ -115,6 +115,9 @@ public class UsersService extends BaseService {
user.setUserType(UserType.GENERAL_USER);
user.setCreateTime(now);
user.setUpdateTime(now);
if (StringUtils.isEmpty(queue)){
queue = "";
}
user.setQueue(queue);
// save user
@ -518,6 +521,27 @@ public class UsersService extends BaseService {
return result;
}
/**
 * query all general (non-admin) users; admin-only operation
 *
 * @param loginUser login user (must be admin)
 * @return result map whose DATA_LIST entry holds the user list on success
 */
public Map<String, Object> queryAllGeneralUsers(User loginUser) {
    Map<String, Object> result = new HashMap<>(5);
    // reject callers without admin rights
    if (check(result, !isAdmin(loginUser), Status.USER_NO_OPERATION_PERM, Constants.STATUS)) {
        return result;
    }
    List<User> generalUsers = userMapper.queryAllGeneralUsers();
    result.put(Constants.DATA_LIST, generalUsers);
    putMsg(result, Status.SUCCESS);
    return result;
}
/**
* query user list
*

1
escheduler-api/src/main/resources/i18n/messages.properties

@ -226,3 +226,4 @@ DELETE_DATA_SOURCE_NOTES=delete data source
VERIFY_DATA_SOURCE_NOTES=verify data source
UNAUTHORIZED_DATA_SOURCE_NOTES=unauthorized data source
AUTHORIZED_DATA_SOURCE_NOTES=authorized data source
DELETE_SCHEDULER_BY_ID_NOTES=delete scheduler by id

1
escheduler-api/src/main/resources/i18n/messages_en_US.properties

@ -226,3 +226,4 @@ DELETE_DATA_SOURCE_NOTES=delete data source
VERIFY_DATA_SOURCE_NOTES=verify data source
UNAUTHORIZED_DATA_SOURCE_NOTES=unauthorized data source
AUTHORIZED_DATA_SOURCE_NOTES=authorized data source
DELETE_SCHEDULER_BY_ID_NOTES=delete scheduler by id

1
escheduler-api/src/main/resources/i18n/messages_zh_CN.properties

@ -224,3 +224,4 @@ DELETE_DATA_SOURCE_NOTES=删除数据源
VERIFY_DATA_SOURCE_NOTES=验证数据源
UNAUTHORIZED_DATA_SOURCE_NOTES=未授权的数据源
AUTHORIZED_DATA_SOURCE_NOTES=授权的数据源
DELETE_SCHEDULER_BY_ID_NOTES=根据定时id删除定时数据

24
escheduler-api/src/test/java/cn/escheduler/api/controller/ResourcesControllerTest.java

@ -34,6 +34,8 @@ import org.springframework.test.context.junit4.SpringRunner;
import org.springframework.test.web.servlet.MockMvc;
import org.springframework.test.web.servlet.MvcResult;
import org.springframework.test.web.servlet.setup.MockMvcBuilders;
import org.springframework.util.LinkedMultiValueMap;
import org.springframework.util.MultiValueMap;
import org.springframework.web.context.WebApplicationContext;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get;
@ -43,7 +45,7 @@ import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.
@RunWith(SpringRunner.class)
@SpringBootTest
public class ResourcesControllerTest {
private static Logger logger = LoggerFactory.getLogger(QueueControllerTest.class);
private static Logger logger = LoggerFactory.getLogger(ResourcesControllerTest.class);
private MockMvc mockMvc;
@ -71,4 +73,24 @@ public class ResourcesControllerTest {
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
// GET /resources/verify-name should answer SUCCESS when the name/type pair is checked
// NOTE(review): relies on a hard-coded sessionId and on backing data being present
// in the test environment — confirm the session exists before relying on this test
@Test
public void verifyResourceName() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("name","list_resources_1.sh");
paramsMap.add("type","FILE");
MvcResult mvcResult = mockMvc.perform(get("/resources/verify-name")
.header("sessionId", "c24ed9d9-1c20-48a0-bd9c-5cfca14a4dcb")
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
// the controller wraps its answer in a Result envelope; assert on the embedded code
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
}

23
escheduler-api/src/test/java/cn/escheduler/api/service/ProcessDefinitionServiceTest.java

@ -63,4 +63,27 @@ public class ProcessDefinitionServiceTest {
Assert.assertEquals(Status.SUCCESS, map.get(Constants.STATUS));
logger.info(JSON.toJSONString(map));
}
// NOTE(review): environment-dependent test — user id 2, project "li_sql_test" and
// process definition id 6 must pre-exist in the target database for this to pass
@Test
public void deleteProcessDefinitionByIdTest() throws Exception {
User loginUser = new User();
loginUser.setId(2);
loginUser.setUserType(UserType.GENERAL_USER);
Map<String, Object> map = processDefinitionService.deleteProcessDefinitionById(loginUser, "li_sql_test", 6);
Assert.assertEquals(Status.SUCCESS, map.get(Constants.STATUS));
logger.info(JSON.toJSONString(map));
}
// NOTE(review): environment-dependent test — user id 2, project "li_test_1" and
// process definitions 2 and 3 must pre-exist for SUCCESS to be returned
@Test
public void batchDeleteProcessDefinitionByIds() throws Exception {
User loginUser = new User();
loginUser.setId(2);
loginUser.setUserType(UserType.GENERAL_USER);
Map<String, Object> map = processDefinitionService.batchDeleteProcessDefinitionByIds(loginUser, "li_test_1", "2,3");
Assert.assertEquals(Status.SUCCESS, map.get(Constants.STATUS));
logger.info(JSON.toJSONString(map));
}
}

12
escheduler-api/src/test/java/cn/escheduler/api/service/ProcessInstanceServiceTest.java

@ -75,4 +75,16 @@ public class ProcessInstanceServiceTest {
Assert.assertEquals(Status.SUCCESS, map.get(Constants.STATUS));
logger.info(JSON.toJSONString(map));
}
// NOTE(review): environment-dependent test — user id 2, project "li_test_1" and
// process instances 4, 2 and 300 must pre-exist; the assertion expects every
// deletion in the batch to succeed
@Test
public void batchDeleteProcessInstanceByIds() throws Exception {
User loginUser = new User();
loginUser.setId(2);
loginUser.setUserType(UserType.GENERAL_USER);
Map<String, Object> map = processInstanceService.batchDeleteProcessInstanceByIds(loginUser, "li_test_1", "4,2,300");
Assert.assertEquals(Status.SUCCESS, map.get(Constants.STATUS));
logger.info(JSON.toJSONString(map));
}
}

45
escheduler-common/pom.xml

@ -4,7 +4,7 @@
<parent>
<artifactId>escheduler</artifactId>
<groupId>cn.analysys</groupId>
<version>1.0.2-SNAPSHOT</version>
<version>1.0.4-SNAPSHOT</version>
</parent>
<artifactId>escheduler-common</artifactId>
<name>escheduler-common</name>
@ -148,10 +148,10 @@
<groupId>javax.servlet.jsp</groupId>
<artifactId>jsp-api</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-java</artifactId>
</exclusion>
<!--<exclusion>-->
<!--<groupId>com.google.protobuf</groupId>-->
<!--<artifactId>protobuf-java</artifactId>-->
<!--</exclusion>-->
</exclusions>
</dependency>
@ -175,10 +175,10 @@
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-xc</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-java</artifactId>
</exclusion>
<!--<exclusion>-->
<!--<groupId>com.google.protobuf</groupId>-->
<!--<artifactId>protobuf-java</artifactId>-->
<!--</exclusion>-->
<exclusion>
<groupId>org.fusesource.leveldbjni</groupId>
<artifactId>leveldbjni-all</artifactId>
@ -230,6 +230,29 @@
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-aws</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
</exclusion>
<exclusion>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-core</artifactId>
</exclusion>
<exclusion>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
</exclusion>
<exclusion>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-annotations</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
@ -348,6 +371,10 @@
<artifactId>servlet-api-2.5</artifactId>
<groupId>org.mortbay.jetty</groupId>
</exclusion>
<exclusion>
<artifactId>jasper-runtime</artifactId>
<groupId>tomcat</groupId>
</exclusion>
</exclusions>
</dependency>

10
escheduler-common/src/main/java/cn/escheduler/common/Constants.java

@ -162,6 +162,11 @@ public final class Constants {
*/
public static final String ZOOKEEPER_ESCHEDULER_LOCK_FAILOVER_WORKERS = "zookeeper.escheduler.lock.failover.workers";
/**
* MasterServer startup failover running and fault tolerance process
*/
public static final String ZOOKEEPER_ESCHEDULER_LOCK_FAILOVER_STARTUP_MASTERS = "zookeeper.escheduler.lock.failover.startup.masters";
/**
* need send warn times when master server or worker server failover
*/
@ -252,6 +257,11 @@ public final class Constants {
*/
public static final String YYYY_MM_DD_HH_MM_SS = "yyyy-MM-dd HH:mm:ss";
/**
* date format of yyyyMMdd
*/
public static final String YYYYMMDD = "yyyyMMdd";
/**
* date format of yyyyMMddHHmmss
*/

35
escheduler-common/src/main/java/cn/escheduler/common/enums/TaskRecordStatus.java

@ -0,0 +1,35 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.enums;
/**
 * task record status
 */
public enum TaskRecordStatus {
/**
 * status
 * 0 success
 * 1 failure
 * 2 exception
 */
SUCCESS,FAILURE,EXCEPTION
}

8
escheduler-common/src/main/java/cn/escheduler/common/enums/TaskType.java

@ -30,5 +30,11 @@ public enum TaskType {
* 6 PYTHON
* 7 DEPENDENT
*/
SHELL,SQL, SUB_PROCESS,PROCEDURE,MR,SPARK,PYTHON,DEPENDENT
SHELL,SQL, SUB_PROCESS,PROCEDURE,MR,SPARK,PYTHON,DEPENDENT;
/**
 * Whether the given type name denotes a "normal" task, i.e. any type other
 * than SUB_PROCESS or DEPENDENT.
 *
 * @param typeName exact enum constant name; TaskType.valueOf throws
 *                 IllegalArgumentException for an unknown name
 * @return true for normal task types
 */
public static boolean typeIsNormalTask(String typeName) {
TaskType taskType = TaskType.valueOf(typeName);
return !(taskType == TaskType.SUB_PROCESS || taskType == TaskType.DEPENDENT);
}
}

3
escheduler-common/src/main/java/cn/escheduler/common/queue/TaskQueueZkImpl.java

@ -225,7 +225,10 @@ public class TaskQueueZkImpl extends AbstractZKClient implements ITaskQueue {
String taskIdPath = tasksQueuePath + nodeValue;
logger.info("consume task {}", taskIdPath);
try{
Stat stat = zk.checkExists().forPath(taskIdPath);
if(stat != null){
zk.delete().forPath(taskIdPath);
}
}catch(Exception e){
logger.error(String.format("delete task:%s from zookeeper fail, exception:" ,nodeValue) ,e);
}

14
escheduler-common/src/main/java/cn/escheduler/common/task/sql/SqlParameters.java

@ -73,6 +73,11 @@ public class SqlParameters extends AbstractParameters {
*/
private List<String> postStatements;
/**
* title
*/
private String title;
/**
* receivers
*/
@ -139,6 +144,14 @@ public class SqlParameters extends AbstractParameters {
this.connParams = connParams;
}
public String getTitle() {
return title;
}
public void setTitle(String title) {
this.title = title;
}
public String getReceivers() {
return receivers;
}
@ -190,6 +203,7 @@ public class SqlParameters extends AbstractParameters {
", udfs='" + udfs + '\'' +
", showType='" + showType + '\'' +
", connParams='" + connParams + '\'' +
", title='" + title + '\'' +
", receivers='" + receivers + '\'' +
", receiversCc='" + receiversCc + '\'' +
", preStatements=" + preStatements +

9
escheduler-common/src/main/java/cn/escheduler/common/utils/DependentUtils.java

@ -80,6 +80,9 @@ public class DependentUtils {
case "last3Hours":
result = DependentDateUtils.getLastHoursInterval(businessDate, 3);
break;
case "today":
result = DependentDateUtils.getTodayInterval(businessDate);
break;
case "last1Days":
result = DependentDateUtils.getLastDayInterval(businessDate, 1);
break;
@ -92,6 +95,9 @@ public class DependentUtils {
case "last7Days":
result = DependentDateUtils.getLastDayInterval(businessDate, 7);
break;
case "thisWeek":
result = DependentDateUtils.getThisWeekInterval(businessDate);
break;
case "lastWeek":
result = DependentDateUtils.getLastWeekInterval(businessDate);
break;
@ -116,6 +122,9 @@ public class DependentUtils {
case "lastSunday":
result = DependentDateUtils.getLastWeekOneDayInterval(businessDate, 7);
break;
case "thisMonth":
result = DependentDateUtils.getThisMonthInterval(businessDate);
break;
case "lastMonth":
result = DependentDateUtils.getLastMonthInterval(businessDate);
break;

31
escheduler-common/src/main/java/cn/escheduler/common/utils/HadoopUtils.java

@ -28,18 +28,21 @@ import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.yarn.client.cli.RMAdminCLI;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.*;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static cn.escheduler.common.Constants.*;
import static cn.escheduler.common.utils.PropertyUtils.getInt;
import static cn.escheduler.common.utils.PropertyUtils.getString;
import static cn.escheduler.common.utils.PropertyUtils.getPrefixedProperties;
/**
* hadoop utils
@ -76,7 +79,9 @@ public class HadoopUtils implements Closeable {
if(defaultFS.startsWith("file")){
String defaultFSProp = getString(FS_DEFAULTFS);
if(StringUtils.isNotBlank(defaultFSProp)){
Map<String, String> fsRelatedProps = getPrefixedProperties("fs.");
configuration.set(FS_DEFAULTFS,defaultFSProp);
fsRelatedProps.entrySet().stream().forEach(entry -> configuration.set(entry.getKey(), entry.getValue()));
}else{
logger.error("property:{} can not to be empty, please set!");
throw new RuntimeException("property:{} can not to be empty, please set!");
@ -259,6 +264,22 @@ public class HadoopUtils implements Closeable {
return fs.exists(new Path(hdfsFilePath));
}
/**
 * Gets a list of files in the directory
 *
 * @param filePath hdfs path to list
 * @return {@link FileStatus} array for the direct children of filePath
 * @throws Exception if the underlying filesystem call fails (original IOException is kept as the cause)
 */
public FileStatus[] listFileStatus(String filePath)throws Exception{
    try {
        // build the Path once (the original constructed a second, unused Path instance)
        return fs.listStatus(new Path(filePath));
    } catch (IOException e) {
        logger.error("Get file list exception", e);
        throw new Exception("Get file list exception", e);
    }
}
/**
* Renames Path src to Path dst. Can take place on local fs
* or remote DFS.
@ -316,7 +337,13 @@ public class HadoopUtils implements Closeable {
* @return data hdfs path
*/
/**
 * Resolve the configured hdfs data base path.
 * Removes the leftover unconditional return that made the "/" special case unreachable.
 *
 * @return the configured base path, or "" when it is configured as "/"
 */
public static String getHdfsDataBasePath() {
    String basePath = getString(DATA_STORE_2_HDFS_BASEPATH);
    if ("/".equals(basePath)) {
        // if basepath is configured to /, the generated url may be //default/resources (with extra leading /)
        return "";
    }
    return basePath;
}
/**
@ -365,7 +392,7 @@ public class HadoopUtils implements Closeable {
* @return file directory of tenants on hdfs
*/
/**
 * Build the hdfs directory for a tenant.
 * Uses getHdfsDataBasePath() rather than the raw property so a base path of
 * "/" does not produce a doubled leading slash (the stray duplicate return
 * reading the raw property is removed).
 *
 * @param tenantCode tenant code
 * @return file directory of the tenant on hdfs
 */
private static String getHdfsTenantDir(String tenantCode) {
    return String.format("%s/%s", getHdfsDataBasePath(), tenantCode);
}

70
escheduler-common/src/main/java/cn/escheduler/common/utils/IpUtils.java

@ -0,0 +1,70 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.utils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * ip utils (class doc previously mislabelled this as "http utils")
 */
public class IpUtils {

    private static final Logger logger = LoggerFactory.getLogger(IpUtils.class);

    // separator between the four octets of a dotted-quad IPv4 address
    public static final String DOT = ".";

    /**
     * Convert a dotted-quad IPv4 string to its numeric form.
     *
     * @param ipStr ip string, e.g. "10.1.2.3"
     * @return the 32-bit address packed into a Long
     * @throws IllegalArgumentException if the string does not contain exactly four
     *         octets (previously this surfaced as ArrayIndexOutOfBoundsException)
     * @throws NumberFormatException if an octet is not numeric
     */
    public static Long ipToLong(String ipStr) {
        String[] ipSet = ipStr.split("\\" + DOT);
        if (ipSet.length != 4) {
            throw new IllegalArgumentException("invalid IPv4 address: " + ipStr);
        }
        return Long.parseLong(ipSet[0]) << 24 | Long.parseLong(ipSet[1]) << 16 | Long.parseLong(ipSet[2]) << 8 | Long.parseLong(ipSet[3]);
    }

    /**
     * long to ip
     *
     * @param ipLong the long number converted from IP
     * @return dotted-quad String
     */
    public static String longToIp(long ipLong) {
        long tmp = 0xFF;
        StringBuilder sb = new StringBuilder(16);
        sb.append(ipLong >> 24 & tmp).append(DOT)
                .append(ipLong >> 16 & tmp).append(DOT)
                .append(ipLong >> 8 & tmp).append(DOT)
                .append(ipLong & tmp);
        return sb.toString();
    }

    // manual smoke test: round-trips one address through both conversions
    public static void main(String[] args) {
        long ipLong = ipToLong("11.3.4.5");
        logger.info(longToIp(ipLong));
    }
}

9
escheduler-common/src/main/java/cn/escheduler/common/utils/JSONUtils.java

@ -27,10 +27,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TimeZone;
import java.util.*;
/**
* json utils
@ -109,7 +106,7 @@ public class JSONUtils {
*/
public static <T> List<T> toList(String json, Class<T> clazz) {
if (StringUtils.isEmpty(json)) {
return null;
return new ArrayList<>();
}
try {
return JSONArray.parseArray(json, clazz);
@ -117,7 +114,7 @@ public class JSONUtils {
logger.error("JSONArray.parseArray exception!",e);
}
return null;
return new ArrayList<>();
}

2
escheduler-common/src/main/java/cn/escheduler/common/utils/OSUtils.java

@ -220,7 +220,7 @@ public class OSUtils {
* @throws IOException
*/
/**
 * Execute a shell command and return its output.
 * Fixes the stray hard-coded {@code execCommand("groups")} return that ignored
 * the {@code command} argument and made the correct call unreachable.
 *
 * @param command the shell command to execute
 * @return the command output
 * @throws IOException if execution fails
 */
public static String exeShell(String command) throws IOException {
    return ShellExecutor.execCommand(command);
}
/**

14
escheduler-common/src/main/java/cn/escheduler/common/utils/ParameterUtils.java

@ -159,4 +159,18 @@ public class ParameterUtils {
}
return null;
}
/**
 * handle escapes: replace every "%" in the input with "////%"
 *
 * @param inputString string to escape; may be null or empty
 * @return the escaped string, or the input unchanged when it is null or empty
 */
public static String handleEscapes(String inputString) {
    if (inputString == null || inputString.isEmpty()) {
        return inputString;
    }
    return inputString.replace("%", "////%");
}
}

17
escheduler-common/src/main/java/cn/escheduler/common/utils/PropertyUtils.java

@ -22,6 +22,8 @@ import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import static cn.escheduler.common.Constants.COMMON_PROPERTIES_PATH;
@ -189,4 +191,19 @@ public class PropertyUtils {
String val = getString(key);
return val == null ? defaultValue : Enum.valueOf(type, val);
}
/**
 * get all properties with specified prefix, like: fs.
 *
 * @param prefix prefix to search
 * @return map of matching property names to their values (empty when none match)
 */
public static Map<String, String> getPrefixedProperties(String prefix) {
    Map<String, String> matchedProperties = new HashMap<>();
    properties.stringPropertyNames().stream()
            .filter(propName -> propName.startsWith(prefix))
            .forEach(propName -> matchedProperties.put(propName, properties.getProperty(propName)));
    return matchedProperties;
}
}

35
escheduler-common/src/main/java/cn/escheduler/common/utils/dependent/DependentDateUtils.java

@ -42,6 +42,21 @@ public class DependentDateUtils {
return dateIntervals;
}
/**
 * get today day interval list: one interval spanning the business date's
 * whole day, from start-of-day to end-of-day
 *
 * @param businessDate business date
 * @return single-element interval list
 */
public static List<DateInterval> getTodayInterval(Date businessDate){
    DateInterval wholeDay = new DateInterval(
            DateUtils.getStartOfDay(businessDate),
            DateUtils.getEndOfDay(businessDate));
    List<DateInterval> intervals = new ArrayList<>();
    intervals.add(wholeDay);
    return intervals;
}
/**
* get last day interval list
* @param businessDate
@ -61,6 +76,16 @@ public class DependentDateUtils {
return dateIntervals;
}
/**
 * get interval between this month first day and businessDate
 *
 * @param businessDate business date
 * @return day-by-day interval list from the first day of the month up to businessDate
 */
public static List<DateInterval> getThisMonthInterval(Date businessDate) {
    return getDateIntervalListBetweenTwoDates(DateUtils.getFirstDayOfMonth(businessDate), businessDate);
}
/**
* get interval between last month first day and last day
* @param businessDate
@ -93,6 +118,16 @@ public class DependentDateUtils {
}
}
/**
 * get interval between monday and businessDate of this week
 *
 * @param businessDate business date
 * @return day-by-day interval list from this week's Monday up to businessDate
 */
public static List<DateInterval> getThisWeekInterval(Date businessDate) {
    return getDateIntervalListBetweenTwoDates(DateUtils.getMonday(businessDate), businessDate);
}
/**
* get interval between monday to sunday of last week
* default set monday the first day of week

7
escheduler-common/src/main/java/cn/escheduler/common/utils/placeholder/BusinessTimeUtils.java

@ -50,9 +50,16 @@ public class BusinessTimeUtils {
case RECOVER_TOLERANCE_FAULT_PROCESS:
case RECOVER_SUSPENDED_PROCESS:
case START_FAILURE_TASK_PROCESS:
case REPEAT_RUNNING:
case SCHEDULER:
default:
businessDate = addDays(new Date(), -1);
if (runTime != null){
/**
* If there is a scheduled time, take the scheduling time. Recovery from failed nodes, suspension of recovery, re-run for scheduling
*/
businessDate = addDays(runTime, -1);
}
break;
}
Date businessCurrentDate = addDays(businessDate, 1);

1
escheduler-common/src/main/resources/zookeeper.properties

@ -16,6 +16,7 @@ zookeeper.escheduler.lock.workers=/escheduler/lock/workers
#escheduler failover directory
zookeeper.escheduler.lock.failover.masters=/escheduler/lock/failover/masters
zookeeper.escheduler.lock.failover.workers=/escheduler/lock/failover/workers
zookeeper.escheduler.lock.failover.startup.masters=/escheduler/lock/failover/startup-masters
#zookeeper session timeout
zookeeper.session.timeout=300

31
escheduler-common/src/test/java/cn/escheduler/common/utils/DependentUtilsTest.java

@ -52,6 +52,10 @@ public class DependentUtilsTest {
public void getDateIntervalList() {
Date curDay = DateUtils.stringToDate("2019-02-05 00:00:00");
DateInterval diCur = new DateInterval(DateUtils.getStartOfDay(curDay),
DateUtils.getEndOfDay(curDay));
Date day1 = DateUtils.stringToDate("2019-02-04 00:00:00");
DateInterval di1 = new DateInterval(DateUtils.getStartOfDay(day1),
DateUtils.getEndOfDay(day1));
@ -70,6 +74,33 @@ public class DependentUtilsTest {
Assert.assertEquals(dateIntervals.get(1), di1);
Assert.assertEquals(dateIntervals.get(0), di2);
dateValue = "today";
dateIntervals = DependentUtils.getDateIntervalList(curDay, dateValue);
Assert.assertEquals(dateIntervals.get(0), diCur);
dateValue = "thisWeek";
Date firstWeekDay = DateUtils.getMonday(curDay);
dateIntervals = DependentUtils.getDateIntervalList(curDay, dateValue);
DateInterval weekHead = new DateInterval(DateUtils.getStartOfDay(firstWeekDay), DateUtils.getEndOfDay(firstWeekDay));
DateInterval weekThis = new DateInterval(DateUtils.getStartOfDay(curDay), DateUtils.getEndOfDay(curDay));
Assert.assertEquals(dateIntervals.get(0), weekHead);
Assert.assertEquals(dateIntervals.get(dateIntervals.size() - 1), weekThis);
dateValue = "thisMonth";
Date firstMonthDay = DateUtils.getFirstDayOfMonth(curDay);
dateIntervals = DependentUtils.getDateIntervalList(curDay, dateValue);
DateInterval monthHead = new DateInterval(DateUtils.getStartOfDay(firstMonthDay), DateUtils.getEndOfDay(firstMonthDay));
DateInterval monthThis = new DateInterval(DateUtils.getStartOfDay(curDay), DateUtils.getEndOfDay(curDay));
Assert.assertEquals(dateIntervals.get(0), monthHead);
Assert.assertEquals(dateIntervals.get(dateIntervals.size() - 1), monthThis);
}
@Test

8
escheduler-dao/pom.xml

@ -4,7 +4,7 @@
<parent>
<groupId>cn.analysys</groupId>
<artifactId>escheduler</artifactId>
<version>1.0.2-SNAPSHOT</version>
<version>1.0.4-SNAPSHOT</version>
</parent>
<artifactId>escheduler-dao</artifactId>
<name>escheduler-dao</name>
@ -125,6 +125,12 @@
<dependency>
<groupId>cn.analysys</groupId>
<artifactId>escheduler-common</artifactId>
<exclusions>
<exclusion>
<artifactId>protobuf-java</artifactId>
<groupId>com.google.protobuf</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.springframework</groupId>

67
escheduler-dao/src/main/java/cn/escheduler/dao/ProcessDao.java

@ -20,6 +20,7 @@ import cn.escheduler.common.Constants;
import cn.escheduler.common.enums.*;
import cn.escheduler.common.model.DateInterval;
import cn.escheduler.common.model.TaskNode;
import cn.escheduler.common.process.Property;
import cn.escheduler.common.queue.ITaskQueue;
import cn.escheduler.common.queue.TaskQueueFactory;
import cn.escheduler.common.task.subprocess.SubProcessParameters;
@ -42,6 +43,7 @@ import org.springframework.stereotype.Component;
import org.springframework.transaction.annotation.Transactional;
import java.util.*;
import java.util.stream.Collectors;
import static cn.escheduler.common.Constants.*;
import static cn.escheduler.dao.datasource.ConnectionFactory.getMapper;
@ -587,10 +589,12 @@ public class ProcessDao extends AbstractBaseDao {
case START_FAILURE_TASK_PROCESS:
// find failed tasks and init these tasks
List<Integer> failedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.FAILURE);
List<Integer> toleranceList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.NEED_FAULT_TOLERANCE);
List<Integer> killedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.KILL);
cmdParam.remove(Constants.CMDPARAM_RECOVERY_START_NODE_STRING);
failedList.addAll(killedList);
failedList.addAll(toleranceList);
for(Integer taskId : failedList){
initTaskInstance(this.findTaskInstanceById(taskId));
}
@ -690,41 +694,62 @@ public class ProcessDao extends AbstractBaseDao {
* handle sub work process instance, update relation table and command parameters
* set sub work process flag, extends parent work process command parameters.
*/
public ProcessInstance setSubProcessParam(ProcessInstance processInstance){
String cmdParam = processInstance.getCommandParam();
public ProcessInstance setSubProcessParam(ProcessInstance subProcessInstance){
String cmdParam = subProcessInstance.getCommandParam();
if(StringUtils.isEmpty(cmdParam)){
return processInstance;
return subProcessInstance;
}
Map<String, String> paramMap = JSONUtils.toMap(cmdParam);
// write sub process id into cmd param.
if(paramMap.containsKey(CMDPARAM_SUB_PROCESS)
&& CMDPARAM_EMPTY_SUB_PROCESS.equals(paramMap.get(CMDPARAM_SUB_PROCESS))){
paramMap.remove(CMDPARAM_SUB_PROCESS);
paramMap.put(CMDPARAM_SUB_PROCESS, String.valueOf(processInstance.getId()));
processInstance.setCommandParam(JSONUtils.toJson(paramMap));
processInstance.setIsSubProcess(Flag.YES);
this.saveProcessInstance(processInstance);
paramMap.put(CMDPARAM_SUB_PROCESS, String.valueOf(subProcessInstance.getId()));
subProcessInstance.setCommandParam(JSONUtils.toJson(paramMap));
subProcessInstance.setIsSubProcess(Flag.YES);
this.saveProcessInstance(subProcessInstance);
}
// copy parent instance user def params to sub process..
String parentInstanceId = paramMap.get(CMDPARAM_SUB_PROCESS_PARENT_INSTANCE_ID);
if(StringUtils.isNotEmpty(parentInstanceId)){
ProcessInstance parentInstance = findProcessInstanceDetailById(Integer.parseInt(parentInstanceId));
if(parentInstance != null){
processInstance.setGlobalParams(parentInstance.getGlobalParams());
this.saveProcessInstance(processInstance);
subProcessInstance.setGlobalParams(
joinGlobalParams(parentInstance.getGlobalParams(), subProcessInstance.getGlobalParams()));
this.saveProcessInstance(subProcessInstance);
}else{
logger.error("sub process command params error, cannot find parent instance: {} ", cmdParam);
}
}
ProcessInstanceMap processInstanceMap = JSONUtils.parseObject(cmdParam, ProcessInstanceMap.class);
if(processInstanceMap == null || processInstanceMap.getParentProcessInstanceId() == 0){
return processInstance;
return subProcessInstance;
}
// update sub process id to process map table
processInstanceMap.setProcessInstanceId(processInstance.getId());
processInstanceMap.setProcessInstanceId(subProcessInstance.getId());
this.updateWorkProcessInstanceMap(processInstanceMap);
return processInstance;
return subProcessInstance;
}
/**
 * Join the parent process's global params into the sub process's global params.
 * Only keys that do NOT already exist in the sub process are taken from the
 * parent, so sub-process definitions always win on conflict.
 *
 * @param parentGlobalParams JSON array of Property from the parent instance
 * @param subGlobalParams    JSON array of Property from the sub instance
 * @return merged params serialized back to JSON
 */
private String joinGlobalParams(String parentGlobalParams, String subGlobalParams){
    List<Property> parentPropertyList = JSONUtils.toList(parentGlobalParams, Property.class);
    List<Property> subPropertyList = JSONUtils.toList(subGlobalParams, Property.class);
    // Collect only the existing keys. Using a Set instead of
    // Collectors.toMap(getProp, getValue) avoids an IllegalStateException on
    // duplicate prop names and an NPE when a property value is null — toMap
    // tolerates neither, and the values were never used anyway.
    Set<String> subKeys = subPropertyList.stream()
            .map(Property::getProp)
            .collect(Collectors.toSet());
    for (Property parent : parentPropertyList){
        if (!subKeys.contains(parent.getProp())){
            subPropertyList.add(parent);
        }
    }
    return JSONUtils.toJson(subPropertyList);
}
/**
@ -899,7 +924,11 @@ public class ProcessDao extends AbstractBaseDao {
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
// create new task instance
if(taskInstance.getState() != ExecutionStatus.NEED_FAULT_TOLERANCE){
taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1 );
}
taskInstance.setEndTime(null);
taskInstance.setStartTime(new Date());
taskInstance.setFlag(Flag.YES);
taskInstance.setHost(null);
taskInstance.setId(0);
@ -1574,6 +1603,20 @@ public class ProcessDao extends AbstractBaseDao {
}
/**
 * master startup fault tolerance: pick up every process instance left in a
 * non-terminal state (ready-pause, ready-stop, need-fault-tolerance, running)
 * and run the failover routine on each of them.
 */
public void masterStartupFaultTolerant(){
    int[] unfinishedStates = new int[]{
            ExecutionStatus.READY_PAUSE.ordinal(),
            ExecutionStatus.READY_STOP.ordinal(),
            ExecutionStatus.NEED_FAULT_TOLERANCE.ordinal(),
            ExecutionStatus.RUNNING_EXEUTION.ordinal()};
    // NOTE(review): ordinal() is used as the persisted status code — assumes the
    // enum order matches what listByStatus expects in the DB; verify before reordering.
    for (ProcessInstance instance : processInstanceMapper.listByStatus(unfinishedStates)){
        processNeedFailoverProcessInstances(instance);
    }
}
@Transactional(value = "TransactionManager",rollbackFor = Exception.class)
public void selfFaultTolerant(ProcessInstance processInstance){

46
escheduler-dao/src/main/java/cn/escheduler/dao/TaskRecordDao.java

@ -17,6 +17,8 @@
package cn.escheduler.dao;
import cn.escheduler.common.Constants;
import cn.escheduler.common.enums.TaskRecordStatus;
import cn.escheduler.common.utils.CollectionUtils;
import cn.escheduler.common.utils.DateUtils;
import cn.escheduler.dao.model.TaskRecord;
import org.apache.commons.configuration.Configuration;
@ -28,6 +30,7 @@ import org.slf4j.LoggerFactory;
import java.sql.*;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
@ -43,7 +46,7 @@ public class TaskRecordDao {
/**
* 加载配置文件
* load conf file
*/
private static Configuration conf;
@ -56,6 +59,14 @@ public class TaskRecordDao {
}
}
/**
 * whether task record logging is enabled, read from the loaded configuration
 * @return true if the task record feature is switched on
 */
public static boolean getTaskRecordFlag(){
    boolean enabled = conf.getBoolean(Constants.TASK_RECORD_FLAG);
    return enabled;
}
/**
* create connection
* @return
@ -253,4 +264,37 @@ public class TaskRecordDao {
}
return recordList;
}
/**
 * Query the task record table by process name and process date and map the
 * result to a status: exactly one record with a positive target row count is
 * SUCCESS, one record with a non-positive (or missing) count is FAILURE, and
 * anything else (no record, query error, multiple records) is EXCEPTION.
 *
 * @param procName process name to match exactly
 * @param procDate process date prefix (matched with LIKE 'procDate%')
 * @return the derived TaskRecordStatus
 */
public static TaskRecordStatus getTaskRecordState(String procName,String procDate){
    // SECURITY(review): the SQL is assembled by string concatenation, so
    // procName/procDate are injectable. Binding them through a
    // PreparedStatement would require a parameterized variant of
    // getQueryResult — flagged rather than silently changed here.
    String sql = String.format("SELECT * FROM eamp_hive_log_hd WHERE PROC_NAME='%s' and PROC_DATE like '%s'"
            ,procName,procDate + "%");
    List<TaskRecord> taskRecordList = getQueryResult(sql);
    // no record (also covers a query exception, which yields an empty list)
    // or ambiguous multiple records: both are treated as EXCEPTION
    if (CollectionUtils.isEmpty(taskRecordList) || taskRecordList.size() > 1){
        return TaskRecordStatus.EXCEPTION;
    }
    TaskRecord taskRecord = taskRecordList.get(0);
    if (taskRecord == null){
        return TaskRecordStatus.EXCEPTION;
    }
    Long targetRowCount = taskRecord.getTargetRowCount();
    // guard the null case explicitly: the original unboxed targetRowCount <= 0
    // and would NPE when the column is NULL; treat a missing count as FAILURE
    if (targetRowCount == null || targetRowCount <= 0){
        return TaskRecordStatus.FAILURE;
    }
    return TaskRecordStatus.SUCCESS;
}
}

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save