
Merge pull request #582 from analysys/dev-1.1.0

merge from dev-1.1.0
bao liang committed 5 years ago (via GitHub)
parent commit 2dc2c60ca9
  1. 55
      docs/zh_CN/1.1.0-release.md
  2. 271
      docs/zh_CN/EasyScheduler-FAQ.md
  3. 1
      docs/zh_CN/SUMMARY.md
  4. 2
      docs/zh_CN/book.json
  5. BIN
      docs/zh_CN/images/cdh_hive_error.png
  6. BIN
      docs/zh_CN/images/hive_kerberos.png
  7. BIN
      docs/zh_CN/images/master_worker_lack_res.png
  8. BIN
      docs/zh_CN/images/sparksql_kerberos.png
  9. 2
      docs/zh_CN/后端部署文档.md
  10. 60
      docs/zh_CN/系统使用手册.md
  11. 2
      escheduler-alert/pom.xml
  12. 57
      escheduler-alert/src/main/java/cn/escheduler/alert/manager/EnterpriseWeChatManager.java
  13. 9
      escheduler-alert/src/main/java/cn/escheduler/alert/runner/AlertSender.java
  14. 8
      escheduler-alert/src/main/java/cn/escheduler/alert/utils/Constants.java
  15. 123
      escheduler-alert/src/main/java/cn/escheduler/alert/utils/EnterpriseWeChatUtils.java
  16. 5
      escheduler-alert/src/main/java/cn/escheduler/alert/utils/FuncUtils.java
  17. 4
      escheduler-alert/src/main/resources/alert.properties
  18. 57
      escheduler-alert/src/test/java/cn/escheduler/alert/utils/EnterpriseWeChatUtilsTest.java
  19. 9
      escheduler-api/pom.xml
  20. 47
      escheduler-api/src/main/java/cn/escheduler/api/controller/DataSourceController.java
  21. 4
      escheduler-api/src/main/java/cn/escheduler/api/controller/MonitorController.java
  22. 29
      escheduler-api/src/main/java/cn/escheduler/api/controller/SchedulerController.java
  23. 6
      escheduler-api/src/main/java/cn/escheduler/api/enums/Status.java
  24. 98
      escheduler-api/src/main/java/cn/escheduler/api/service/DataSourceService.java
  25. 29
      escheduler-api/src/main/java/cn/escheduler/api/service/ExecutorService.java
  26. 41
      escheduler-api/src/main/java/cn/escheduler/api/service/MonitorService.java
  27. 5
      escheduler-api/src/main/java/cn/escheduler/api/service/ProcessDefinitionService.java
  28. 30
      escheduler-api/src/main/java/cn/escheduler/api/service/ProcessInstanceService.java
  29. 9
      escheduler-api/src/main/java/cn/escheduler/api/service/ProjectService.java
  30. 50
      escheduler-api/src/main/java/cn/escheduler/api/service/ResourcesService.java
  31. 54
      escheduler-api/src/main/java/cn/escheduler/api/service/SchedulerService.java
  32. 38
      escheduler-api/src/main/java/cn/escheduler/api/service/TenantService.java
  33. 12
      escheduler-api/src/main/java/cn/escheduler/api/service/UdfFuncService.java
  34. 54
      escheduler-api/src/main/java/cn/escheduler/api/service/UsersService.java
  35. 4
      escheduler-api/src/main/java/cn/escheduler/api/utils/CheckUtils.java
  36. 1
      escheduler-api/src/main/java/cn/escheduler/api/utils/Constants.java
  37. 39
      escheduler-api/src/main/java/cn/escheduler/api/utils/ZookeeperMonitor.java
  38. 42
      escheduler-api/src/main/resources/logback.xml
  39. 13
      escheduler-api/src/test/java/cn/escheduler/api/controller/SchedulerControllerTest.java
  40. 29
      escheduler-api/src/test/java/cn/escheduler/api/utils/ZookeeperMonitorUtilsTest.java
  41. 2
      escheduler-common/pom.xml
  42. 77
      escheduler-common/src/main/java/cn/escheduler/common/Constants.java
  43. 29
      escheduler-common/src/main/java/cn/escheduler/common/enums/ResUploadType.java
  44. 15
      escheduler-common/src/main/java/cn/escheduler/common/enums/ZKNodeType.java
  45. 20
      escheduler-common/src/main/java/cn/escheduler/common/job/db/BaseDataSource.java
  46. 13
      escheduler-common/src/main/java/cn/escheduler/common/job/db/HiveDataSource.java
  47. 3
      escheduler-common/src/main/java/cn/escheduler/common/job/db/SparkDataSource.java
  48. 73
      escheduler-common/src/main/java/cn/escheduler/common/queue/TaskQueueZkImpl.java
  49. 11
      escheduler-common/src/main/java/cn/escheduler/common/utils/CommonUtils.java
  50. 102
      escheduler-common/src/main/java/cn/escheduler/common/utils/HadoopUtils.java
  51. 6
      escheduler-common/src/main/java/cn/escheduler/common/utils/IpUtils.java
  52. 14
      escheduler-common/src/main/java/cn/escheduler/common/utils/PropertyUtils.java
  53. 88
      escheduler-common/src/main/java/cn/escheduler/common/zk/AbstractZKClient.java
  54. 21
      escheduler-common/src/main/resources/common/common.properties
  55. 12
      escheduler-common/src/main/resources/common/hadoop/hadoop.properties
  56. 18
      escheduler-common/src/test/java/cn/escheduler/common/queue/TaskQueueImplTest.java
  57. 41
      escheduler-common/src/test/java/cn/escheduler/common/utils/IpUtilsTest.java
  58. 6
      escheduler-dao/pom.xml
  59. 72
      escheduler-dao/src/main/java/cn/escheduler/dao/ProcessDao.java
  60. 13
      escheduler-dao/src/main/java/cn/escheduler/dao/mapper/DataSourceMapper.java
  61. 19
      escheduler-dao/src/main/java/cn/escheduler/dao/mapper/DataSourceMapperProvider.java
  62. 5
      escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessDefinitionMapper.java
  63. 2
      escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessDefinitionMapperProvider.java
  64. 11
      escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessInstanceMapper.java
  65. 15
      escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessInstanceMapperProvider.java
  66. 18
      escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ResourceMapper.java
  67. 15
      escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ResourceMapperProvider.java
  68. 7
      escheduler-dao/src/main/java/cn/escheduler/dao/mapper/TaskInstanceMapperProvider.java
  69. 2
      escheduler-dao/src/main/java/cn/escheduler/dao/mapper/UserMapperProvider.java
  70. 17
      escheduler-dao/src/main/java/cn/escheduler/dao/mapper/WorkerServerMapper.java
  71. 15
      escheduler-dao/src/main/java/cn/escheduler/dao/mapper/WorkerServerMapperProvider.java
  72. 10
      escheduler-dao/src/main/java/cn/escheduler/dao/model/ProcessData.java
  73. 13
      escheduler-dao/src/main/java/cn/escheduler/dao/model/ProcessDefinition.java
  74. 53
      escheduler-dao/src/main/java/cn/escheduler/dao/model/ProcessInstance.java
  75. 8
      escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/CreateEscheduler.java
  76. 2
      escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/UpgradeEscheduler.java
  77. 125
      escheduler-dao/src/main/java/cn/escheduler/dao/utils/DagHelper.java
  78. 6
      escheduler-dao/src/main/resources/dao/data_source.properties
  79. 2
      escheduler-rpc/pom.xml
  80. 2
      escheduler-server/pom.xml
  81. 35
      escheduler-server/src/main/java/cn/escheduler/server/ResInfo.java
  82. 51
      escheduler-server/src/main/java/cn/escheduler/server/master/runner/MasterExecThread.java
  83. 3
      escheduler-server/src/main/java/cn/escheduler/server/utils/LoggerUtils.java
  84. 5
      escheduler-server/src/main/java/cn/escheduler/server/utils/ProcessUtils.java
  85. 21
      escheduler-server/src/main/java/cn/escheduler/server/worker/runner/FetchTaskThread.java
  86. 144
      escheduler-server/src/main/java/cn/escheduler/server/worker/runner/TaskScheduleThread.java
  87. 9
      escheduler-server/src/main/java/cn/escheduler/server/worker/task/PythonCommandExecutor.java
  88. 29
      escheduler-server/src/main/java/cn/escheduler/server/worker/task/python/PythonTask.java
  89. 17
      escheduler-server/src/main/java/cn/escheduler/server/worker/task/sql/SqlTask.java
  90. 224
      escheduler-server/src/main/java/cn/escheduler/server/zk/ZKMasterClient.java
  91. 31
      escheduler-server/src/main/java/cn/escheduler/server/zk/ZKWorkerClient.java
  92. 42
      escheduler-server/src/test/java/cn/escheduler/server/master/MasterCommandTest.java
  93. 5
      escheduler-server/src/test/java/cn/escheduler/server/zk/ZKWorkerClientTest.java
  94. 2
      escheduler-ui/src/js/conf/home/pages/dag/_source/config.js
  95. 42
      escheduler-ui/src/js/conf/home/pages/dag/_source/dag.vue
  96. 2
      escheduler-ui/src/js/conf/home/pages/dag/_source/formModel/formModel.vue
  97. 2
      escheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/mr.vue
  98. 2
      escheduler-ui/src/js/conf/home/pages/dag/_source/plugIn/jsPlumbHandle.js
  99. 114
      escheduler-ui/src/js/conf/home/pages/dag/_source/startingParam/index.vue
  100. 56
      escheduler-ui/src/js/conf/home/pages/dag/_source/udp/_source/selectTenant.vue
Some files were not shown because too many files have changed in this diff.

55
docs/zh_CN/1.1.0-release.md

@ -0,0 +1,55 @@
Easy Scheduler Release 1.1.0
===
Easy Scheduler 1.1.0 is the sixth release in the 1.x series.
New features:
===
- [[EasyScheduler-391](https://github.com/analysys/EasyScheduler/issues/391)] run a process under a specified tenant user
- [[EasyScheduler-288](https://github.com/analysys/EasyScheduler/issues/288)] Feature/qiye_weixin
- [[EasyScheduler-189](https://github.com/analysys/EasyScheduler/issues/189)] security support such as Kerberos
- [[EasyScheduler-398](https://github.com/analysys/EasyScheduler/issues/398)] administrators now have a tenant (install.sh sets a default tenant) and can create resources, projects and data sources (limited to one administrator)
- [[EasyScheduler-293](https://github.com/analysys/EasyScheduler/issues/293)] the parameters selected when clicking "run process" were neither saved nor viewable anywhere; they are now recorded
- [[EasyScheduler-401](https://github.com/analysys/EasyScheduler/issues/401)] schedules were easy to set to fire every second; after a schedule is saved, the page now shows the next trigger times
- [[EasyScheduler-493](https://github.com/analysys/EasyScheduler/pull/493)] add datasource kerberos auth and FAQ modify and add resource upload s3
Enhancements:
===
- [[EasyScheduler-227](https://github.com/analysys/EasyScheduler/issues/227)] upgrade spring-boot to 2.1.x and spring to 5.x
- [[EasyScheduler-434](https://github.com/analysys/EasyScheduler/issues/434)] the number of worker nodes shown in zk and in mysql was inconsistent
- [[EasyScheduler-435](https://github.com/analysys/EasyScheduler/issues/435)] validation of email address format
- [[EasyScheduler-441](https://github.com/analysys/EasyScheduler/issues/441)] include nodes flagged "forbidden to run" in the completed-node check
- [[EasyScheduler-400](https://github.com/analysys/EasyScheduler/issues/400)] home page: queue statistics were inconsistent and command statistics had no data
- [[EasyScheduler-395](https://github.com/analysys/EasyScheduler/issues/395)] the state of a fault-tolerance-recovered process must not be **Running**
- [[EasyScheduler-529](https://github.com/analysys/EasyScheduler/issues/529)] optimize poll task from zookeeper
- [[EasyScheduler-242](https://github.com/analysys/EasyScheduler/issues/242)] performance of task fetching on worker-server nodes
- [[EasyScheduler-352](https://github.com/analysys/EasyScheduler/issues/352)] worker grouping: queue consumption problem
- [[EasyScheduler-461](https://github.com/analysys/EasyScheduler/issues/461)] account and password information must be masked when viewing data source parameters
- [[EasyScheduler-396](https://github.com/analysys/EasyScheduler/issues/396)] optimize the Dockerfile and link it to GitHub for automatic image builds
- [[EasyScheduler-389](https://github.com/analysys/EasyScheduler/issues/389)] service monitor cannot find the change of master/worker
- [[EasyScheduler-511](https://github.com/analysys/EasyScheduler/issues/511)] support recovery process from stop/kill nodes.
- [[EasyScheduler-399](https://github.com/analysys/EasyScheduler/issues/399)] HadoopUtils operates as a specified user instead of the **deploy user**
Fixes:
===
- [[EasyScheduler-394](https://github.com/analysys/EasyScheduler/issues/394)] when master & worker are deployed on the same machine, restarting the master & worker services left previously scheduled tasks unable to be scheduled
- [[EasyScheduler-469](https://github.com/analysys/EasyScheduler/issues/469)] Fix naming errors, monitor page
- [[EasyScheduler-392](https://github.com/analysys/EasyScheduler/issues/392)] Feature request: fix email regex check
- [[EasyScheduler-405](https://github.com/analysys/EasyScheduler/issues/405)] on the schedule add/edit page, the start time and end time must not be identical
- [[EasyScheduler-517](https://github.com/analysys/EasyScheduler/issues/517)] backfill - sub-workflow - time parameters
- [[EasyScheduler-532](https://github.com/analysys/EasyScheduler/issues/532)] Python nodes did not execute
- [[EasyScheduler-543](https://github.com/analysys/EasyScheduler/issues/543)] optimize datasource connection params safety
- [[EasyScheduler-569](https://github.com/analysys/EasyScheduler/issues/569)] scheduled tasks could not truly be stopped
- [[EasyScheduler-463](https://github.com/analysys/EasyScheduler/issues/463)] email validation did not support less common email suffixes
Thanks:
===
Last but most important: without the contributions of the following partners there would be no new release:
Baoqi, jimmy201602, samz406, petersear, millionfor, hyperknob, fanguanqun, yangqinlong, qq389401879, chgxtony, Stanfan, lfyee, thisnew, hujiang75277381, sunnyingit, lgbo-ustc, ivivi, lzy305, JackIllkid, telltime, lipengbo2018, wuchunfu, telltime
As well as the many enthusiastic partners in our WeChat group. Thank you all very much!

271
docs/zh_CN/EasyScheduler-FAQ.md

@ -1,96 +1,287 @@
## Q: What are the EasyScheduler services, and how much memory should they run with?
A: EasyScheduler is made up of 5 services: MasterServer, WorkerServer, ApiServer, AlertServer and LoggerServer, plus the UI.
| Service | Memory | CPU cores |
| ------------ | ---- | ------- |
| MasterServer | 2G | 2 cores |
| WorkerServer | 2G | 2 cores |
| ApiServer | 512M | 1 core |
| AlertServer | 512M | 1 core |
| LoggerServer | 512M | 1 core |

| Service | Description |
| ------------------------- | ------------------------------------------------------------ |
| MasterServer | Mainly responsible for splitting the **DAG** and monitoring task states |
| WorkerServer/LoggerServer | Mainly responsible for submitting and executing tasks and updating task states. LoggerServer lets the REST API view logs via **RPC** |
| ApiServer | Provides the REST API consumed by the UI |
| AlertServer | Provides the alerting service |
| UI | Front-end presentation |
Note: if there are many tasks, the machine hosting the WorkerServer should have at least 16G of physical memory
Note: **since there are quite a few services, a single-machine deployment should have at least 4 cores and 16G**
---
## Q: Why can't an administrator create projects?
A: The administrator is currently "**pure management**": it has no tenant, i.e. no corresponding Linux user, hence no execute permission, **no owned projects, resources or data sources**, and no create permission. **It does have full view permissions.** For business operations such as creating projects, **use the administrator to create a tenant and a regular user, then log in as the regular user**. In 1.1.0 we will open up the administrator's create and execute permissions, giving the administrator full permissions
---
## Q: Which mail providers does the system support?
A: The vast majority: qq, 163, 126, 139, outlook, aliyun and so on all work. Both **TLS and SSL** are supported and can be configured selectively in alert.properties
---
## Q: What are the common system variable time parameters, and how are they used?
A: Please refer to https://analysys.github.io/easyscheduler_docs_cn/%E7%B3%BB%E7%BB%9F%E4%BD%BF%E7%94%A8%E6%89%8B%E5%86%8C.html#%E7%B3%BB%E7%BB%9F%E5%8F%82%E6%95%B0
---
## Q: pip install kazoo fails. Is it required?
A: Yes, it must be installed: kazoo is what Python uses to connect to ZooKeeper
---
## Q: How do I make a task run on a specific machine?
A: As an **administrator**, create a Worker group, then **specify the Worker group** either when **starting the process definition** or **on the task node**. If none is specified, Default is used: **Default picks a random Worker from all Workers in the cluster to submit and execute the task**
---
## Q: Task priorities
A: We support **priorities for both processes and tasks**, on five levels: **HIGHEST, HIGH, MEDIUM, LOW and LOWEST**. **Priorities can be set between different process instances, and between different task instances within the same process instance.** For details see the task priority design: https://analysys.github.io/easyscheduler_docs_cn/%E7%B3%BB%E7%BB%9F%E6%9E%B6%E6%9E%84%E8%AE%BE%E8%AE%A1.html#%E7%B3%BB%E7%BB%9F%E6%9E%B6%E6%9E%84%E8%AE%BE%E8%AE%A1
----
## Q: escheduler-grpc reports errors
A: Run mvn -U clean package assembly:assembly -Dmaven.test.skip=true in the root directory, then refresh the whole project
----
## Q: Does EasyScheduler run on Windows?
A: In theory only the **Worker needs to run on Linux**; all other services can run normally on Windows. Deploying on Linux is still recommended
-----
## Q: Building node-sass for the UI on Linux reports: Error:EACCESS:permission denied,mkdir xxxx
A: Install it separately with **npm install node-sass --unsafe-perm**, then run **npm install**
---
## Q: The UI cannot be logged into or accessed
A: 1. If started with node, check that API_BASE in .env under escheduler-ui is the Api Server address
2. If served by nginx and installed via **install-escheduler-ui.sh**, check that the proxy_pass in **/etc/nginx/conf.d/escheduler.conf** is the Api Server address
3. If the above are correct, check whether the Api Server itself is healthy: curl http://192.168.xx.xx:12345/escheduler/users/get-user-info and look at the Api Server log; if it reports cn.escheduler.api.interceptor.LoginHandlerInterceptor:[76] - session info is null, the Api Server is working
4. If none of the above helps, check that **server.context-path and server.port** in **application.properties** are configured correctly
---
## Q: No process instance appears after a process definition is started manually or by schedule
A: 1. First check with **jps whether the MasterServer service is running**, or check directly in the service monitor whether a master exists in zk
2. If a master exists, look for a new record under **command status statistics** or in **t_escheduler_error_command**; if one was added, **inspect its message field to locate the cause of the start failure**
---
## Q: A task stays in the "submit success" state
A: 1. First check with **jps whether the WorkerServer service is running**, or check directly in the service monitor whether a worker exists in zk
2. If the **WorkerServer** is healthy, **check whether the MasterServer put the task into the zk queue**; **inspect the MasterServer log and whether tasks are blocked in the zk queue**
3. If neither helps, check whether a Worker group was specified but **none of that group's machines is online**
---
## Q: Are a Docker image and a Dockerfile provided?
A: Yes, both a Docker image and a Dockerfile are provided.
Docker image: https://hub.docker.com/r/escheduler/escheduler_images
Dockerfile: https://github.com/qiaozhanwei/escheduler_dockerfile/tree/master/docker_escheduler
---
## Q: Things to watch out for in install.sh
A: 1. If a substitution variable contains special characters, **escape them with the \ escape character**
2. installPath="/data1_1T/escheduler": **this directory must not be the same directory as the install.sh you are running**
3. deployUser="escheduler": **the deploy user must have sudo privileges**, because the worker runs commands via sudo -u tenant sh xxx.command
4. monitorServerState="false": whether to start the service monitoring script; off by default. **If enabled, it checks every 5 minutes whether the master and worker services are down and restarts them automatically**
5. hdfsStartupSate="false": whether to enable HDFS resource upload; off by default, **and the resource center is unusable while it is off**. If enabled, configure fs.defaultFS and the yarn settings in conf/common/hadoop/hadoop.properties; for namenode HA, also copy core-site.xml and hdfs-site.xml into the conf root directory
Note: **1.0.x does not create the hdfs root directory automatically; create it yourself, and the deploy user needs hdfs permissions on it**
---
## Q: Taking a process definition or process instance offline fails
A: For versions **before 1.0.4**, modify the code under the cn.escheduler.api.quartz package in escheduler-api:
```
public boolean deleteJob(String jobName, String jobGroupName) {
    lock.writeLock().lock();
    try {
        JobKey jobKey = new JobKey(jobName,jobGroupName);
        if(scheduler.checkExists(jobKey)){
            logger.info("try to delete job, job name: {}, job group name: {},", jobName, jobGroupName);
            return scheduler.deleteJob(jobKey);
        }else {
            return true;
        }
    } catch (SchedulerException e) {
        logger.error(String.format("delete job : %s failed",jobName), e);
    } finally {
        lock.writeLock().unlock();
    }
    return false;
}
```
---
## Q: Can a tenant created before HDFS was enabled use the resource center normally?
A: No. A tenant created before HDFS was enabled has no tenant directory registered in HDFS, so resource uploads will fail
## Q: With multiple Masters and Workers, how is a dead service tolerated?
A: **Note: the Master monitors both the Master and Worker services.**
1. If a Master goes down, the other Masters take over its processes and continue monitoring Worker task states
2. If a Worker goes down, the Master notices; if a Yarn task exists, it kills the Yarn task and then retries
See the fault tolerance design for details: https://analysys.github.io/easyscheduler_docs_cn/%E7%B3%BB%E7%BB%9F%E6%9E%B6%E6%9E%84%E8%AE%BE%E8%AE%A1.html#%E7%B3%BB%E7%BB%9F%E6%9E%B6%E6%9E%84%E8%AE%BE%E8%AE%A1
---
## Q: Fault tolerance when Master and Worker run pseudo-distributed on one machine
A: Version 1.0.3 only implements fault tolerance in the Master start-up flow, not Worker fault tolerance; that is, if the Worker dies while no Master is alive, those processes run into trouble. We will add Master and Worker start-up self-tolerance in **1.1.0** to fix this. To work around it manually: for **processes that were running across the restart**, **the running tasks of Workers that have died must be set to failed**, **and those across-restart running processes must also be set to failed**; then recover the processes from the failed nodes
---
## Q: Schedules are easily set to fire every second
A: Be careful when configuring a schedule: if the first field of (* * * * * ? *) is set to \*, it fires every second. **We will add a list of the upcoming trigger times in 1.1.0.** You can also use http://cron.qqe2.com/ to preview the next 5 fire times online
-----
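For a quick offline sanity check of a crontab, the Quartz `CronExpression` class (Quartz is what the project's cn.escheduler.api.quartz scheduling layer builds on) can enumerate the next fire times. A minimal sketch, with the cron string and count purely illustrative:
```
import org.quartz.CronExpression;

import java.util.Date;

public class CronPreview {
    public static void main(String[] args) throws Exception {
        // First field is seconds: "0 0 3/6 * * ? *" fires at 03:00, 09:00, 15:00, 21:00.
        // A "*" in the seconds field would make it fire every second.
        CronExpression cron = new CronExpression("0 0 3/6 * * ? *");
        Date next = new Date();
        for (int i = 0; i < 5; i++) {   // preview the next 5 trigger times
            next = cron.getNextValidTimeAfter(next);
            System.out.println(next);
        }
    }
}
```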
## Q: Do schedules have a validity window?
A: Yes. **If a schedule's start and end time are the same moment, the schedule is invalid.** **If the end time is earlier than the current time, the schedule is very likely to be deleted automatically**
## Q: How many kinds of task dependency are there?
A: 1. Task dependencies between **DAG** nodes: the DAG is split starting from nodes with an **in-degree of zero**
2. The **dependent task node** supports task or process dependencies across processes; see the DEPENDENT node: https://analysys.github.io/easyscheduler_docs_cn/%E7%B3%BB%E7%BB%9F%E4%BD%BF%E7%94%A8%E6%89%8B%E5%86%8C.html#%E4%BB%BB%E5%8A%A1%E8%8A%82%E7%82%B9%E7%B1%BB%E5%9E%8B%E5%92%8C%E5%8F%82%E6%95%B0%E8%AE%BE%E7%BD%AE
Note: **cross-project process or task dependencies are not supported**
## Q: How many ways are there to start a process definition?
A: 1. In the **process definition list**, click the **Start** button
2. **Add a schedule in the process definition list**, so the scheduler starts the process definition
3. On the process definition **view or edit** DAG page, **right-click any task node** and start the process definition
4. You can edit the process definition's DAG and set the run flag of certain tasks to **forbidden**; when the process definition is started, that node's connections are removed from the DAG
## Q: Setting the Python version for Python tasks
A: 1. For versions **after 1.0.3**, just change PYTHON_HOME in conf/env/.escheduler_env.sh
```
export PYTHON_HOME=/bin/python
```
Note: **PYTHON_HOME** here is the absolute path of the python command, not the conventional PYTHON_HOME; also note that when exporting PATH, PYTHON_HOME must be referenced directly:
```
export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$PATH
```
2. For versions before 1.0.3, Python tasks can only use the system's Python version; a specific version cannot be chosen
## Q: Worker tasks run via sudo -u tenant sh xxx.command spawn child processes; are they killed when the task is killed?
A: In 1.0.4, killing a task will also kill all the child processes it spawned
## Q: How are queues used in EasyScheduler? What do user queue and tenant queue mean?
A: Queues in EasyScheduler can be specified on a user or on a tenant; **the queue specified on a user has higher priority than the tenant's queue**. For example, for an MR task the queue is specified via mapreduce.job.queuename.
Note: when specifying the queue for MR this way, pass parameters as follows:
```
Configuration conf = new Configuration();
GenericOptionsParser optionParser = new GenericOptionsParser(conf, args);
String[] remainingArgs = optionParser.getRemainingArgs();
```
For Spark tasks, specify the queue with --queue
## Q: Master or Worker reports the following alert
<p align="center">
   <img src="https://analysys.github.io/easyscheduler_docs_cn/images/master_worker_lack_res.png" width="60%" />
 </p>
A: Lower the value of **master.reserved.memory** in master.properties under conf (e.g. to 0.1), or lower **worker.reserved.memory** in worker.properties (e.g. to 0.1)
## Q: With hive 1.1.0+cdh5.15.0, SQL hive task connections fail
<p align="center">
   <img src="https://analysys.github.io/easyscheduler_docs_cn/images/cdh_hive_error.png" width="60%" />
 </p>
A: Change the hive dependency in the pom from
```
<dependency>
    <groupId>org.apache.hive</groupId>
    <artifactId>hive-jdbc</artifactId>
    <version>2.1.0</version>
</dependency>
```
to
```
<dependency>
    <groupId>org.apache.hive</groupId>
    <artifactId>hive-jdbc</artifactId>
    <version>1.1.0</version>
</dependency>
```

1
docs/zh_CN/SUMMARY.md

@ -35,6 +35,7 @@
* System version upgrade documentation
* [Version upgrade](升级文档.md)
* Contents of past releases
* [1.1.0 release](1.1.0-release.md)
* [1.0.3 release](1.0.3-release.md)
* [1.0.2 release](1.0.2-release.md)
* [1.0.1 release](1.0.1-release.md)

2
docs/zh_CN/book.json

@ -1,6 +1,6 @@
{
"title": "调度系统-EasyScheduler",
"author": "YIGUAN",
"author": "",
"description": "调度系统",
"language": "zh-hans",
"gitbook": "3.2.3",

BIN
docs/zh_CN/images/cdh_hive_error.png

Binary file added (not shown): 104 KiB

BIN
docs/zh_CN/images/hive_kerberos.png

Binary file added (not shown): 36 KiB

BIN
docs/zh_CN/images/master_worker_lack_res.png

Binary file added (not shown): 106 KiB

BIN
docs/zh_CN/images/sparksql_kerberos.png

Binary file added (not shown): 36 KiB

2
docs/zh_CN/后端部署文档.md

@ -66,7 +66,7 @@ escheduler ALL=(ALL) NOPASSWD: NOPASSWD: ALL
mysql -h {host} -u {user} -p{password} -D {db} < quartz.sql
```
* Create tables and import base data, for version 1.0.2
* Create tables and import base data, for versions 1.0.2 and later (1.0.2 included)
Modify the following properties in conf/dao/data_source.properties
```

60
docs/zh_CN/系统使用手册.md

@ -60,7 +60,7 @@
### Run a process definition
- **A process definition that is not online can be edited but not run**, so bring the workflow online first
> Click "Workflow Definition" to return to the process definition list, then click the "Online" icon to bring the workflow definition online.
> Before taking a workflow "offline", first take its schedules offline in schedule management; only then can the workflow definition be taken offline successfully
- Click "Run" to execute the workflow. Description of the run parameters:
@ -98,28 +98,28 @@
### View process instances
> Click "Workflow Instance" to view the process instance list.
> Click a workflow name to view task execution states.
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/instance-detail.png" width="60%" />
</p>
> Click a task node, then click "View Log" to see the task execution log.
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/task-log.png" width="60%" />
</p>
> Click a task instance node, then click **View History** to see the list of runs of that task instance within the process instance
<p align="center">
<img src="https://analysys.github.io/EasyScheduler/zh_CN/images/task_history.png" width="60%" />
</p>
> Operations on a workflow instance:
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/instance-list.png" width="60%" />
</p>
@ -165,7 +165,7 @@
- Password: set the password for connecting to MySQL
- Database name: enter the name of the MySQL database to connect to
- JDBC connection parameters: parameter settings for the MySQL connection, entered as JSON
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/mysql_edit.png" width="60%" />
</p>
@ -191,7 +191,7 @@
#### Create and edit a HIVE data source
1. Connect using HiveServer2
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/hive_edit.png" width="60%" />
</p>
@ -207,12 +207,20 @@
- JDBC connection parameters: parameter settings for the HIVE connection, entered as JSON
2. Connect using HiveServer2 HA through ZooKeeper
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/hive_edit2.png" width="60%" />
</p>
Note: if **kerberos** is enabled, the **Principal** must be filled in
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/hive_kerberos.png" width="60%" />
</p>
#### Create and edit a Spark data source
<p align="center">
@ -229,9 +237,17 @@
- Database name: enter the name of the Spark database to connect to
- JDBC connection parameters: parameter settings for the Spark connection, entered as JSON
Note: if **kerberos** is enabled, the **Principal** must be filled in
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/sparksql_kerberos.png" width="60%" />
</p>
### Upload resources
- Upload resource files and UDF functions. All uploaded files and resources are stored on hdfs, so the following configuration items are required:
```
conf/common/common.properties
-- hdfs.startup.state=true
@ -242,7 +258,7 @@ conf/common/hadoop.properties
```
#### File management
> Management of all kinds of resource files: create basic txt/log/sh/conf files, upload jar packages and other file types, and edit, download or delete them.
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/file-manage.png" width="60%" />
@ -287,7 +303,7 @@ conf/common/hadoop.properties
#### Resource management
> Resource management is similar to file management; the difference is that resource management holds uploaded UDF functions, while file management holds user programs, scripts and configuration files
* Upload UDF resources
> Same as uploading files.
@ -303,7 +319,7 @@ conf/common/hadoop.properties
- Parameters: annotate the function's input parameters
- Database name: reserved field, for creating permanent UDF functions
- UDF resource: set the resource file that the created UDF corresponds to
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/udf_edit.png" width="60%" />
</p>
@ -312,7 +328,7 @@ conf/common/hadoop.properties
- The security center is available only to administrator accounts. It offers queue management, tenant management, user management, alert group management, worker groups, token management and so on, and can also grant permissions on resources, data sources, projects, etc.
- Log in as administrator; the default username/password is admin/escheduler123
### Create a queue
- A queue is used when running programs such as spark and mapreduce that need a "queue" parameter.
- "Security Center" -> "Queue Management" -> "Create Queue"
@ -357,7 +373,7 @@ conf/common/hadoop.properties
### Token management
- Because the backend interfaces require a login check, token management provides a way to operate the system by calling the interfaces directly.
- Invocation example:
```token invocation example
/**
* test token
@ -477,15 +493,15 @@ conf/common/hadoop.properties
### DEPENDENT node
- The dependent node is a **dependency check node**. For example, if process A depends on yesterday's successful run of process B, the dependent node checks whether process B has a successful instance for yesterday.
> Drag the ![PNG](https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_DEPENDENT.png) task node from the toolbar onto the canvas and double-click it, as shown below:
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/dependent_edit.png" width="60%" />
</p>
> The dependent node offers logical conditions, e.g. checking whether yesterday's B process succeeded, or whether the C process succeeded.
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/depend-node.png" width="80%" />
</p>
@ -536,7 +552,7 @@ conf/common/hadoop.properties
### SPARK node
- The SPARK node executes SPARK programs directly; for spark nodes, the worker submits the task using `spark-submit`
> Drag the ![PNG](https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_SPARK.png) task node from the toolbar onto the canvas and double-click it, as shown below:
<p align="center">
@ -563,7 +579,7 @@ conf/common/hadoop.properties
> Drag the ![PNG](https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_MR.png) task node from the toolbar onto the canvas and double-click it, as shown below:
1. JAVA program
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/mr_java.png" width="60%" />
</p>
@ -592,7 +608,7 @@ conf/common/hadoop.properties
### Python node
- The Python node executes python scripts directly; for python nodes, the worker submits the task using `python **`.
> Drag the ![PNG](https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_PYTHON.png) task node from the toolbar onto the canvas and double-click it, as shown below:

2
escheduler-alert/pom.xml

@ -4,7 +4,7 @@
<parent>
<groupId>cn.analysys</groupId>
<artifactId>escheduler</artifactId>
<version>1.0.4-SNAPSHOT</version>
<version>1.1.0-SNAPSHOT</version>
</parent>
<artifactId>escheduler-alert</artifactId>
<packaging>jar</packaging>

57
escheduler-alert/src/main/java/cn/escheduler/alert/manager/EnterpriseWeChatManager.java

@ -0,0 +1,57 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.alert.manager;
import cn.escheduler.alert.utils.Constants;
import cn.escheduler.alert.utils.EnterpriseWeChatUtils;
import cn.escheduler.dao.model.Alert;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* Enterprise WeChat Manager
*/
public class EnterpriseWeChatManager {
private static final Logger logger = LoggerFactory.getLogger(EnterpriseWeChatManager.class);
/**
* Enterprise WeChat send
* @param alert
* @param token
*/
public Map<String,Object> send(Alert alert, String token){
Map<String,Object> retMap = new HashMap<>();
retMap.put(Constants.STATUS, false);
String agentId = EnterpriseWeChatUtils.enterpriseWeChatAgentId;
String users = EnterpriseWeChatUtils.enterpriseWeChatUsers;
List<String> userList = Arrays.asList(users.split(","));
logger.info("send message {}",alert);
String msg = EnterpriseWeChatUtils.makeUserSendMsg(userList, agentId,EnterpriseWeChatUtils.markdownByAlert(alert));
try {
EnterpriseWeChatUtils.sendEnterpriseWeChat(Constants.UTF_8, msg, token);
} catch (IOException e) {
logger.error(e.getMessage(),e);
}
retMap.put(Constants.STATUS, true);
return retMap;
}
}

9
escheduler-alert/src/main/java/cn/escheduler/alert/runner/AlertSender.java

@ -17,7 +17,9 @@
package cn.escheduler.alert.runner;
import cn.escheduler.alert.manager.EmailManager;
import cn.escheduler.alert.manager.EnterpriseWeChatManager;
import cn.escheduler.alert.utils.Constants;
import cn.escheduler.alert.utils.EnterpriseWeChatUtils;
import cn.escheduler.common.enums.AlertStatus;
import cn.escheduler.common.enums.AlertType;
import cn.escheduler.dao.AlertDao;
@ -40,6 +42,7 @@ public class AlertSender{
private static final Logger logger = LoggerFactory.getLogger(AlertSender.class);
private static final EmailManager emailManager= new EmailManager();
private static final EnterpriseWeChatManager weChatManager= new EnterpriseWeChatManager();
private List<Alert> alertList;
@ -109,6 +112,12 @@ public class AlertSender{
if (flag){
alertDao.updateAlert(AlertStatus.EXECUTION_SUCCESS, "execution success", alert.getId());
logger.info("alert send success");
try {
String token = EnterpriseWeChatUtils.getToken();
weChatManager.send(alert,token);
} catch (Exception e) {
logger.error(e.getMessage(),e);
}
}else {
alertDao.updateAlert(AlertStatus.EXECUTION_FAILURE,String.valueOf(retMaps.get(Constants.MESSAGE)),alert.getId());
logger.info("alert send error : {}" , String.valueOf(retMaps.get(Constants.MESSAGE)));

8
escheduler-alert/src/main/java/cn/escheduler/alert/utils/Constants.java

@ -129,6 +129,10 @@ public class Constants {
public static final int ALERT_SCAN_INTERVEL = 5000;
public static final String MARKDOWN_QUOTE = ">";
public static final String MARKDOWN_ENTER = "\n";
public static final String ENTERPRISE_WECHAT_CORP_ID = "enterprise.wechat.corp.id";
public static final String ENTERPRISE_WECHAT_SECRET = "enterprise.wechat.secret";
@ -140,4 +144,8 @@ public class Constants {
public static final String ENTERPRISE_WECHAT_TEAM_SEND_MSG = "enterprise.wechat.team.send.msg";
public static final String ENTERPRISE_WECHAT_USER_SEND_MSG = "enterprise.wechat.user.send.msg";
public static final String ENTERPRISE_WECHAT_AGENT_ID = "enterprise.wechat.agent.id";
public static final String ENTERPRISE_WECHAT_USERS = "enterprise.wechat.users";
}

123
escheduler-alert/src/main/java/cn/escheduler/alert/utils/EnterpriseWeChatUtils.java

@ -16,9 +16,12 @@
*/
package cn.escheduler.alert.utils;
import cn.escheduler.common.enums.ShowType;
import cn.escheduler.dao.model.Alert;
import com.alibaba.fastjson.JSON;
import com.google.common.reflect.TypeToken;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.HttpEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
@ -31,13 +34,12 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Collection;
import java.util.Map;
import java.util.*;
import static cn.escheduler.alert.utils.PropertyUtils.getString;
/**
* qiye weixin utils
* Enterprise WeChat utils
*/
public class EnterpriseWeChatUtils {
@ -48,7 +50,7 @@ public class EnterpriseWeChatUtils {
private static final String enterpriseWeChatSecret = getString(Constants.ENTERPRISE_WECHAT_SECRET);
private static final String enterpriseWeChatTokenUrl = getString(Constants.ENTERPRISE_WECHAT_TOKEN_URL);
private String enterpriseWeChatTokenUrlReplace = enterpriseWeChatTokenUrl
private static String enterpriseWeChatTokenUrlReplace = enterpriseWeChatTokenUrl
.replaceAll("\\$corpId", enterpriseWeChatCorpId)
.replaceAll("\\$secret", enterpriseWeChatSecret);
@ -58,12 +60,16 @@ public class EnterpriseWeChatUtils {
private static final String enterpriseWeChatUserSendMsg = getString(Constants.ENTERPRISE_WECHAT_USER_SEND_MSG);
public static final String enterpriseWeChatAgentId = getString(Constants.ENTERPRISE_WECHAT_AGENT_ID);
public static final String enterpriseWeChatUsers = getString(Constants.ENTERPRISE_WECHAT_USERS);
/**
* get winxin token info
* get Enterprise WeChat token info
* @return token string info
* @throws IOException
*/
public String getToken() throws IOException {
public static String getToken() throws IOException {
String resp;
CloseableHttpClient httpClient = HttpClients.createDefault();
@ -71,7 +77,7 @@ public class EnterpriseWeChatUtils {
CloseableHttpResponse response = httpClient.execute(httpGet);
try {
HttpEntity entity = response.getEntity();
resp = EntityUtils.toString(entity, "utf-8");
resp = EntityUtils.toString(entity, Constants.UTF_8);
EntityUtils.consume(entity);
} finally {
response.close();
@ -84,26 +90,26 @@ public class EnterpriseWeChatUtils {
}
/**
* make team single weixin message
* make team single Enterprise WeChat message
* @param toParty
* @param agentId
* @param msg
* @return weixin send message
* @return Enterprise WeChat send message
*/
public String makeTeamSendMsg(String toParty, String agentId, String msg) {
public static String makeTeamSendMsg(String toParty, String agentId, String msg) {
return enterpriseWeChatTeamSendMsg.replaceAll("\\$toParty", toParty)
.replaceAll("\\$agentId", agentId)
.replaceAll("\\$msg", msg);
}
/**
* make team multi weixin message
* make team multi Enterprise WeChat message
* @param toParty
* @param agentId
* @param msg
* @return weixin send message
* @return Enterprise WeChat send message
*/
public String makeTeamSendMsg(Collection<String> toParty, String agentId, String msg) {
public static String makeTeamSendMsg(Collection<String> toParty, String agentId, String msg) {
String listParty = FuncUtils.mkString(toParty, "|");
return enterpriseWeChatTeamSendMsg.replaceAll("\\$toParty", listParty)
.replaceAll("\\$agentId", agentId)
@ -115,9 +121,9 @@ public class EnterpriseWeChatUtils {
* @param toUser
* @param agentId
* @param msg
* @return weixin send message
* @return Enterprise WeChat send message
*/
public String makeUserSendMsg(String toUser, String agentId, String msg) {
public static String makeUserSendMsg(String toUser, String agentId, String msg) {
return enterpriseWeChatUserSendMsg.replaceAll("\\$toUser", toUser)
.replaceAll("\\$agentId", agentId)
.replaceAll("\\$msg", msg);
@ -128,9 +134,9 @@ public class EnterpriseWeChatUtils {
* @param toUser
* @param agentId
* @param msg
* @return weixin send message
* @return Enterprise WeChat send message
*/
public String makeUserSendMsg(Collection<String> toUser, String agentId, String msg) {
public static String makeUserSendMsg(Collection<String> toUser, String agentId, String msg) {
String listUser = FuncUtils.mkString(toUser, "|");
return enterpriseWeChatUserSendMsg.replaceAll("\\$toUser", listUser)
.replaceAll("\\$agentId", agentId)
@ -138,14 +144,14 @@ public class EnterpriseWeChatUtils {
}
/**
* send weixin
* send Enterprise WeChat
* @param charset
* @param data
* @param token
* @return weixin resp, demo: {"errcode":0,"errmsg":"ok","invaliduser":""}
* @return Enterprise WeChat resp, demo: {"errcode":0,"errmsg":"ok","invaliduser":""}
* @throws IOException
*/
public String sendQiyeWeixin(String charset, String data, String token) throws IOException {
public static String sendEnterpriseWeChat(String charset, String data, String token) throws IOException {
String enterpriseWeChatPushUrlReplace = enterpriseWeChatPushUrl.replaceAll("\\$token", token);
CloseableHttpClient httpclient = HttpClients.createDefault();
@ -160,8 +166,83 @@ public class EnterpriseWeChatUtils {
} finally {
response.close();
}
logger.info("qiye weixin send [{}], param:{}, resp:{}", enterpriseWeChatPushUrl, data, resp);
logger.info("Enterprise WeChat send [{}], param:{}, resp:{}", enterpriseWeChatPushUrl, data, resp);
return resp;
}
/**
* convert table to markdown style
* @param title
* @param content
* @return
*/
public static String markdownTable(String title,String content){
List<LinkedHashMap> mapItemsList = JSONUtils.toList(content, LinkedHashMap.class);
StringBuilder contents = new StringBuilder(200);
for (LinkedHashMap mapItems : mapItemsList){
Set<Map.Entry<String, String>> entries = mapItems.entrySet();
Iterator<Map.Entry<String, String>> iterator = entries.iterator();
StringBuilder t = new StringBuilder(String.format("`%s`%s",title,Constants.MARKDOWN_ENTER));
while (iterator.hasNext()){
Map.Entry<String, String> entry = iterator.next();
t.append(Constants.MARKDOWN_QUOTE);
t.append(entry.getKey()).append(":").append(entry.getValue());
t.append(Constants.MARKDOWN_ENTER);
}
contents.append(t);
}
return contents.toString();
}
/**
* convert text to markdown style
* @param title
* @param content
* @return
*/
public static String markdownText(String title,String content){
if (StringUtils.isNotEmpty(content)){
List<String> list;
try {
list = JSONUtils.toList(content,String.class);
}catch (Exception e){
logger.error("json format exception",e);
return null;
}
StringBuilder contents = new StringBuilder(100);
contents.append(String.format("`%s`\n",title));
for (String str : list){
contents.append(Constants.MARKDOWN_QUOTE);
contents.append(str);
contents.append(Constants.MARKDOWN_ENTER);
}
return contents.toString();
}
return null;
}
/**
* Determine the markdown style based on the show type of the alert
* @param alert
* @return
*/
public static String markdownByAlert(Alert alert){
String result = "";
if (alert.getShowType() == ShowType.TABLE) {
result = markdownTable(alert.getTitle(),alert.getContent());
}else if(alert.getShowType() == ShowType.TEXT){
result = markdownText(alert.getTitle(),alert.getContent());
}
return result;
}
}
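To illustrate the new static markdown helpers above, here is a minimal usage sketch; the title and JSON content strings are made-up examples of the kind of payload an Alert carries:
```
// Sketch: feeding markdownText an alert title and a JSON-list content string.
String title = "escheduler alert";                            // example title
String content = "[\"task A failed\",\"task B timed out\"]";  // example Alert content (JSON list)
String md = EnterpriseWeChatUtils.markdownText(title, content);
// md is now roughly:
// `escheduler alert`
// >task A failed
// >task B timed out
```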

5
escheduler-alert/src/main/java/cn/escheduler/alert/utils/FuncUtils.java

@ -22,10 +22,11 @@ public class FuncUtils {
StringBuilder sb = new StringBuilder();
boolean first = true;
for (String item : list) {
if (first)
if (first) {
first = false;
else
} else {
sb.append(split);
}
sb.append(item);
}
return sb.toString();
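The brace fix above does not change mkString's behavior. For reference, a small sketch of how the WeChat utilities use it to build the pipe-separated recipient list (the user ids are examples):
```
import java.util.Arrays;

// Produces "test1|test2|test3", the form expected in the "touser"/"toparty" fields
String touser = FuncUtils.mkString(Arrays.asList("test1", "test2", "test3"), "|");
```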

4
escheduler-alert/src/main/resources/alert.properties

@ -19,10 +19,12 @@ xls.file.path=/tmp/xls
# Enterprise WeChat configuration
enterprise.wechat.corp.id=xxxxxxx
enterprise.wechat.secret=xxxxxxx
enterprise.wechat.agent.id=xxxxxxx
enterprise.wechat.users=xxxxxxx
enterprise.wechat.token.url=https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret
enterprise.wechat.push.url=https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token
enterprise.wechat.team.send.msg={\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}
enterprise.wechat.user.send.msg={\"touser\":\"$toUser\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}
enterprise.wechat.user.send.msg={\"touser\":\"$toUser\",\"agentid\":\"$agentId\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"$msg\"}}
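The user template now sends msgtype markdown instead of text. A hedged sketch of what makeUserSendMsg produces from it after placeholder substitution (the ids and message are examples):
```
String msg = EnterpriseWeChatUtils.makeUserSendMsg(
        Arrays.asList("test1", "test2"), "1000002", "`title`");
// -> {"touser":"test1|test2","agentid":"1000002","msgtype":"markdown","markdown":{"content":"`title`"}}
```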

57
escheduler-alert/src/test/java/cn/escheduler/alert/utils/EnterpriseWeChatUtilsTest.java

@ -16,11 +16,10 @@
*/
package cn.escheduler.alert.utils;
import com.alibaba.fastjson.JSON;
import org.junit.Assert;
import org.junit.Test;
import com.alibaba.fastjson.JSON;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
@ -33,24 +32,23 @@ import java.util.Collection;
* enterprise.wechat.token.url
* enterprise.wechat.push.url
* enterprise.wechat.send.msg
* enterprise.wechat.agent.id
* enterprise.wechat.users
*/
public class EnterpriseWeChatUtilsTest {
private String agentId = PropertyUtils.getString(Constants.ENTERPRISE_WECHAT_AGENT_ID); // app id
private Collection<String> listUserId = Arrays.asList(PropertyUtils.getString(Constants.ENTERPRISE_WECHAT_USERS).split(","));
// Please change
private String agentId = "1000002"; // app id
private String partyId = "2";
private Collection<String> listPartyId = Arrays.asList("2","4");
private String userId = "test1";
private Collection<String> listUserId = Arrays.asList("test1","test2");
@Test
public void testSendSingleTeamWeChat() {
EnterpriseWeChatUtils wx = new EnterpriseWeChatUtils();
try {
String token = wx.getToken();
String msg = wx.makeTeamSendMsg(partyId, agentId, "hello world");
String resp = wx.sendQiyeWeixin("utf-8", msg, token);
String token = EnterpriseWeChatUtils.getToken();
String msg = EnterpriseWeChatUtils.makeTeamSendMsg(partyId, agentId, "hello world");
String resp = EnterpriseWeChatUtils.sendEnterpriseWeChat("utf-8", msg, token);
String errmsg = JSON.parseObject(resp).getString("errmsg");
Assert.assertEquals(errmsg, "ok");
@ -61,12 +59,11 @@ public class EnterpriseWeChatUtilsTest {
@Test
public void testSendMultiTeamWeChat() {
EnterpriseWeChatUtils wx = new EnterpriseWeChatUtils();
try {
String token = wx.getToken();
String msg = wx.makeTeamSendMsg(listPartyId, agentId, "hello world");
String resp = wx.sendQiyeWeixin("utf-8", msg, token);
String token = EnterpriseWeChatUtils.getToken();
String msg = EnterpriseWeChatUtils.makeTeamSendMsg(listPartyId, agentId, "hello world");
String resp = EnterpriseWeChatUtils.sendEnterpriseWeChat("utf-8", msg, token);
String errmsg = JSON.parseObject(resp).getString("errmsg");
Assert.assertEquals(errmsg, "ok");
@ -77,12 +74,23 @@ public class EnterpriseWeChatUtilsTest {
@Test
public void testSendSingleUserWeChat() {
EnterpriseWeChatUtils wx = new EnterpriseWeChatUtils();
try {
String token = wx.getToken();
String msg = wx.makeUserSendMsg(userId, agentId, "hello world");
String resp = wx.sendQiyeWeixin("utf-8", msg, token);
String token = EnterpriseWeChatUtils.getToken();
String msg = EnterpriseWeChatUtils.makeUserSendMsg(listUserId.stream().findFirst().get(), agentId, "您的会议室已经预定,稍后会同步到`邮箱` \n" +
">**事项详情** \n" +
">事 项:<font color='info'>开会</font> <br>" +
">组织者:@miglioguan \n" +
">参与者:@miglioguan、@kunliu、@jamdeezhou、@kanexiong、@kisonwang \n" +
"> \n" +
">会议室:<font color='info'>广州TIT 1楼 301</font> \n" +
">日 期:<font color='warning'>2018年5月18日</font> \n" +
">时 间:<font color='comment'>上午9:00-11:00</font> \n" +
"> \n" +
">请准时参加会议。 \n" +
"> \n" +
">如需修改会议信息,请点击:[修改会议信息](https://work.weixin.qq.com)\"");
String resp = EnterpriseWeChatUtils.sendEnterpriseWeChat("utf-8", msg, token);
String errmsg = JSON.parseObject(resp).getString("errmsg");
Assert.assertEquals(errmsg, "ok");
@ -93,12 +101,11 @@ public class EnterpriseWeChatUtilsTest {
@Test
public void testSendMultiUserWeChat() {
EnterpriseWeChatUtils wx = new EnterpriseWeChatUtils();
try {
String token = wx.getToken();
String msg = wx.makeUserSendMsg(listUserId, agentId, "hello world");
String resp = wx.sendQiyeWeixin("utf-8", msg, token);
String token = EnterpriseWeChatUtils.getToken();
String msg = EnterpriseWeChatUtils.makeUserSendMsg(listUserId, agentId, "hello world");
String resp = EnterpriseWeChatUtils.sendEnterpriseWeChat("utf-8", msg, token);
String errmsg = JSON.parseObject(resp).getString("errmsg");
Assert.assertEquals(errmsg, "ok");

9
escheduler-api/pom.xml

@ -1,10 +1,9 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>cn.analysys</groupId>
<artifactId>escheduler</artifactId>
<version>1.0.4-SNAPSHOT</version>
<version>1.1.0-SNAPSHOT</version>
</parent>
<artifactId>escheduler-api</artifactId>
<packaging>jar</packaging>
@ -48,6 +47,10 @@
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-tomcat</artifactId>
</exclusion>
<exclusion>
<artifactId>log4j-to-slf4j</artifactId>
<groupId>org.apache.logging.log4j</groupId>
</exclusion>
</exclusions>
</dependency>

47
escheduler-api/src/main/java/cn/escheduler/api/controller/DataSourceController.java

@ -18,10 +18,14 @@ package cn.escheduler.api.controller;
import cn.escheduler.api.enums.Status;
import cn.escheduler.api.service.DataSourceService;
import cn.escheduler.api.utils.CheckUtils;
import cn.escheduler.api.utils.Constants;
import cn.escheduler.api.utils.Result;
import cn.escheduler.common.enums.DbType;
import cn.escheduler.common.enums.ResUploadType;
import cn.escheduler.common.utils.CommonUtils;
import cn.escheduler.common.utils.ParameterUtils;
import cn.escheduler.common.utils.PropertyUtils;
import cn.escheduler.dao.model.User;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiImplicitParam;
@ -34,9 +38,11 @@ import org.springframework.http.HttpStatus;
import org.springframework.web.bind.annotation.*;
import springfox.documentation.annotations.ApiIgnore;
import java.util.HashMap;
import java.util.Map;
import static cn.escheduler.api.enums.Status.*;
import static cn.escheduler.common.utils.PropertyUtils.getBoolean;
/**
@ -54,12 +60,16 @@ public class DataSourceController extends BaseController {
/**
* create data source
* 创建数据源
*
* @param loginUser
* @param name
* @param note
* @param type
* @param host
* @param port
* @param database
* @param principal
* @param userName
* @param password
* @param other
* @return
*/
@ -84,13 +94,14 @@ public class DataSourceController extends BaseController {
@RequestParam(value = "host") String host,
@RequestParam(value = "port") String port,
@RequestParam(value = "database") String database,
@RequestParam(value = "principal") String principal,
@RequestParam(value = "userName") String userName,
@RequestParam(value = "password") String password,
@RequestParam(value = "other") String other) {
logger.info("login user {} create datasource ame: {}, note: {}, type: {}, other: {}",
loginUser.getUserName(), name, note, type, other);
logger.info("login user {} create datasource name: {}, note: {}, type: {}, host: {},port: {},database : {},principal: {},userName : {} other: {}",
loginUser.getUserName(), name, note, type, host,port,database,principal,userName,other);
try {
String parameter = dataSourceService.buildParameter(name, note, type, host, port, database, userName, password, other);
String parameter = dataSourceService.buildParameter(name, note, type, host, port, database,principal,userName, password, other);
Map<String, Object> result = dataSourceService.createDataSource(loginUser, name, note, type, parameter);
return returnDataList(result);
@ -134,13 +145,14 @@ public class DataSourceController extends BaseController {
@RequestParam(value = "host") String host,
@RequestParam(value = "port") String port,
@RequestParam(value = "database") String database,
@RequestParam(value = "principal") String principal,
@RequestParam(value = "userName") String userName,
@RequestParam(value = "password") String password,
@RequestParam(value = "other") String other) {
logger.info("login user {} updateProcessInstance datasource name: {}, note: {}, type: {}, other: {}",
loginUser.getUserName(), name, note, type, other);
try {
String parameter = dataSourceService.buildParameter(name, note, type, host, port, database, userName, password, other);
String parameter = dataSourceService.buildParameter(name, note, type, host, port, database,principal, userName, password, other);
Map<String, Object> dataSource = dataSourceService.updateDataSource(id, loginUser, name, note, type, parameter);
return returnDataList(dataSource);
} catch (Exception e) {
@ -269,13 +281,14 @@ public class DataSourceController extends BaseController {
@RequestParam(value = "host") String host,
@RequestParam(value = "port") String port,
@RequestParam(value = "database") String database,
@RequestParam(value = "principal") String principal,
@RequestParam(value = "userName") String userName,
@RequestParam(value = "password") String password,
@RequestParam(value = "other") String other) {
logger.info("login user {}, connect datasource: {} failure, note: {}, type: {}, other: {}",
loginUser.getUserName(), name, note, type, other);
try {
String parameter = dataSourceService.buildParameter(name, note, type, host, port, database, userName, password, other);
String parameter = dataSourceService.buildParameter(name, note, type, host, port, database,principal,userName, password, other);
Boolean isConnection = dataSourceService.checkConnection(type, parameter);
Result result = new Result();
@ -429,4 +442,24 @@ public class DataSourceController extends BaseController {
return error(AUTHORIZED_DATA_SOURCE.getCode(), AUTHORIZED_DATA_SOURCE.getMsg());
}
}
/**
* get kerberos startup state
*
* @param loginUser
* @return
*/
@ApiOperation(value = "getKerberosStartupState", notes= "GET_USER_INFO_NOTES")
@GetMapping(value="/kerberos-startup-state")
@ResponseStatus(HttpStatus.OK)
public Result getKerberosStartupState(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser){
logger.info("login user {},get kerberos startup state : {}", loginUser.getUserName());
try{
// if upload resource is HDFS and kerberos startup is true , else false
return success(Status.SUCCESS.getMsg(), CommonUtils.getKerberosStartupState());
}catch (Exception e){
logger.error(KERBEROS_STARTUP_STATE.getMsg(),e);
return error(Status.KERBEROS_STARTUP_STATE.getCode(), Status.KERBEROS_STARTUP_STATE.getMsg());
}
}
}
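For reference, a hypothetical client call against the new endpoint. The host, port and session cookie are placeholders, and the /datasources path prefix is assumed from this controller's mapping; the real UI goes through its own HTTP layer:
```
import org.springframework.http.HttpEntity;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpMethod;
import org.springframework.http.ResponseEntity;
import org.springframework.web.client.RestTemplate;

public class KerberosStateCheck {
    public static void main(String[] args) {
        RestTemplate rest = new RestTemplate();
        HttpHeaders headers = new HttpHeaders();
        headers.add("Cookie", "sessionId=xxxxxx");   // placeholder session id
        ResponseEntity<String> resp = rest.exchange(
                "http://192.168.xx.xx:12345/escheduler/datasources/kerberos-startup-state",
                HttpMethod.GET, new HttpEntity<>(headers), String.class);
        // data should be true only when resource upload is HDFS and kerberos is enabled
        System.out.println(resp.getBody());
    }
}
```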

4
escheduler-api/src/main/java/cn/escheduler/api/controller/MonitorController.java

@ -66,7 +66,7 @@ public class MonitorController extends BaseController{
logger.info("login user: {}, query all master", loginUser.getUserName());
try{
logger.info("list master, user:{}", loginUser.getUserName());
Map<String, Object> result = serverService.queryMaster(loginUser);
Map<String, Object> result = monitorService.queryMaster(loginUser);
return returnDataList(result);
}catch (Exception e){
logger.error(LIST_MASTERS_ERROR.getMsg(),e);
@ -86,7 +86,7 @@ public class MonitorController extends BaseController{
public Result listWorker(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser) {
logger.info("login user: {}, query all workers", loginUser.getUserName());
try{
Map<String, Object> result = serverService.queryWorker(loginUser);
Map<String, Object> result = monitorService.queryWorker(loginUser);
return returnDataList(result);
}catch (Exception e){
logger.error(LIST_WORKERS_ERROR.getMsg(),e);

29
escheduler-api/src/main/java/cn/escheduler/api/controller/SchedulerController.java

@ -304,4 +304,33 @@ public class SchedulerController extends BaseController {
return error(Status.QUERY_SCHEDULE_LIST_ERROR.getCode(), Status.QUERY_SCHEDULE_LIST_ERROR.getMsg());
}
}
/**
* preview schedule
*
* @param loginUser
* @param projectName
* @param schedule
* @return
*/
@ApiOperation(value = "previewSchedule", notes= "PREVIEW_SCHEDULE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "schedule", value = "SCHEDULE", dataType = "String", example = "{'startTime':'2019-06-10 00:00:00','endTime':'2019-06-13 00:00:00','crontab':'0 0 3/6 * * ? *'}"),
})
@PostMapping("/preview")
@ResponseStatus(HttpStatus.CREATED)
public Result previewSchedule(@ApiIgnore @RequestAttribute(value = SESSION_USER) User loginUser,
@ApiParam(name = "projectName", value = "PROJECT_NAME", required = true) @PathVariable String projectName,
@RequestParam(value = "schedule") String schedule
){
logger.info("login user {}, project name: {}, preview schedule: {}",
loginUser.getUserName(), projectName, schedule);
try {
Map<String, Object> result = schedulerService.previewSchedule(loginUser, projectName, schedule);
return returnDataList(result);
} catch (Exception e) {
logger.error(PREVIEW_SCHEDULE_ERROR.getMsg(), e);
return error(PREVIEW_SCHEDULE_ERROR.getCode(), PREVIEW_SCHEDULE_ERROR.getMsg());
}
}
}

6
escheduler-api/src/main/java/cn/escheduler/api/enums/Status.java

@ -161,7 +161,9 @@ public enum Status {
SAVE_ERROR(10136, "save error"),
DELETE_PROJECT_ERROR_DEFINES_NOT_NULL(10137, "please delete the process definitions in project first!"),
BATCH_DELETE_PROCESS_INSTANCE_BY_IDS_ERROR(10117,"batch delete process instance by ids {0} error"),
PREVIEW_SCHEDULE_ERROR(10139,"preview schedule error"),
PARSE_TO_CRON_EXPRESSION_ERROR(10140,"parse cron to cron expression error"),
SCHEDULE_START_TIME_END_TIME_SAME(10141,"The start time must not be the same as the end"),
UDF_FUNCTION_NOT_EXIST(20001, "UDF function not found"),
@ -210,6 +212,7 @@ public enum Status {
DELETE_SCHEDULE_CRON_BY_ID_ERROR(50024,"delete schedule by id error"),
BATCH_DELETE_PROCESS_DEFINE_ERROR(50025,"batch delete process definition error"),
BATCH_DELETE_PROCESS_DEFINE_BY_IDS_ERROR(50026,"batch delete process definition by ids {0} error"),
TENANT_NOT_SUITABLE(50027,"there is not any tenant suitable, please choose a tenant available."),
HDFS_NOT_STARTUP(60001,"hdfs not startup"),
HDFS_TERANT_RESOURCES_FILE_EXISTS(60002,"resource file exists,please delete resource first"),
@ -232,6 +235,7 @@ public enum Status {
QUEUE_COUNT_ERROR(90001,"queue count error"),
KERBEROS_STARTUP_STATE(100001,"get kerberos startup state error"),
;
private int code;

98
escheduler-api/src/main/java/cn/escheduler/api/service/DataSourceService.java

@ -21,7 +21,10 @@ import cn.escheduler.api.utils.Constants;
import cn.escheduler.api.utils.PageInfo;
import cn.escheduler.api.utils.Result;
import cn.escheduler.common.enums.DbType;
import cn.escheduler.common.enums.UserType;
import cn.escheduler.common.job.db.*;
import cn.escheduler.common.utils.CommonUtils;
import cn.escheduler.common.utils.JSONUtils;
import cn.escheduler.dao.mapper.DataSourceMapper;
import cn.escheduler.dao.mapper.DatasourceUserMapper;
import cn.escheduler.dao.mapper.ProjectMapper;
@ -30,6 +33,8 @@ import cn.escheduler.dao.model.Resource;
import cn.escheduler.dao.model.User;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.fastjson.TypeReference;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
@ -41,6 +46,8 @@ import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.*;
import static cn.escheduler.common.utils.PropertyUtils.getString;
/**
* datasource service
*/
@ -54,19 +61,16 @@ public class DataSourceService extends BaseService{
public static final String TYPE = "type";
public static final String HOST = "host";
public static final String PORT = "port";
public static final String PRINCIPAL = "principal";
public static final String DATABASE = "database";
public static final String USER_NAME = "userName";
public static final String PASSWORD = "password";
public static final String PASSWORD = cn.escheduler.common.Constants.PASSWORD;
public static final String OTHER = "other";
@Autowired
private ProjectMapper projectMapper;
@Autowired
private DataSourceMapper dataSourceMapper;
@Autowired
private ProjectService projectService;
@Autowired
private DatasourceUserMapper datasourceUserMapper;
@ -239,6 +243,7 @@ public class DataSourceService extends BaseService{
map.put(TYPE, dataSourceType);
map.put(HOST, host);
map.put(PORT, port);
map.put(PRINCIPAL, datasourceForm.getPrincipal());
map.put(DATABASE, database);
map.put(USER_NAME, datasourceForm.getUser());
map.put(PASSWORD, datasourceForm.getPassword());
@ -284,13 +289,37 @@ public class DataSourceService extends BaseService{
* @return
*/
private List<DataSource> getDataSources(User loginUser, String searchVal, Integer pageSize, PageInfo pageInfo) {
List<DataSource> dataSourceList = null;
if (isAdmin(loginUser)) {
return dataSourceMapper.queryAllDataSourcePaging(searchVal, pageInfo.getStart(), pageSize);
dataSourceList = dataSourceMapper.queryAllDataSourcePaging(searchVal, pageInfo.getStart(), pageSize);
}else{
dataSourceList = dataSourceMapper.queryDataSourcePaging(loginUser.getId(), searchVal,
pageInfo.getStart(), pageSize);
}
return dataSourceMapper.queryDataSourcePaging(loginUser.getId(), searchVal,
pageInfo.getStart(), pageSize);
handlePasswd(dataSourceList);
return dataSourceList;
}
/**
* handle datasource connection password for safety
* @param dataSourceList
*/
private void handlePasswd(List<DataSource> dataSourceList) {
for (DataSource dataSource : dataSourceList) {
String connectionParams = dataSource.getConnectionParams();
JSONObject object = JSONObject.parseObject(connectionParams);
object.put(cn.escheduler.common.Constants.PASSWORD, cn.escheduler.common.Constants.XXXXXX);
dataSource.setConnectionParams(JSONUtils.toJson(object));
}
}
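A minimal sketch of the masking above (the sample values are hypothetical): handlePasswd rewrites the stored JSON so list responses never expose the raw password.
// given stored connectionParams {"user":"hive","password":"secret"}
JSONObject params = JSONObject.parseObject("{\"user\":\"hive\",\"password\":\"secret\"}");
params.put(cn.escheduler.common.Constants.PASSWORD, cn.escheduler.common.Constants.XXXXXX);
// params now serializes as {"user":"hive","password":"******"}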
/**
* get datasource total num
*
@ -313,7 +342,14 @@ public class DataSourceService extends BaseService{
*/
public Map<String, Object> queryDataSourceList(User loginUser, Integer type) {
Map<String, Object> result = new HashMap<>(5);
List<DataSource> datasourceList = dataSourceMapper.queryDataSourceByType(loginUser.getId(), type);
List<DataSource> datasourceList;
if (isAdmin(loginUser)) {
datasourceList = dataSourceMapper.listAllDataSourceByType(type);
}else{
datasourceList = dataSourceMapper.queryDataSourceByType(loginUser.getId(), type);
}
result.put(Constants.DATA_LIST, datasourceList);
putMsg(result, Status.SUCCESS);
@ -362,11 +398,21 @@ public class DataSourceService extends BaseService{
Class.forName(Constants.COM_MYSQL_JDBC_DRIVER);
break;
case HIVE:
datasource = JSONObject.parseObject(parameter, HiveDataSource.class);
Class.forName(Constants.ORG_APACHE_HIVE_JDBC_HIVE_DRIVER);
break;
case SPARK:
datasource = JSONObject.parseObject(parameter, SparkDataSource.class);
if (CommonUtils.getKerberosStartupState()) {
System.setProperty(cn.escheduler.common.Constants.JAVA_SECURITY_KRB5_CONF,
getString(cn.escheduler.common.Constants.JAVA_SECURITY_KRB5_CONF_PATH));
Configuration configuration = new Configuration();
configuration.set(cn.escheduler.common.Constants.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
UserGroupInformation.setConfiguration(configuration);
UserGroupInformation.loginUserFromKeytab(getString(cn.escheduler.common.Constants.LOGIN_USER_KEY_TAB_USERNAME),
getString(cn.escheduler.common.Constants.LOGIN_USER_KEY_TAB_PATH));
}
if (dbType == DbType.HIVE){
datasource = JSONObject.parseObject(parameter, HiveDataSource.class);
}else if (dbType == DbType.SPARK){
datasource = JSONObject.parseObject(parameter, SparkDataSource.class);
}
Class.forName(Constants.ORG_APACHE_HIVE_JDBC_HIVE_DRIVER);
break;
case CLICKHOUSE:
@ -442,10 +488,18 @@ public class DataSourceService extends BaseService{
* @param other
* @return
*/
public String buildParameter(String name, String desc, DbType type, String host, String port, String database, String userName, String password, String other) {
public String buildParameter(String name, String desc, DbType type, String host,
String port, String database,String principal,String userName,
String password, String other) {
String address = buildAddress(type, host, port);
String jdbcUrl = address + "/" + database;
if (CommonUtils.getKerberosStartupState() &&
(type == DbType.HIVE || type == DbType.SPARK)){
jdbcUrl += ";principal=" + principal;
}
String separator = "";
if (Constants.MYSQL.equals(type.name())
|| Constants.POSTGRESQL.equals(type.name())
@ -464,6 +518,10 @@ public class DataSourceService extends BaseService{
parameterMap.put(Constants.JDBC_URL, jdbcUrl);
parameterMap.put(Constants.USER, userName);
parameterMap.put(Constants.PASSWORD, password);
if (CommonUtils.getKerberosStartupState() &&
(type == DbType.HIVE || type == DbType.SPARK)){
parameterMap.put(Constants.PRINCIPAL,principal);
}
if (other != null && !"".equals(other)) {
Map map = JSONObject.parseObject(other, new TypeReference<LinkedHashMap<String, String>>() {
});
@ -537,7 +595,7 @@ public class DataSourceService extends BaseService{
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
if(loginUser.getId() != dataSource.getUserId()){
if(loginUser.getId() != dataSource.getUserId() && loginUser.getUserType() != UserType.ADMIN_USER){
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
@ -622,13 +680,13 @@ public class DataSourceService extends BaseService{
*/
private String[] getHostsAndPort(String address) {
String[] result = new String[2];
String[] tmpArray = address.split("//");
String[] tmpArray = address.split(cn.escheduler.common.Constants.DOUBLE_SLASH);
String hostsAndPorts = tmpArray[tmpArray.length - 1];
StringBuilder hosts = new StringBuilder("");
String[] hostPortArray = hostsAndPorts.split(",");
String port = hostPortArray[0].split(":")[1];
StringBuilder hosts = new StringBuilder();
String[] hostPortArray = hostsAndPorts.split(cn.escheduler.common.Constants.COMMA);
String port = hostPortArray[0].split(cn.escheduler.common.Constants.COLON)[1];
for (String hostPort : hostPortArray) {
hosts.append(hostPort.split(":")[0]).append(",");
hosts.append(hostPort.split(cn.escheduler.common.Constants.COLON)[0]).append(cn.escheduler.common.Constants.COMMA);
}
hosts.deleteCharAt(hosts.length() - 1);
result[0] = hosts.toString();
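As a hedged sketch of the Kerberos change in buildParameter above (host, database and principal are made up): with kerberos startup enabled, the hive/spark JDBC url gains a principal suffix.
String address = "jdbc:hive2://192.168.1.1:10000"; // from buildAddress(HIVE, host, port)
String jdbcUrl = address + "/" + "default";
jdbcUrl += ";principal=" + "hive/node1@EXAMPLE.COM"; // only for HIVE/SPARK with kerberos on
// -> jdbc:hive2://192.168.1.1:10000/default;principal=hive/node1@EXAMPLE.COM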

29
escheduler-api/src/main/java/cn/escheduler/api/service/ExecutorService.java

@ -110,6 +110,13 @@ public class ExecutorService extends BaseService{
return result;
}
if (!checkTenantSuitable(processDefinition)){
logger.error("there is not any vaild tenant for the process definition: id:{},name:{}, ",
processDefinition.getId(), processDefinition.getName());
putMsg(result, Status.TENANT_NOT_SUITABLE);
return result;
}
/**
* create command
*/
@ -190,6 +197,11 @@ public class ExecutorService extends BaseService{
if (status != Status.SUCCESS) {
return checkResult;
}
if (!checkTenantSuitable(processDefinition)){
logger.error("there is not any vaild tenant for the process definition: id:{},name:{}, ",
processDefinition.getId(), processDefinition.getName());
putMsg(result, Status.TENANT_NOT_SUITABLE);
}
switch (executeType) {
case REPEAT_RUNNING:
@ -230,6 +242,21 @@ public class ExecutorService extends BaseService{
return result;
}
/**
* check tenant suitable
* @param processDefinition
* @return
*/
private boolean checkTenantSuitable(ProcessDefinition processDefinition) {
Tenant tenant = processDao.getTenantForProcess(processDefinition.getTenantId(),
processDefinition.getUserId());
return tenant != null;
}
/**
* Check the state of process instance and the type of operation match
*
@ -260,7 +287,7 @@ public class ExecutorService extends BaseService{
}
break;
case RECOVER_SUSPENDED_PROCESS:
if (executionStatus.typeIsPause()) {
if (executionStatus.typeIsPause() || executionStatus.typeIsCancel()) {
checkResult = true;
}
default:

41
escheduler-api/src/main/java/cn/escheduler/api/service/MonitorService.java

@ -18,13 +18,16 @@ package cn.escheduler.api.service;
import cn.escheduler.api.enums.Status;
import cn.escheduler.api.utils.Constants;
import cn.escheduler.api.utils.ZookeeperMonitorUtils;
import cn.escheduler.api.utils.ZookeeperMonitor;
import cn.escheduler.dao.MonitorDBDao;
import cn.escheduler.dao.model.MasterServer;
import cn.escheduler.dao.model.MonitorRecord;
import cn.escheduler.dao.model.User;
import cn.escheduler.dao.model.ZookeeperRecord;
import org.apache.hadoop.mapred.Master;
import org.springframework.stereotype.Service;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@ -52,6 +55,22 @@ public class MonitorService extends BaseService{
}
/**
* query master list
*
* @param loginUser
* @return
*/
public Map<String,Object> queryMaster(User loginUser) {
Map<String, Object> result = new HashMap<>(5);
List<MasterServer> masterServers = new ZookeeperMonitor().getMasterServers();
result.put(Constants.DATA_LIST, masterServers);
putMsg(result,Status.SUCCESS);
return result;
}
/**
* query zookeeper state
@ -61,7 +80,7 @@ public class MonitorService extends BaseService{
public Map<String,Object> queryZookeeperState(User loginUser) {
Map<String, Object> result = new HashMap<>(5);
List<ZookeeperRecord> zookeeperRecordList = ZookeeperMonitorUtils.zookeeperInfoList();
List<ZookeeperRecord> zookeeperRecordList = ZookeeperMonitor.zookeeperInfoList();
result.put(Constants.DATA_LIST, zookeeperRecordList);
putMsg(result, Status.SUCCESS);
@ -69,4 +88,22 @@ public class MonitorService extends BaseService{
return result;
}
/**
* query worker list
*
* @param loginUser
* @return
*/
public Map<String,Object> queryWorker(User loginUser) {
Map<String, Object> result = new HashMap<>(5);
List<MasterServer> workerServers = new ZookeeperMonitor().getWorkerServers();
result.put(Constants.DATA_LIST, workerServers);
putMsg(result,Status.SUCCESS);
return result;
}
}

5
escheduler-api/src/main/java/cn/escheduler/api/service/ProcessDefinitionService.java

@ -24,6 +24,7 @@ import cn.escheduler.api.utils.PageInfo;
import cn.escheduler.common.enums.Flag;
import cn.escheduler.common.enums.ReleaseState;
import cn.escheduler.common.enums.TaskType;
import cn.escheduler.common.enums.UserType;
import cn.escheduler.common.graph.DAG;
import cn.escheduler.common.model.TaskNode;
import cn.escheduler.common.model.TaskNodeRelation;
@ -127,6 +128,7 @@ public class ProcessDefinitionService extends BaseDAGService {
processDefine.setLocations(locations);
processDefine.setConnects(connects);
processDefine.setTimeout(processData.getTimeout());
processDefine.setTenantId(processData.getTenantId());
//custom global params
List<Property> globalParamsList = processData.getGlobalParams();
@ -291,6 +293,7 @@ public class ProcessDefinitionService extends BaseDAGService {
processDefine.setLocations(locations);
processDefine.setConnects(connects);
processDefine.setTimeout(processData.getTimeout());
processDefine.setTenantId(processData.getTenantId());
//custom global params
List<Property> globalParamsList = new ArrayList<>();
@ -365,7 +368,7 @@ public class ProcessDefinitionService extends BaseDAGService {
}
// Determine if the login user is the owner of the process definition
if (loginUser.getId() != processDefinition.getUserId()) {
if (loginUser.getId() != processDefinition.getUserId() && loginUser.getUserType() != UserType.ADMIN_USER) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}

30
escheduler-api/src/main/java/cn/escheduler/api/service/ProcessInstanceService.java

@ -38,10 +38,7 @@ import cn.escheduler.common.utils.JSONUtils;
import cn.escheduler.common.utils.ParameterUtils;
import cn.escheduler.common.utils.placeholder.BusinessTimeUtils;
import cn.escheduler.dao.ProcessDao;
import cn.escheduler.dao.mapper.ProcessDefinitionMapper;
import cn.escheduler.dao.mapper.ProcessInstanceMapper;
import cn.escheduler.dao.mapper.ProjectMapper;
import cn.escheduler.dao.mapper.TaskInstanceMapper;
import cn.escheduler.dao.mapper.*;
import cn.escheduler.dao.model.*;
import com.alibaba.fastjson.JSON;
import org.apache.commons.lang3.StringUtils;
@ -97,6 +94,9 @@ public class ProcessInstanceService extends BaseDAGService {
@Autowired
LoggerService loggerService;
@Autowired
WorkerGroupMapper workerGroupMapper;
/**
* query process instance by id
*
@ -115,6 +115,21 @@ public class ProcessInstanceService extends BaseDAGService {
return checkResult;
}
ProcessInstance processInstance = processDao.findProcessInstanceDetailById(processId);
String workerGroupName = "";
if(processInstance.getWorkerGroupId() == -1){
workerGroupName = DEFAULT;
}else{
WorkerGroup workerGroup = workerGroupMapper.queryById(processInstance.getWorkerGroupId());
if(workerGroup == null){
workerGroupName = DEFAULT;
}else{
workerGroupName = workerGroup.getName();
}
}
processInstance.setWorkerGroupName(workerGroupName);
ProcessDefinition processDefinition = processDao.findProcessDefineById(processInstance.getProcessDefinitionId());
processInstance.setReceivers(processDefinition.getReceivers());
processInstance.setReceiversCc(processDefinition.getReceiversCc());
result.put(Constants.DATA_LIST, processInstance);
putMsg(result, Status.SUCCESS);
@ -364,6 +379,7 @@ public class ProcessInstanceService extends BaseDAGService {
String globalParams = null;
String originDefParams = null;
int timeout = processInstance.getTimeout();
ProcessDefinition processDefinition = processDao.findProcessDefineById(processInstance.getProcessDefinitionId());
if (StringUtils.isNotEmpty(processInstanceJson)) {
ProcessData processData = JSONUtils.parseObject(processInstanceJson, ProcessData.class);
//check workflow json is valid
@ -379,6 +395,11 @@ public class ProcessInstanceService extends BaseDAGService {
processInstance.getCmdTypeIfComplement(), schedule);
timeout = processData.getTimeout();
processInstance.setTimeout(timeout);
Tenant tenant = processDao.getTenantForProcess(processData.getTenantId(),
processDefinition.getUserId());
if(tenant != null){
processInstance.setTenantCode(tenant.getTenantCode());
}
processInstance.setProcessInstanceJson(processInstanceJson);
processInstance.setGlobalParams(globalParams);
}
@ -387,7 +408,6 @@ public class ProcessInstanceService extends BaseDAGService {
int update = processDao.updateProcessInstance(processInstance);
int updateDefine = 1;
if (syncDefine && StringUtils.isNotEmpty(processInstanceJson)) {
ProcessDefinition processDefinition = processDao.findProcessDefineById(processInstance.getProcessDefinitionId());
processDefinition.setProcessDefinitionJson(processInstanceJson);
processDefinition.setGlobalParams(originDefParams);
processDefinition.setLocations(locations);

9
escheduler-api/src/main/java/cn/escheduler/api/service/ProjectService.java

@ -76,15 +76,6 @@ public class ProjectService extends BaseService{
return descCheck;
}
/**
* only general users can create projects; administrators have no corresponding tenant and can only view
*/
if (!userService.isGeneral(loginUser)) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
Project project = projectMapper.queryByName(name);
if (project != null) {
putMsg(result, Status.PROJECT_ALREADY_EXISTS, name);

50
escheduler-api/src/main/java/cn/escheduler/api/service/ResourcesService.java

@ -21,6 +21,7 @@ import cn.escheduler.api.utils.Constants;
import cn.escheduler.api.utils.PageInfo;
import cn.escheduler.api.utils.Result;
import cn.escheduler.common.enums.ResourceType;
import cn.escheduler.common.enums.UserType;
import cn.escheduler.common.utils.FileUtils;
import cn.escheduler.common.utils.HadoopUtils;
import cn.escheduler.common.utils.PropertyUtils;
@ -85,8 +86,8 @@ public class ResourcesService extends BaseService {
Result result = new Result();
// if hdfs not startup
if (!PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
logger.error("hdfs startup state: {}", PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE));
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
@ -184,9 +185,9 @@ public class ResourcesService extends BaseService {
ResourceType type) {
Result result = new Result();
// if hdfs not startup
if (!PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
logger.error("hdfs startup state: {}", PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE));
// if resource upload is not enabled
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
@ -368,7 +369,12 @@ public class ResourcesService extends BaseService {
public Map<String, Object> queryResourceList(User loginUser, ResourceType type) {
Map<String, Object> result = new HashMap<>(5);
List<Resource> resourceList = resourcesMapper.queryResourceListAuthored(loginUser.getId(), type.ordinal());
List<Resource> resourceList;
if(isAdmin(loginUser)){
resourceList = resourcesMapper.listAllResourceByType(type.ordinal());
}else{
resourceList = resourcesMapper.queryResourceListAuthored(loginUser.getId(), type.ordinal());
}
result.put(Constants.DATA_LIST, resourceList);
putMsg(result,Status.SUCCESS);
@ -385,9 +391,9 @@ public class ResourcesService extends BaseService {
public Result delete(User loginUser, int resourceId) throws Exception {
Result result = new Result();
// if hdfs not startup
if (!PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
logger.error("hdfs startup state: {}", PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE));
// if resource upload is not enabled
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
@ -399,7 +405,7 @@ public class ResourcesService extends BaseService {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
if (loginUser.getId() != resource.getUserId()) {
if (loginUser.getId() != resource.getUserId() && loginUser.getUserType() != UserType.ADMIN_USER) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
@ -483,9 +489,9 @@ public class ResourcesService extends BaseService {
public Result readResource(int resourceId, int skipLineNum, int limit) {
Result result = new Result();
// if hdfs not startup
if (!PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
logger.error("hdfs startup state: {}", PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE));
// if resource upload is not enabled
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
@ -550,9 +556,9 @@ public class ResourcesService extends BaseService {
@Transactional(value = "TransactionManager",rollbackFor = Exception.class)
public Result onlineCreateResource(User loginUser, ResourceType type, String fileName, String fileSuffix, String desc, String content) {
Result result = new Result();
// if hdfs not startup
if (!PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
logger.error("hdfs startup state: {}", PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE));
// if resource upload is not enabled
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
@ -611,9 +617,9 @@ public class ResourcesService extends BaseService {
public Result updateResourceContent(int resourceId, String content) {
Result result = new Result();
// if hdfs not startup
if (!PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
logger.error("hdfs startup state: {}", PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE));
// if resource upload is not enabled
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
@ -706,9 +712,9 @@ public class ResourcesService extends BaseService {
* @return
*/
public org.springframework.core.io.Resource downloadResource(int resourceId) throws Exception {
// if hdfs not startup
if (!PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
logger.error("hdfs startup state: {}", PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE));
// if resource upload is not enabled
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
throw new RuntimeException("hdfs not startup");
}

54
escheduler-api/src/main/java/cn/escheduler/api/service/SchedulerService.java

@ -19,14 +19,13 @@ package cn.escheduler.api.service;
import cn.escheduler.api.dto.ScheduleParam;
import cn.escheduler.api.enums.Status;
import cn.escheduler.server.quartz.ProcessScheduleJob;
import cn.escheduler.server.quartz.QuartzExecutors;
import cn.escheduler.api.utils.Constants;
import cn.escheduler.api.utils.PageInfo;
import cn.escheduler.common.enums.FailureStrategy;
import cn.escheduler.common.enums.Priority;
import cn.escheduler.common.enums.ReleaseState;
import cn.escheduler.common.enums.WarningType;
import cn.escheduler.common.utils.DateUtils;
import cn.escheduler.common.utils.JSONUtils;
import cn.escheduler.dao.ProcessDao;
import cn.escheduler.dao.mapper.MasterServerMapper;
@ -34,7 +33,11 @@ import cn.escheduler.dao.mapper.ProcessDefinitionMapper;
import cn.escheduler.dao.mapper.ProjectMapper;
import cn.escheduler.dao.mapper.ScheduleMapper;
import cn.escheduler.dao.model.*;
import cn.escheduler.dao.utils.cron.CronUtils;
import cn.escheduler.server.quartz.ProcessScheduleJob;
import cn.escheduler.server.quartz.QuartzExecutors;
import org.apache.commons.lang3.StringUtils;
import org.quartz.CronExpression;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
@ -42,6 +45,7 @@ import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import java.io.IOException;
import java.text.ParseException;
import java.util.*;
/**
@ -115,6 +119,11 @@ public class SchedulerService extends BaseService {
scheduleObj.setProcessDefinitionName(processDefinition.getName());
ScheduleParam scheduleParam = JSONUtils.parseObject(schedule, ScheduleParam.class);
if (DateUtils.differSec(scheduleParam.getStartTime(),scheduleParam.getEndTime()) == 0) {
logger.warn("The start time must not be the same as the end");
putMsg(result,Status.SCHEDULE_START_TIME_END_TIME_SAME);
return result;
}
scheduleObj.setStartTime(scheduleParam.getStartTime());
scheduleObj.setEndTime(scheduleParam.getEndTime());
if (!org.quartz.CronExpression.isValidExpression(scheduleParam.getCrontab())) {
@ -201,6 +210,11 @@ public class SchedulerService extends BaseService {
// updateProcessInstance param
if (StringUtils.isNotEmpty(scheduleExpression)) {
ScheduleParam scheduleParam = JSONUtils.parseObject(scheduleExpression, ScheduleParam.class);
if (DateUtils.differSec(scheduleParam.getStartTime(),scheduleParam.getEndTime()) == 0) {
logger.warn("The start time must not be the same as the end");
putMsg(result,Status.SCHEDULE_START_TIME_END_TIME_SAME);
return result;
}
schedule.setStartTime(scheduleParam.getStartTime());
schedule.setEndTime(scheduleParam.getEndTime());
if (!org.quartz.CronExpression.isValidExpression(scheduleParam.getCrontab())) {
@ -442,14 +456,14 @@ public class SchedulerService extends BaseService {
/**
* delete schedule
*/
public static void deleteSchedule(int projectId, int processId) throws RuntimeException{
logger.info("delete schedules of project id:{}, flow id:{}", projectId, processId);
public static void deleteSchedule(int projectId, int scheduleId) throws RuntimeException{
logger.info("delete schedules of project id:{}, schedule id:{}", projectId, scheduleId);
String jobName = QuartzExecutors.buildJobName(processId);
String jobName = QuartzExecutors.buildJobName(scheduleId);
String jobGroupName = QuartzExecutors.buildJobGroupName(projectId);
if(!QuartzExecutors.getInstance().deleteJob(jobName, jobGroupName)){
logger.warn("set offline failure:projectId:{},processId:{}",projectId,processId);
logger.warn("set offline failure:projectId:{},scheduleId:{}",projectId,scheduleId);
throw new RuntimeException("set offline failure");
}
@ -537,4 +551,32 @@ public class SchedulerService extends BaseService {
}
return result;
}
/**
* preview schedule
* @param loginUser
* @param projectName
* @param schedule
* @return
*/
public Map<String,Object> previewSchedule(User loginUser, String projectName, String schedule) {
Map<String, Object> result = new HashMap<>(5);
CronExpression cronExpression;
ScheduleParam scheduleParam = JSONUtils.parseObject(schedule, ScheduleParam.class);
Date now = new Date();
Date startTime = now.after(scheduleParam.getStartTime()) ? now : scheduleParam.getStartTime();
Date endTime = scheduleParam.getEndTime();
try {
cronExpression = CronUtils.parse2CronExpression(scheduleParam.getCrontab());
} catch (ParseException e) {
logger.error(e.getMessage(),e);
putMsg(result,Status.PARSE_TO_CRON_EXPRESSION_ERROR);
return result;
}
List<Date> selfFireDateList = CronUtils.getSelfFireDateList(startTime, endTime,cronExpression);
result.put(Constants.DATA_LIST, selfFireDateList.stream().map(t -> DateUtils.dateToString(t)).limit(cn.escheduler.common.Constants.PREVIEW_SCHEDULE_EXECUTE_COUNT).collect(java.util.stream.Collectors.toList()));
putMsg(result, Status.SUCCESS);
return result;
}
}
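A self-contained sketch of the preview mechanics (assumes Quartz's CronExpression, as imported above; the crontab matches the controller test below):
import org.quartz.CronExpression;
import java.text.ParseException;
import java.util.Date;
public class CronPreviewSketch {
    public static void main(String[] args) throws ParseException {
        CronExpression cron = new CronExpression("0 0 3/6 * * ? *");
        Date next = new Date();
        // previewSchedule gathers fire times like these between start and end,
        // then keeps at most PREVIEW_SCHEDULE_EXECUTE_COUNT (5) of them
        for (int i = 0; i < 5; i++) {
            next = cron.getNextValidTimeAfter(next);
            System.out.println(next);
        }
    }
}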

38
escheduler-api/src/main/java/cn/escheduler/api/service/TenantService.java

@ -96,7 +96,7 @@ public class TenantService extends BaseService{
tenantMapper.insert(tenant);
// if hdfs startup
if (PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
if (PropertyUtils.getResUploadStartupState()){
String resourcePath = HadoopUtils.getHdfsDataBasePath() + "/" + tenantCode + "/resources";
String udfsPath = HadoopUtils.getHdfsUdfDir(tenantCode);
/**
@ -178,7 +178,7 @@ public class TenantService extends BaseService{
Tenant newTenant = tenantMapper.queryByTenantCode(tenantCode);
if (newTenant == null){
// if hdfs startup
if (PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
if (PropertyUtils.getResUploadStartupState()){
String resourcePath = HadoopUtils.getHdfsDataBasePath() + "/" + tenantCode + "/resources";
String udfsPath = HadoopUtils.getHdfsUdfDir(tenantCode);
//init hdfs resource
@ -235,28 +235,29 @@ public class TenantService extends BaseService{
return result;
}
// if hdfs startup
if (PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
// if resource upload is enabled
if (PropertyUtils.getResUploadStartupState()){
String tenantPath = HadoopUtils.getHdfsDataBasePath() + "/" + tenant.getTenantCode();
String resourcePath = HadoopUtils.getHdfsDir(tenant.getTenantCode());
FileStatus[] fileStatus = HadoopUtils.getInstance().listFileStatus(resourcePath);
if (fileStatus.length > 0) {
putMsg(result, Status.HDFS_TERANT_RESOURCES_FILE_EXISTS);
return result;
}
fileStatus = HadoopUtils.getInstance().listFileStatus(HadoopUtils.getHdfsUdfDir(tenant.getTenantCode()));
if (fileStatus.length > 0) {
putMsg(result, Status.HDFS_TERANT_UDFS_FILE_EXISTS);
return result;
}
if (HadoopUtils.getInstance().exists(tenantPath)){
String resourcePath = HadoopUtils.getHdfsDir(tenant.getTenantCode());
FileStatus[] fileStatus = HadoopUtils.getInstance().listFileStatus(resourcePath);
if (fileStatus.length > 0) {
putMsg(result, Status.HDFS_TERANT_RESOURCES_FILE_EXISTS);
return result;
}
fileStatus = HadoopUtils.getInstance().listFileStatus(HadoopUtils.getHdfsUdfDir(tenant.getTenantCode()));
if (fileStatus.length > 0) {
putMsg(result, Status.HDFS_TERANT_UDFS_FILE_EXISTS);
return result;
}
HadoopUtils.getInstance().delete(tenantPath, true);
HadoopUtils.getInstance().delete(tenantPath, true);
}
}
tenantMapper.deleteById(id);
putMsg(result, Status.SUCCESS);
return result;
}
@ -269,9 +270,6 @@ public class TenantService extends BaseService{
public Map<String, Object> queryTenantList(User loginUser) {
Map<String, Object> result = new HashMap<>(5);
if (checkAdmin(loginUser, result)) {
return result;
}
List<Tenant> resourceList = tenantMapper.queryAllTenant();
result.put(Constants.DATA_LIST, resourceList);

12
escheduler-api/src/main/java/cn/escheduler/api/service/UdfFuncService.java

@ -80,9 +80,9 @@ public class UdfFuncService extends BaseService{
int resourceId) {
Result result = new Result();
// if hdfs not startup
if (!PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
logger.error("hdfs startup state: {}", PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE));
// if resource upload is not enabled
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
@ -167,9 +167,9 @@ public class UdfFuncService extends BaseService{
// verify udfFunc is exist
UdfFunc udf = udfFuncMapper.queryUdfById(udfFuncId);
// if hdfs not startup
if (!PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
logger.error("hdfs startup state: {}", PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE));
// if resource upload is not enabled
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}

54
escheduler-api/src/main/java/cn/escheduler/api/service/UsersService.java

@ -125,7 +125,7 @@ public class UsersService extends BaseService {
Tenant tenant = tenantMapper.queryById(tenantId);
// if hdfs startup
if (PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
if (PropertyUtils.getResUploadStartupState()){
String userPath = HadoopUtils.getHdfsDataBasePath() + "/" + tenant.getTenantCode() + "/home/" + user.getId();
HadoopUtils.getInstance().mkdir(userPath);
@ -245,35 +245,35 @@ public class UsersService extends BaseService {
Tenant newTenant = tenantMapper.queryById(tenantId);
if (newTenant != null) {
// if hdfs startup
if (PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
if (PropertyUtils.getResUploadStartupState() && oldTenant != null){
String newTenantCode = newTenant.getTenantCode();
String oldResourcePath = HadoopUtils.getHdfsDataBasePath() + "/" + oldTenant.getTenantCode() + "/resources";
String oldUdfsPath = HadoopUtils.getHdfsUdfDir(oldTenant.getTenantCode());
if (HadoopUtils.getInstance().exists(oldResourcePath)){
String newResourcePath = HadoopUtils.getHdfsDataBasePath() + "/" + newTenantCode + "/resources";
String newUdfsPath = HadoopUtils.getHdfsUdfDir(newTenantCode);
String newResourcePath = HadoopUtils.getHdfsDataBasePath() + "/" + newTenantCode + "/resources";
String newUdfsPath = HadoopUtils.getHdfsUdfDir(newTenantCode);
//file resources list
List<Resource> fileResourcesList = resourceMapper.queryResourceCreatedByUser(userId, 0);
if (CollectionUtils.isNotEmpty(fileResourcesList)) {
for (Resource resource : fileResourcesList) {
HadoopUtils.getInstance().copy(oldResourcePath + "/" + resource.getAlias(), newResourcePath, false, true);
//file resources list
List<Resource> fileResourcesList = resourceMapper.queryResourceCreatedByUser(userId, 0);
if (CollectionUtils.isNotEmpty(fileResourcesList)) {
for (Resource resource : fileResourcesList) {
HadoopUtils.getInstance().copy(oldResourcePath + "/" + resource.getAlias(), newResourcePath, false, true);
}
}
}
//udf resources
List<Resource> udfResourceList = resourceMapper.queryResourceCreatedByUser(userId, 1);
if (CollectionUtils.isNotEmpty(udfResourceList)) {
for (Resource resource : udfResourceList) {
HadoopUtils.getInstance().copy(oldUdfsPath + "/" + resource.getAlias(), newUdfsPath, false, true);
//udf resources
List<Resource> udfResourceList = resourceMapper.queryResourceCreatedByUser(userId, 1);
if (CollectionUtils.isNotEmpty(udfResourceList)) {
for (Resource resource : udfResourceList) {
HadoopUtils.getInstance().copy(oldUdfsPath + "/" + resource.getAlias(), newUdfsPath, false, true);
}
}
}
//Delete the user from the old tenant directory
String oldUserPath = HadoopUtils.getHdfsDataBasePath() + "/" + oldTenant.getTenantCode() + "/home/" + userId;
HadoopUtils.getInstance().delete(oldUserPath, true);
//Delete the user from the old tenant directory
String oldUserPath = HadoopUtils.getHdfsDataBasePath() + "/" + oldTenant.getTenantCode() + "/home/" + userId;
HadoopUtils.getInstance().delete(oldUserPath, true);
}
//create user in the new tenant directory
String newUserPath = HadoopUtils.getHdfsDataBasePath() + "/" + newTenant.getTenantCode() + "/home/" + user.getId();
@ -307,11 +307,13 @@ public class UsersService extends BaseService {
// delete user
User user = userMapper.queryTenantCodeByUserId(id);
if (PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
String userPath = HadoopUtils.getHdfsDataBasePath() + "/" + user.getTenantCode() + "/home/" + id;
HadoopUtils.getInstance().delete(userPath, true);
if (user != null) {
if (PropertyUtils.getResUploadStartupState()) {
String userPath = HadoopUtils.getHdfsDataBasePath() + "/" + user.getTenantCode() + "/home/" + id;
if (HadoopUtils.getInstance().exists(userPath)) {
HadoopUtils.getInstance().delete(userPath, true);
}
}
}
userMapper.delete(id);

4
escheduler-api/src/main/java/cn/escheduler/api/utils/CheckUtils.java

@ -18,8 +18,10 @@ package cn.escheduler.api.utils;
import cn.escheduler.api.enums.Status;
import cn.escheduler.common.enums.ResUploadType;
import cn.escheduler.common.task.AbstractParameters;
import cn.escheduler.common.utils.JSONUtils;
import cn.escheduler.common.utils.PropertyUtils;
import cn.escheduler.common.utils.TaskParametersUtils;
import org.apache.commons.lang.StringUtils;
@ -28,6 +30,7 @@ import java.util.HashMap;
import java.util.Map;
import java.util.regex.Pattern;
import static cn.escheduler.common.utils.PropertyUtils.getBoolean;
/**
@ -157,5 +160,4 @@ public class CheckUtils {
return pattern.matcher(str).matches();
}
}

1
escheduler-api/src/main/java/cn/escheduler/api/utils/Constants.java

@ -111,6 +111,7 @@ public class Constants {
public static final String ADDRESS = "address";
public static final String DATABASE = "database";
public static final String JDBC_URL = "jdbcUrl";
public static final String PRINCIPAL = "principal";
public static final String USER = "user";
public static final String PASSWORD = "password";
public static final String OTHER = "other";

39
escheduler-api/src/main/java/cn/escheduler/api/utils/ZookeeperMonitorUtils.java → escheduler-api/src/main/java/cn/escheduler/api/utils/ZookeeperMonitor.java

@ -1,7 +1,9 @@
package cn.escheduler.api.utils;
import cn.escheduler.common.zk.AbstractZKClient;
import cn.escheduler.dao.model.MasterServer;
import cn.escheduler.dao.model.ZookeeperRecord;
import cn.escheduler.server.ResInfo;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -9,14 +11,15 @@ import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Map;
/**
* monitor zookeeper info
*/
public class ZookeeperMonitorUtils {
public class ZookeeperMonitor extends AbstractZKClient{
private static final Logger LOG = LoggerFactory.getLogger(ZookeeperMonitorUtils.class);
private static final Logger LOG = LoggerFactory.getLogger(ZookeeperMonitor.class);
private static final String zookeeperList = AbstractZKClient.getZookeeperQuorum();
/**
@ -33,6 +36,38 @@ public class ZookeeperMonitorUtils {
return null;
}
/**
* get server list.
* @param isMaster
* @return
*/
public List<MasterServer> getServers(boolean isMaster){
List<MasterServer> masterServers = new ArrayList<>();
Map<String, String> masterMap = getServerList(isMaster);
String parentPath = isMaster ? getMasterZNodeParentPath() : getWorkerZNodeParentPath();
for(String path : masterMap.keySet()){
MasterServer masterServer = ResInfo.parseHeartbeatForZKInfo(masterMap.get(path));
masterServer.setZkDirectory( parentPath + "/"+ path);
masterServers.add(masterServer);
}
return masterServers;
}
/**
* get master servers
* @return
*/
public List<MasterServer> getMasterServers(){
return getServers(true);
}
/**
* the worker znode layout is the same as the master's, so the master model is reused
* @return
*/
public List<MasterServer> getWorkerServers(){
return getServers(false);
}
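A hedged usage sketch of the new monitor (znode paths and heartbeat payloads are whatever ResInfo.parseHeartbeatForZKInfo expects):
ZookeeperMonitor monitor = new ZookeeperMonitor();
List<MasterServer> masters = monitor.getMasterServers(); // children of the master znode parent path
List<MasterServer> workers = monitor.getWorkerServers(); // worker znodes, mapped onto the same MasterServer model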
private static List<ZookeeperRecord> zookeeperInfoList(String zookeeperServers) {

42
escheduler-api/src/main/resources/logback.xml

@ -1,42 +0,0 @@
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds">
<logger name="org.apache.zookeeper" level="WARN"/>
<logger name="org.apache.hbase" level="WARN"/>
<logger name="org.apache.hadoop" level="WARN"/>
<property name="log.base" value="logs" />
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="APISERVERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- Log level filter -->
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<file>${log.base}/escheduler-api-server.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/escheduler-api-server.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>168</maxHistory>
<maxFileSize>64MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="STDOUT" />
</root>
</configuration>

13
escheduler-api/src/test/java/cn/escheduler/api/controller/SchedulerControllerTest.java

@ -64,4 +64,17 @@ public class SchedulerControllerTest {
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void previewSchedule() throws Exception {
MvcResult mvcResult = mockMvc.perform(post("/projects/{projectName}/schedule/preview","li_test_1")
.header("sessionId", "c24ed9d9-1c20-48a0-bd9c-5cfca14a4dcb")
.param("schedule","{'startTime':'2019-06-10 00:00:00','endTime':'2019-06-13 00:00:00','crontab':'0 0 3/6 * * ? *'}"))
.andExpect(status().isCreated())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
}

29
escheduler-api/src/test/java/cn/escheduler/api/utils/ZookeeperMonitorUtilsTest.java

@ -0,0 +1,29 @@
package cn.escheduler.api.utils;
import cn.escheduler.dao.model.MasterServer;
import org.junit.Assert;
import org.junit.Test;
import java.util.List;
public class ZookeeperMonitorUtilsTest {
@Test
public void testGetMasterList(){
ZookeeperMonitor zookeeperMonitor = new ZookeeperMonitor();
List<MasterServer> masterServerList = zookeeperMonitor.getMasterServers();
List<MasterServer> workerServerList = zookeeperMonitor.getWorkerServers();
Assert.assertEquals(masterServerList.size(), 1);
Assert.assertEquals(workerServerList.size(), 1);
}
}

2
escheduler-common/pom.xml

@ -4,7 +4,7 @@
<parent>
<artifactId>escheduler</artifactId>
<groupId>cn.analysys</groupId>
<version>1.0.4-SNAPSHOT</version>
<version>1.1.0-SNAPSHOT</version>
</parent>
<artifactId>escheduler-common</artifactId>
<name>escheduler-common</name>

77
escheduler-common/src/main/java/cn/escheduler/common/Constants.java

@ -60,6 +60,23 @@ public final class Constants {
*/
public static final String FS_DEFAULTFS = "fs.defaultFS";
/**
* fs s3a endpoint
*/
public static final String FS_S3A_ENDPOINT = "fs.s3a.endpoint";
/**
* fs s3a access key
*/
public static final String FS_S3A_ACCESS_KEY = "fs.s3a.access.key";
/**
* fs s3a secret key
*/
public static final String FS_S3A_SECRET_KEY = "fs.s3a.secret.key";
/**
* yarn.resourcemanager.ha.rm.ids
*/
@ -70,6 +87,11 @@ public final class Constants {
*/
public static final String YARN_APPLICATION_STATUS_ADDRESS = "yarn.application.status.address";
/**
* hdfs configuration
* hdfs.root.user
*/
public static final String HDFS_ROOT_USER = "hdfs.root.user";
/**
* hdfs configuration
@ -118,9 +140,9 @@ public final class Constants {
public static final String DEVELOPMENT_STATE = "development.state";
/**
* hdfs.startup.state
* res.upload.startup.type
*/
public static final String HDFS_STARTUP_STATE = "hdfs.startup.state";
public static final String RES_UPLOAD_STARTUP_TYPE = "res.upload.startup.type";
/**
* zookeeper quorum
@ -246,7 +268,11 @@ public final class Constants {
*/
public static final String SCHEDULER_TASKS_QUEUE = "tasks_queue";
/**
* escheduler need kill tasks queue
*/
public static final String SCHEDULER_TASKS_KILL = "tasks_kill";
public static final String ZOOKEEPER_SCHEDULER_ROOT = "zookeeper.escheduler.root";
public static final String SCHEDULER_QUEUE_IMPL = "escheduler.queue.impl";
@ -311,7 +337,7 @@ public final class Constants {
/**
* email regex
*/
public static final Pattern REGEX_MAIL_NAME = Pattern.compile("^[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+(\\.[a-zA-Z0-9_-]+)+$");
public static final Pattern REGEX_MAIL_NAME = Pattern.compile("^([a-z0-9A-Z]+[-|\\.]?)+[a-z0-9A-Z]@([a-z0-9A-Z]+(-[a-z0-9A-Z]+)?\\.)+[a-zA-Z]{2,}$");
/**
* read permission
@ -346,11 +372,6 @@ public final class Constants {
*/
public static final int MAX_TASK_TIMEOUT = 24 * 3600;
/**
* max process timeout
*/
public static final int MAX_PROCESS_TIMEOUT = Integer.MAX_VALUE;
/**
* heartbeat threads number
@ -467,6 +488,10 @@ public final class Constants {
public static final String TASK_RECORD_PWD = "task.record.datasource.password";
public static final String DEFAULT = "Default";
public static final String PASSWORD = "password";
public static final String XXXXXX = "******";
public static String TASK_RECORD_TABLE_HIVE_LOG = "eamp_hive_log_hd";
public static String TASK_RECORD_TABLE_HISTORY_HIVE_LOG = "eamp_hive_hist_log_hd";
@ -836,8 +861,44 @@ public final class Constants {
public static final String DEPENDENT_ALL = "ALL";
/**
* preview schedule execute count
*/
public static final int PREVIEW_SCHEDULE_EXECUTE_COUNT = 5;
/**
* java.security.krb5.conf
*/
public static final String JAVA_SECURITY_KRB5_CONF = "java.security.krb5.conf";
/**
* java.security.krb5.conf.path
*/
public static final String JAVA_SECURITY_KRB5_CONF_PATH = "java.security.krb5.conf.path";
/**
* hadoop.security.authentication
*/
public static final String HADOOP_SECURITY_AUTHENTICATION = "hadoop.security.authentication";
/**
* hadoop.security.authentication.startup.state
*/
public static final String HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE = "hadoop.security.authentication.startup.state";
/**
* loginUserFromKeytab user
*/
public static final String LOGIN_USER_KEY_TAB_USERNAME = "login.user.keytab.username";
/**
* default worker group id
*/
public static final int DEFAULT_WORKER_ID = -1;
/**
* loginUserFromKeytab path
*/
public static final String LOGIN_USER_KEY_TAB_PATH = "login.user.keytab.path";
}

29
escheduler-common/src/main/java/cn/escheduler/common/enums/ResUploadType.java

@ -0,0 +1,29 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.enums;
/**
* resource upload types
*/
public enum ResUploadType {
/**
* 0 hdfs
* 1 s3
* 2 none
*/
HDFS,S3,NONE
}

15
escheduler-common/src/main/java/cn/escheduler/common/enums/ZKNodeType.java

@ -0,0 +1,15 @@
package cn.escheduler.common.enums;
/**
* zk node type
*/
public enum ZKNodeType {
/**
* 0 master node;
* 1 worker node;
* 2 dead server node;
* 3 task queue node;
*/
MASTER, WORKER, DEAD_SERVER, TASK_QUEUE;
}

20
escheduler-common/src/main/java/cn/escheduler/common/job/db/BaseDataSource.java

@ -45,6 +45,18 @@ public abstract class BaseDataSource {
*/
private String other;
/**
* principal
*/
private String principal;
public String getPrincipal() {
return principal;
}
public void setPrincipal(String principal) {
this.principal = principal;
}
/**
* test whether the data source can be connected successfully
* @throws Exception
@ -73,14 +85,14 @@ public abstract class BaseDataSource {
this.password = password;
}
public String getAddress() {
return address;
}
public void setAddress(String address) {
this.address = address;
}
public String getAddress() {
return address;
}
public String getDatabase() {
return database;
}

13
escheduler-common/src/main/java/cn/escheduler/common/job/db/HiveDataSource.java

@ -17,12 +17,12 @@
package cn.escheduler.common.job.db;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.*;
/**
* data source of hive
@ -32,6 +32,8 @@ public class HiveDataSource extends BaseDataSource {
private static final Logger logger = LoggerFactory.getLogger(HiveDataSource.class);
/**
* gets the JDBC url for the data source connection
* @return
@ -43,7 +45,7 @@ public class HiveDataSource extends BaseDataSource {
jdbcUrl += "/";
}
jdbcUrl += getDatabase();
jdbcUrl += getDatabase() + ";principal=" + getPrincipal();
if (StringUtils.isNotEmpty(getOther())) {
jdbcUrl += ";" + getOther();
@ -67,11 +69,10 @@ public class HiveDataSource extends BaseDataSource {
try {
con.close();
} catch (SQLException e) {
logger.error("Postgre datasource try conn close conn error", e);
logger.error("hive datasource try conn close conn error", e);
throw e;
}
}
}
}
}

3
escheduler-common/src/main/java/cn/escheduler/common/job/db/SparkDataSource.java

@ -31,7 +31,6 @@ public class SparkDataSource extends BaseDataSource {
private static final Logger logger = LoggerFactory.getLogger(SparkDataSource.class);
/**
* gets the JDBC url for the data source connection
* @return
@ -43,7 +42,7 @@ public class SparkDataSource extends BaseDataSource {
jdbcUrl += "/";
}
jdbcUrl += getDatabase();
jdbcUrl += getDatabase() + ";principal=" + getPrincipal();
if (StringUtils.isNotEmpty(getOther())) {
jdbcUrl += ";" + getOther();

73
escheduler-common/src/main/java/cn/escheduler/common/queue/TaskQueueZkImpl.java

@ -22,6 +22,7 @@ import cn.escheduler.common.utils.Bytes;
import cn.escheduler.common.utils.IpUtils;
import cn.escheduler.common.utils.OSUtils;
import cn.escheduler.common.zk.AbstractZKClient;
import org.apache.commons.lang3.StringUtils;
import org.apache.curator.framework.CuratorFramework;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.data.Stat;
@ -150,14 +151,34 @@ public class TaskQueueZkImpl extends AbstractZKClient implements ITaskQueue {
int size = list.size();
Set<String> taskTreeSet = new TreeSet<>();
Set<String> taskTreeSet = new TreeSet<>(new Comparator<String>() {
@Override
public int compare(String o1, String o2) {
String s1 = o1;
String s2 = o2;
String[] s1Array = s1.split(Constants.UNDERLINE);
if(s1Array.length>4){
// warning: if this length > 5, need to be changed
s1 = s1.substring(0, s1.lastIndexOf(Constants.UNDERLINE) );
}
String[] s2Array = s2.split(Constants.UNDERLINE);
if(s2Array.length>4){
// warning: if this length > 5, need to be changed
s2 = s2.substring(0, s2.lastIndexOf(Constants.UNDERLINE) );
}
return s1.compareTo(s2);
}
});
for (int i = 0; i < size; i++) {
String taskDetail = list.get(i);
String[] taskDetailArrs = taskDetail.split(Constants.UNDERLINE);
//forward compatibility
if(taskDetailArrs.length >= 4){
//format ${processInstancePriority}_${processInstanceId}_${taskInstancePriority}_${taskId}
@ -166,15 +187,14 @@ public class TaskQueueZkImpl extends AbstractZKClient implements ITaskQueue {
String taskHosts = taskDetailArrs[4];
//task can assign to any worker host if equals default ip value of worker server
if(!taskHosts.equals(Constants.DEFAULT_WORKER_ID)){
if(!taskHosts.equals(String.valueOf(Constants.DEFAULT_WORKER_ID))){
String[] taskHostsArr = taskHosts.split(Constants.COMMA);
if(!Arrays.asList(taskHostsArr).contains(workerIpLongStr)){
continue;
}
}
formatTask += Constants.UNDERLINE + taskDetailArrs[4];
}
taskTreeSet.add(formatTask);
}
@ -208,14 +228,41 @@ public class TaskQueueZkImpl extends AbstractZKClient implements ITaskQueue {
int j = 0;
List<String> taskslist = new ArrayList<>(tasksNum);
while(iterator.hasNext()){
if(j++ < tasksNum){
String task = iterator.next();
taskslist.add(task);
if(j++ >= tasksNum){
break;
}
String task = iterator.next();
taskslist.add(getOriginTaskFormat(task));
}
return taskslist;
}
/**
* format ${processInstancePriority}_${processInstanceId}_${taskInstancePriority}_${taskId}
* processInstanceId and taskId need to be converted to int.
* @param formatTask
* @return
*/
private String getOriginTaskFormat(String formatTask){
String[] taskArray = formatTask.split(Constants.UNDERLINE);
if(taskArray.length< 4){
return formatTask;
}
int processInstanceId = Integer.parseInt(taskArray[1]);
int taskId = Integer.parseInt(taskArray[3]);
StringBuilder sb = new StringBuilder(50);
String destTask = String.format("%s_%s_%s_%s", taskArray[0], processInstanceId, taskArray[2], taskId);
sb.append(destTask);
if(taskArray.length > 4){
for(int index = 4; index < taskArray.length; index++){
sb.append(Constants.UNDERLINE).append(taskArray[index]);
}
}
return sb.toString();
}
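A worked example of the normalization (the zero-padded key is hypothetical):
// queued key: ${processInstancePriority}_${processInstanceId}_${taskInstancePriority}_${taskId}_${host}
String queued = "1_0000000144_2_0000000518_192.168.1.1";
String origin = getOriginTaskFormat(queued);
// origin -> "1_144_2_518_192.168.1.1" : ids parsed back to int, host suffix kept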
@Override
public void removeNode(String key, String nodeValue){
@ -373,16 +420,6 @@ public class TaskQueueZkImpl extends AbstractZKClient implements ITaskQueue {
}
}
/**
* get zookeeper client of CuratorFramework
* @return
*/
public CuratorFramework getZkClient() {
return zkClient;
}
/**
* Get the task queue path
* @param key task queue name

11
escheduler-common/src/main/java/cn/escheduler/common/utils/CommonUtils.java

@ -17,6 +17,7 @@
package cn.escheduler.common.utils;
import cn.escheduler.common.Constants;
import cn.escheduler.common.enums.ResUploadType;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -63,4 +64,14 @@ public class CommonUtils {
/**
* true if the resource upload type is HDFS and kerberos startup is enabled, otherwise false
* @return
*/
public static boolean getKerberosStartupState(){
String resUploadStartupType = PropertyUtils.getString(cn.escheduler.common.Constants.RES_UPLOAD_STARTUP_TYPE);
ResUploadType resUploadType = ResUploadType.valueOf(resUploadStartupType);
Boolean kerberosStartupState = getBoolean(cn.escheduler.common.Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE);
return resUploadType == ResUploadType.HDFS && kerberosStartupState;
}
}
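The kerberos switch is purely configuration-driven; a hedged example of the common.properties entries involved (all values are made up, the keys are the Constants above):
// res.upload.startup.type=HDFS
// hadoop.security.authentication.startup.state=true
// java.security.krb5.conf.path=/opt/krb5.conf
// login.user.keytab.username=hdfs@EXAMPLE.COM
// login.user.keytab.path=/opt/hdfs.keytab
boolean kerberosEnabled = CommonUtils.getKerberosStartupState(); // true only for HDFS + kerberos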

102
escheduler-common/src/main/java/cn/escheduler/common/utils/HadoopUtils.java

@ -18,31 +18,30 @@ package cn.escheduler.common.utils;
import cn.escheduler.common.Constants;
import cn.escheduler.common.enums.ExecutionStatus;
import cn.escheduler.common.enums.ResUploadType;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONException;
import com.alibaba.fastjson.JSONObject;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.client.cli.RMAdminCLI;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.*;
import java.security.PrivilegedExceptionAction;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static cn.escheduler.common.Constants.*;
import static cn.escheduler.common.utils.PropertyUtils.getInt;
import static cn.escheduler.common.utils.PropertyUtils.*;
import static cn.escheduler.common.utils.PropertyUtils.getString;
import static cn.escheduler.common.utils.PropertyUtils.getPrefixedProperties;
/**
* hadoop utils
@ -52,18 +51,41 @@ public class HadoopUtils implements Closeable {
private static final Logger logger = LoggerFactory.getLogger(HadoopUtils.class);
private static String hdfsUser = PropertyUtils.getString(Constants.HDFS_ROOT_USER);
private static volatile HadoopUtils instance = new HadoopUtils();
private static volatile Configuration configuration;
private static FileSystem fs;
private HadoopUtils(){
if(StringUtils.isEmpty(hdfsUser)){
hdfsUser = PropertyUtils.getString(Constants.HDFS_ROOT_USER);
}
init();
initHdfsPath();
}
public static HadoopUtils getInstance(){
return instance;
}
/**
* init escheduler root path in hdfs
*/
private void initHdfsPath(){
String hdfsPath = getString(Constants.DATA_STORE_2_HDFS_BASEPATH);
Path path = new Path(hdfsPath);
try {
if (!fs.exists(path)) {
fs.mkdirs(path);
}
} catch (Exception e) {
logger.error(e.getMessage(),e);
}
}
/**
* init hadoop configuration
*/
@ -73,26 +95,62 @@ public class HadoopUtils implements Closeable {
if (configuration == null) {
try {
configuration = new Configuration();
String defaultFS = configuration.get(FS_DEFAULTFS);
//first get the value from core-site.xml/hdfs-site.xml; if null, fall back to the properties file
// the default is the local file system
if(defaultFS.startsWith("file")){
String defaultFSProp = getString(FS_DEFAULTFS);
if(StringUtils.isNotBlank(defaultFSProp)){
Map<String, String> fsRelatedProps = getPrefixedProperties("fs.");
configuration.set(FS_DEFAULTFS,defaultFSProp);
fsRelatedProps.entrySet().stream().forEach(entry -> configuration.set(entry.getKey(), entry.getValue()));
String resUploadStartupType = PropertyUtils.getString(Constants.RES_UPLOAD_STARTUP_TYPE);
ResUploadType resUploadType = ResUploadType.valueOf(resUploadStartupType);
if (resUploadType == ResUploadType.HDFS){
if (getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE)){
System.setProperty(Constants.JAVA_SECURITY_KRB5_CONF,
getString(Constants.JAVA_SECURITY_KRB5_CONF_PATH));
configuration.set(Constants.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
UserGroupInformation.setConfiguration(configuration);
UserGroupInformation.loginUserFromKeytab(getString(Constants.LOGIN_USER_KEY_TAB_USERNAME),
getString(Constants.LOGIN_USER_KEY_TAB_PATH));
}
String defaultFS = configuration.get(FS_DEFAULTFS);
//first get the value from core-site.xml/hdfs-site.xml; if null, fall back to the properties file
// the default is the local file system
if(defaultFS.startsWith("file")){
String defaultFSProp = getString(FS_DEFAULTFS);
if(StringUtils.isNotBlank(defaultFSProp)){
Map<String, String> fsRelatedProps = getPrefixedProperties("fs.");
configuration.set(FS_DEFAULTFS,defaultFSProp);
fsRelatedProps.entrySet().stream().forEach(entry -> configuration.set(entry.getKey(), entry.getValue()));
}else{
logger.error("property:{} can not to be empty, please set!");
throw new RuntimeException("property:{} can not to be empty, please set!");
}
}else{
logger.error("property:{} can not to be empty, please set!");
throw new RuntimeException("property:{} can not to be empty, please set!");
logger.info("get property:{} -> {}, from core-site.xml hdfs-site.xml ", FS_DEFAULTFS, defaultFS);
}
}else{
logger.info("get property:{} -> {}, from core-site.xml hdfs-site.xml ", FS_DEFAULTFS, defaultFS);
}
if (fs == null) {
if (fs == null) {
if(StringUtils.isNotEmpty(hdfsUser)){
//UserGroupInformation ugi = UserGroupInformation.createProxyUser(hdfsUser,UserGroupInformation.getLoginUser());
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(hdfsUser);
ugi.doAs(new PrivilegedExceptionAction<Boolean>() {
@Override
public Boolean run() throws Exception {
fs = FileSystem.get(configuration);
return true;
}
});
}else{
logger.warn("hdfs.root.user is not set value!");
fs = FileSystem.get(configuration);
}
}
}else if (resUploadType == ResUploadType.S3){
configuration.set(FS_DEFAULTFS,getString(FS_DEFAULTFS));
configuration.set(FS_S3A_ENDPOINT,getString(FS_S3A_ENDPOINT));
configuration.set(FS_S3A_ACCESS_KEY,getString(FS_S3A_ACCESS_KEY));
configuration.set(FS_S3A_SECRET_KEY,getString(FS_S3A_SECRET_KEY));
fs = FileSystem.get(configuration);
}
String rmHaIds = getString(YARN_RESOURCEMANAGER_HA_RM_IDS);
String appAddress = getString(Constants.YARN_APPLICATION_STATUS_ADDRESS);
if (!StringUtils.isEmpty(rmHaIds)) {
@ -155,7 +213,7 @@ public class HadoopUtils implements Closeable {
*/
public List<String> catFile(String hdfsFilePath, int skipLineNums, int limit) throws IOException {
if(StringUtils.isBlank(hdfsFilePath)){
if (StringUtils.isBlank(hdfsFilePath)){
logger.error("hdfs file path:{} is blank",hdfsFilePath);
return null;
}
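For the new S3 branch above, a minimal bootstrap sketch (endpoint and credentials are placeholders; assumes hadoop-aws on the classpath):
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import java.io.IOException;
class S3BootstrapSketch {
    // mirrors the ResUploadType.S3 branch of HadoopUtils.init()
    static FileSystem s3FileSystem() throws IOException {
        Configuration configuration = new Configuration();
        configuration.set("fs.defaultFS", "s3a://escheduler");          // fs.defaultFS
        configuration.set("fs.s3a.endpoint", "http://127.0.0.1:9010");  // fs.s3a.endpoint
        configuration.set("fs.s3a.access.key", "accessKey");            // fs.s3a.access.key
        configuration.set("fs.s3a.secret.key", "secretKey");            // fs.s3a.secret.key
        return FileSystem.get(configuration); // S3AFileSystem backs the same HadoopUtils API
    }
}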

6
escheduler-common/src/main/java/cn/escheduler/common/utils/IpUtils.java

@ -61,10 +61,4 @@ public class IpUtils {
return sb.toString();
}
public static void main(String[] args){
long ipLong = ipToLong("11.3.4.5");
logger.info(longToIp(ipLong));
}
}

14
escheduler-common/src/main/java/cn/escheduler/common/utils/PropertyUtils.java

@ -16,6 +16,8 @@
*/
package cn.escheduler.common.utils;
import cn.escheduler.common.Constants;
import cn.escheduler.common.enums.ResUploadType;
import org.apache.commons.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -65,11 +67,15 @@ public class PropertyUtils {
}
}
/**
 * judge whether resource upload startup
 * @return
 */
public static Boolean getResUploadStartupState(){
    String resUploadStartupType = PropertyUtils.getString(Constants.RES_UPLOAD_STARTUP_TYPE);
    ResUploadType resUploadType = ResUploadType.valueOf(resUploadStartupType);
    return resUploadType == ResUploadType.HDFS || resUploadType == ResUploadType.S3;
}
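A hypothetical call site for the new helper — callers get a single boolean gate instead of re-parsing the enum everywhere (initResourceStorage and logger are illustrative names, not project APIs):

if (PropertyUtils.getResUploadStartupState()) {
    initResourceStorage();   // HDFS or S3 configured: resource features may start
} else {
    logger.info("res.upload.startup.type=NONE, resource upload disabled");
}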
/**
* get property value

88
escheduler-common/src/main/java/cn/escheduler/common/zk/AbstractZKClient.java

@ -30,13 +30,12 @@ import org.apache.curator.framework.imps.CuratorFrameworkState;
import org.apache.curator.framework.state.ConnectionState;
import org.apache.curator.framework.state.ConnectionStateListener;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
import org.apache.zookeeper.CreateMode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.*;
import static cn.escheduler.common.Constants.*;
@ -213,9 +212,9 @@ public abstract class AbstractZKClient {
protected void initSystemZNode(){
try {
// read master node parent path from conf
masterZNodeParentPath = conf.getString(Constants.ZOOKEEPER_ESCHEDULER_MASTERS);
masterZNodeParentPath = getMasterZNodeParentPath();
// read worker node parent path from conf
workerZNodeParentPath = conf.getString(Constants.ZOOKEEPER_ESCHEDULER_WORKERS);
workerZNodeParentPath = getWorkerZNodeParentPath();
// read server node parent path from conf
deadServerZNodeParentPath = conf.getString(ZOOKEEPER_ESCHEDULER_DEAD_SERVERS);
@ -243,6 +242,7 @@ public abstract class AbstractZKClient {
}
}
public void removeDeadServerByHost(String host, String serverType) throws Exception {
List<String> deadServers = zkClient.getChildren().forPath(deadServerZNodeParentPath);
for(String serverPath : deadServers){
@ -291,6 +291,8 @@ public abstract class AbstractZKClient {
}
/**
* for stop server
* @param serverStoppable
@ -312,7 +314,6 @@ public abstract class AbstractZKClient {
childrenList = zkClient.getChildren().forPath(masterZNodeParentPath);
}
} catch (Exception e) {
// logger.warn(e.getMessage());
if(!e.getMessage().contains("java.lang.IllegalStateException: instance must be started")){
logger.warn(e.getMessage(),e);
}
@ -340,6 +341,81 @@ public abstract class AbstractZKClient {
return sb.toString();
}
/**
* get master server list map.
* result : {host : resource info}
* @return
*/
public Map<String, String> getServerList(boolean isMaster ){
Map<String, String> masterMap = new HashMap<>();
try {
String path = isMaster ? getMasterZNodeParentPath() : getWorkerZNodeParentPath();
List<String> serverList = getZkClient().getChildren().forPath(path);
for(String server : serverList){
byte[] bytes = getZkClient().getData().forPath(path + "/" + server);
masterMap.putIfAbsent(server, new String(bytes));
}
} catch (Exception e) {
e.printStackTrace();
}
return masterMap;
}
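A short assumed usage of the new accessor; zk stands for any AbstractZKClient subclass instance:

Map<String, String> masters = zk.getServerList(true);   // {host : heartbeat payload}
Map<String, String> workers = zk.getServerList(false);
masters.forEach((host, info) -> System.out.println("master " + host + " -> " + info));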
/**
* get zkclient
* @return
*/
public CuratorFramework getZkClient() {
return zkClient;
}
/**
* get worker node parent path
* @return
*/
protected String getWorkerZNodeParentPath(){return conf.getString(Constants.ZOOKEEPER_ESCHEDULER_WORKERS);}
/**
* get master node parent path
* @return
*/
protected String getMasterZNodeParentPath(){return conf.getString(Constants.ZOOKEEPER_ESCHEDULER_MASTERS);}
/**
* get master lock path
* @return
*/
public String getMasterLockPath(){
return conf.getString(Constants.ZOOKEEPER_ESCHEDULER_LOCK_MASTERS);
}
/**
* get master start up lock path
* @return
*/
public String getMasterStartUpLockPath(){
return conf.getString(Constants.ZOOKEEPER_ESCHEDULER_LOCK_FAILOVER_STARTUP_MASTERS);
}
/**
* get master failover lock path
* @return
*/
public String getMasterFailoverLockPath(){
return conf.getString(Constants.ZOOKEEPER_ESCHEDULER_LOCK_FAILOVER_MASTERS);
}
/**
* get worker failover lock path
* @return
*/
public String getWorkerFailoverLockPath(){
return conf.getString(Constants.ZOOKEEPER_ESCHEDULER_LOCK_FAILOVER_WORKERS);
}
@Override
public String toString() {
return "AbstractZKClient{" +

21
escheduler-common/src/main/resources/common/common.properties

@ -10,11 +10,26 @@ data.download.basedir.path=/tmp/escheduler/download
# process execute directory. self configuration, please make sure the directory exists and have read write permissions
process.exec.basepath=/tmp/escheduler/exec
# Users who have permission to create directories under the HDFS root path
hdfs.root.user=hdfs
# data base dir, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions. "/escheduler" is recommended
data.store2hdfs.basepath=/escheduler
# whether hdfs starts
hdfs.startup.state=true
# resource upload startup type : HDFS,S3,NONE
res.upload.startup.type=NONE
# whether kerberos starts
hadoop.security.authentication.startup.state=false
# java.security.krb5.conf path
java.security.krb5.conf.path=/opt/krb5.conf
# loginUserFromKeytab user
login.user.keytab.username=hdfs-mycluster@ESZ.COM
# loginUserFromKeytab path
login.user.keytab.path=/opt/hdfs.headless.keytab
# system env path. self configuration, please make sure the directory and file exists and have read write execute permissions
escheduler.env.path=/opt/.escheduler_env.sh
@ -23,5 +38,5 @@ escheduler.env.path=/opt/.escheduler_env.sh
resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml
# is development state? default "false"
development.state=false
development.state=true

12
escheduler-common/src/main/resources/common/hadoop/hadoop.properties

@ -1,6 +1,16 @@
# ha or single namenode,If namenode ha needs to copy core-site.xml and hdfs-site.xml to the conf directory
# ha or single namenode. If namenode HA is used, copy core-site.xml and hdfs-site.xml
# to the conf directory. S3 is also supported, for example : s3a://escheduler
fs.defaultFS=hdfs://mycluster:8020
# s3 need,s3 endpoint
fs.s3a.endpoint=http://192.168.199.91:9010
# s3 need,s3 access key
fs.s3a.access.key=A3DXS30FO22544RE
# s3 need,s3 secret key
fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK
#resourcemanager ha note this need ips , this empty if single
yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx

18
escheduler-common/src/test/java/cn/escheduler/common/queue/TaskQueueImplTest.java

@ -17,14 +17,14 @@
package cn.escheduler.common.queue;
import cn.escheduler.common.Constants;
import cn.escheduler.common.utils.IpUtils;
import cn.escheduler.common.utils.OSUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
@ -58,32 +58,30 @@ public class TaskQueueImplTest {
@Test
public void testAdd(){
//add
tasksQueue.add(Constants.SCHEDULER_TASKS_QUEUE,"1_1_1_1_2130706433,3232236775");
tasksQueue.add(Constants.SCHEDULER_TASKS_QUEUE,"1_0_1_1_-1");
tasksQueue.add(Constants.SCHEDULER_TASKS_QUEUE,"0_1_1_1_2130706433,3232236775");
tasksQueue.add(Constants.SCHEDULER_TASKS_QUEUE,"1_1_0_1_2130706433,3232236775");
tasksQueue.add(Constants.SCHEDULER_TASKS_QUEUE,"1_1_0_1_2130706433,3232236775,"+IpUtils.ipToLong(OSUtils.getHost()));
tasksQueue.add(Constants.SCHEDULER_TASKS_QUEUE,"1_2_1_1_2130706433,3232236775");
List<String> tasks = tasksQueue.poll(Constants.SCHEDULER_TASKS_QUEUE, 1);
if(tasks.size() < 0){
if(tasks.size() <= 0){
return;
}
//pop
String node1 = tasks.get(0);
assertEquals(node1,"0_0000000001_1_0000000001");
assertEquals(node1,"1_0_1_1_-1");
tasks = tasksQueue.poll(Constants.SCHEDULER_TASKS_QUEUE, 1);
if(tasks.size() < 0){
if(tasks.size() <= 0){
return;
}
String node2 = tasks.get(0);
assertEquals(node2,"0_0000000001_1_0000000001");
}

41
escheduler-common/src/test/java/cn/escheduler/common/utils/IpUtilsTest.java

@ -0,0 +1,41 @@
package cn.escheduler.common.utils;
import org.junit.Assert;
import org.junit.Test;
import static org.junit.Assert.*;
public class IpUtilsTest {
@Test
public void ipToLong() {
String ip = "192.168.110.1";
String ip2 = "0.0.0.0";
long longNumber = IpUtils.ipToLong(ip);
long longNumber2 = IpUtils.ipToLong(ip2);
System.out.println(longNumber);
Assert.assertEquals(longNumber, 3232263681L);
Assert.assertEquals(longNumber2, 0L);
String ip3 = "255.255.255.255";
long longNumber3 = IpUtils.ipToLong(ip3);
System.out.println(longNumber3);
Assert.assertEquals(longNumber3, 4294967295L);
}
@Test
public void longToIp() {
String ip = "192.168.110.1";
String ip2 = "0.0.0.0";
long longNum = 3232263681L;
String i1 = IpUtils.longToIp(longNum);
String i2 = IpUtils.longToIp(0);
Assert.assertEquals(ip, i1);
Assert.assertEquals(ip2, i2);
}
}
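The assertions above pin the conversion down exactly; a minimal sketch that satisfies them (the real IpUtils may add validation) is:

public class IpUtilsSketch {
    // 192.168.110.1 -> 3232263681, 255.255.255.255 -> 4294967295
    public static long ipToLong(String ip) {
        long result = 0;
        for (String octet : ip.split("\\.")) {
            result = (result << 8) | Long.parseLong(octet);
        }
        return result;
    }

    public static String longToIp(long ip) {
        return ((ip >> 24) & 0xFF) + "." + ((ip >> 16) & 0xFF) + "."
                + ((ip >> 8) & 0xFF) + "." + (ip & 0xFF);
    }
}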

6
escheduler-dao/pom.xml

@ -4,7 +4,7 @@
<parent>
<groupId>cn.analysys</groupId>
<artifactId>escheduler</artifactId>
<version>1.0.4-SNAPSHOT</version>
<version>1.1.0-SNAPSHOT</version>
</parent>
<artifactId>escheduler-dao</artifactId>
<name>escheduler-dao</name>
@ -37,6 +37,10 @@
<groupId>org.apache.tomcat</groupId>
<artifactId>tomcat-jdbc</artifactId>
</exclusion>
<exclusion>
<artifactId>log4j-to-slf4j</artifactId>
<groupId>org.apache.logging.log4j</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>

72
escheduler-dao/src/main/java/cn/escheduler/dao/ProcessDao.java

@ -59,6 +59,7 @@ public class ProcessDao extends AbstractBaseDao {
private final int[] stateArray = new int[]{ExecutionStatus.SUBMITTED_SUCCESS.ordinal(),
ExecutionStatus.RUNNING_EXEUTION.ordinal(),
ExecutionStatus.READY_PAUSE.ordinal(),
// ExecutionStatus.NEED_FAULT_TOLERANCE.ordinal(),
ExecutionStatus.READY_STOP.ordinal()};
@Autowired
@ -97,6 +98,12 @@ public class ProcessDao extends AbstractBaseDao {
@Autowired
private ErrorCommandMapper errorCommandMapper;
@Autowired
private WorkerServerMapper workerServerMapper;
@Autowired
private TenantMapper tenantMapper;
/**
* task queue impl
*/
@ -122,7 +129,9 @@ public class ProcessDao extends AbstractBaseDao {
udfFuncMapper = getMapper(UdfFuncMapper.class);
resourceMapper = getMapper(ResourceMapper.class);
workerGroupMapper = getMapper(WorkerGroupMapper.class);
workerServerMapper = getMapper(WorkerServerMapper.class);
taskQueue = TaskQueueFactory.getTaskQueueInstance();
tenantMapper = getMapper(TenantMapper.class);
}
@ -484,11 +493,33 @@ public class ProcessDao extends AbstractBaseDao {
processInstance.setProcessInstanceJson(processDefinition.getProcessDefinitionJson());
// set process instance priority
processInstance.setProcessInstancePriority(command.getProcessInstancePriority());
processInstance.setWorkerGroupId(command.getWorkerGroupId());
int workerGroupId = command.getWorkerGroupId() == 0 ? -1 : command.getWorkerGroupId();
processInstance.setWorkerGroupId(workerGroupId);
processInstance.setTimeout(processDefinition.getTimeout());
processInstance.setTenantId(processDefinition.getTenantId());
return processInstance;
}
/**
* get process tenant
* if the definition carries a tenant id, use that tenant;
* if the definition has no tenant id, or that tenant no longer exists,
* use the definition creator's tenant instead.
* @param tenantId
* @param userId
* @return
*/
public Tenant getTenantForProcess(int tenantId, int userId){
Tenant tenant = null;
if(tenantId >= 0){
tenant = tenantMapper.queryById(tenantId);
}
if(tenant == null){
User user = userMapper.queryById(userId);
tenant = tenantMapper.queryById(user.getTenantId());
}
return tenant;
}
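Illustrating the fallback with assumed ids (tenant 5 exists, user 42 belongs to tenant 7):

Tenant t1 = processDao.getTenantForProcess(5, 42);   // -> tenant 5 (definition wins)
Tenant t2 = processDao.getTenantForProcess(-1, 42);  // -> tenant 7 (creator fallback)
Tenant t3 = processDao.getTenantForProcess(99, 42);  // tenant 99 missing -> tenant 7 as well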
/**
* check command parameters is valid
@ -582,6 +613,8 @@ public class ProcessDao extends AbstractBaseDao {
processInstance.setScheduleTime(command.getScheduleTime());
}
processInstance.setHost(host);
ExecutionStatus runStatus = ExecutionStatus.RUNNING_EXEUTION;
int runTime = processInstance.getRunTimes();
switch (commandType){
case START_PROCESS:
@ -611,6 +644,9 @@ public class ProcessDao extends AbstractBaseDao {
// find pause tasks and init task's state
cmdParam.remove(Constants.CMDPARAM_RECOVERY_START_NODE_STRING);
List<Integer> suspendedNodeList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.PAUSE);
List<Integer> stopNodeList = findTaskIdByInstanceState(processInstance.getId(),
ExecutionStatus.KILL);
suspendedNodeList.addAll(stopNodeList);
for(Integer taskId : suspendedNodeList){
// initialize the state of the paused tasks
initTaskInstance(this.findTaskInstanceById(taskId));
@ -622,6 +658,7 @@ public class ProcessDao extends AbstractBaseDao {
case RECOVER_TOLERANCE_FAULT_PROCESS:
// recover tolerance fault process
processInstance.setRecovery(Flag.YES);
runStatus = processInstance.getState();
break;
case COMPLEMENT_DATA:
// delete all the valid tasks when complement data
@ -653,7 +690,7 @@ public class ProcessDao extends AbstractBaseDao {
default:
break;
}
processInstance.setState(ExecutionStatus.RUNNING_EXEUTION);
processInstance.setState(runStatus);
return processInstance;
}
@ -757,13 +794,16 @@ public class ProcessDao extends AbstractBaseDao {
* @param taskInstance
*/
private void initTaskInstance(TaskInstance taskInstance){
if(taskInstance.getState().typeIsFailure() && !taskInstance.isSubProcess()){
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
}else{
taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS);
updateTaskInstance(taskInstance);
if(!taskInstance.isSubProcess()){
if(taskInstance.getState().typeIsCancel() || taskInstance.getState().typeIsFailure()){
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
return;
}
}
taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS);
updateTaskInstance(taskInstance);
}
/**
@ -971,11 +1011,11 @@ public class ProcessDao extends AbstractBaseDao {
}
/**
* ${processInstancePriority}_${processInstanceId}_${taskInstancePriority}_${taskId}
* ${processInstancePriority}_${processInstanceId}_${taskInstancePriority}_${taskId}_${task executed by ip1},${ip2}...
*
* The tasks with the highest priority are selected by comparing the priorities of the above four levels from high to low.
*
* process instance priority_process instance id_task priority_task id   (high <- low)
* process instance priority_process instance id_task priority_task id_task executor ip1,ip2...   (high <- low)
*
* @param taskInstance
* @return
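A worked element under the new layout, reusing the ip-to-long conversion pinned down in IpUtilsTest above (the ids are assumed):

// process priority 1, process id 10, task priority 2, task id 20, two hosts
String element = "1_10_2_20_"
        + IpUtils.ipToLong("192.168.110.1") + ","
        + IpUtils.ipToLong("192.168.110.2");
// -> "1_10_2_20_3232263681,3232263682"; the four leading fields rank the task
// from process priority down to task id, while the trailing ip list only limits
// which workers may fetch it.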
@ -1614,7 +1654,6 @@ public class ProcessDao extends AbstractBaseDao {
for (ProcessInstance processInstance:processInstanceList){
processNeedFailoverProcessInstances(processInstance);
}
}
@Transactional(value = "TransactionManager",rollbackFor = Exception.class)
@ -1681,6 +1720,17 @@ public class ProcessDao extends AbstractBaseDao {
return workerGroupMapper.queryById(workerGroupId);
}
/**
* query worker server by host
* @param host
* @return
*/
public List<WorkerServer> queryWorkerServerByHost(String host){
return workerServerMapper.queryWorkerByHost(host);
}
/**
* get task worker group id

13
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/DataSourceMapper.java

@ -216,4 +216,17 @@ public interface DataSourceMapper {
@SelectProvider(type = DataSourceMapperProvider.class, method = "queryDatasourceExceptUserId")
List<DataSource> queryDatasourceExceptUserId(@Param("userId") int userId);
@Results(value = {
@Result(property = "id", column = "id", id = true, javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "name", column = "name", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "note", column = "note", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "type", column = "type", typeHandler = EnumOrdinalTypeHandler.class, javaType = DbType.class, jdbcType = JdbcType.INTEGER),
@Result(property = "userId", column = "user_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "connectionParams", column = "connection_params", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "createTime", column = "create_time", javaType = Timestamp.class, jdbcType = JdbcType.DATE),
@Result(property = "updateTime", column = "update_time", javaType = Timestamp.class, jdbcType = JdbcType.DATE)
})
@SelectProvider(type = DataSourceMapperProvider.class, method = "listAllDataSourceByType")
List<DataSource> listAllDataSourceByType(@Param("type") Integer type);
}

19
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/DataSourceMapperProvider.java

@ -175,8 +175,7 @@ public class DataSourceMapperProvider {
}
/**
* Query the total number of data sources
* @param parameter
* @return
*/
@ -228,4 +227,20 @@ public class DataSourceMapperProvider {
WHERE("user_id <> #{userId}");
}}.toString();
}
/**
* list all data source by type
*
* @param parameter
* @return
*/
public String listAllDataSourceByType(Map<String, Object> parameter) {
return new SQL() {{
SELECT("*");
FROM(TABLE_NAME);
WHERE("type = #{type}");
}}.toString();
}
}
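For reference, a hypothetical probe of what the builder renders (the table name is assumed from the provider's TABLE_NAME constant):

Map<String, Object> params = java.util.Collections.singletonMap("type", 1);
String sql = new DataSourceMapperProvider().listAllDataSourceByType(params);
// sql is roughly: SELECT * FROM t_escheduler_datasource WHERE (type = #{type})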

5
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessDefinitionMapper.java

@ -95,6 +95,7 @@ public interface ProcessDefinitionMapper {
@Result(property = "connects", column = "connects", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "projectName", column = "project_name", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "receivers", column = "receivers", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "receiversCc", column = "receivers_cc", javaType = String.class, jdbcType = JdbcType.VARCHAR)
@ -123,6 +124,7 @@ public interface ProcessDefinitionMapper {
@Result(property = "locations", column = "locations", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "connects", column = "connects", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "projectName", column = "project_name", javaType = String.class, jdbcType = JdbcType.VARCHAR)
})
@SelectProvider(type = ProcessDefinitionMapperProvider.class, method = "queryByDefineName")
@ -160,6 +162,7 @@ public interface ProcessDefinitionMapper {
@Result(property = "flag", column = "flag", typeHandler = EnumOrdinalTypeHandler.class, javaType = Flag.class, jdbcType = JdbcType.TINYINT),
@Result(property = "userName", column = "user_name", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "projectName", column = "project_name", javaType = String.class, jdbcType = JdbcType.VARCHAR)
})
@SelectProvider(type = ProcessDefinitionMapperProvider.class, method = "queryAllDefinitionList")
@ -187,6 +190,7 @@ public interface ProcessDefinitionMapper {
@Result(property = "userName", column = "user_name", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "scheduleReleaseState", column = "schedule_release_state", typeHandler = EnumOrdinalTypeHandler.class, javaType = ReleaseState.class, jdbcType = JdbcType.TINYINT),
@Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "projectName", column = "project_name", javaType = String.class, jdbcType = JdbcType.VARCHAR)
})
@SelectProvider(type = ProcessDefinitionMapperProvider.class, method = "queryDefineListPaging")
@ -216,6 +220,7 @@ public interface ProcessDefinitionMapper {
@Result(property = "connects", column = "connects", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "userName", column = "user_name", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "projectName", column = "project_name", javaType = String.class, jdbcType = JdbcType.VARCHAR)
})
@SelectProvider(type = ProcessDefinitionMapperProvider.class, method = "queryDefinitionListByIdList")

2
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessDefinitionMapperProvider.java

@ -56,6 +56,7 @@ public class ProcessDefinitionMapperProvider {
VALUES("`create_time`", "#{processDefinition.createTime}");
VALUES("`update_time`", "#{processDefinition.updateTime}");
VALUES("`timeout`", "#{processDefinition.timeout}");
VALUES("`tenant_id`", "#{processDefinition.tenantId}");
VALUES("`flag`", EnumFieldUtil.genFieldStr("processDefinition.flag", ReleaseState.class));
VALUES("`user_id`", "#{processDefinition.userId}");
@ -102,6 +103,7 @@ public class ProcessDefinitionMapperProvider {
SET("`create_time`=#{processDefinition.createTime}");
SET("`update_time`=#{processDefinition.updateTime}");
SET("`timeout`=#{processDefinition.timeout}");
SET("`tenant_id`=#{processDefinition.tenantId}");
SET("`flag`="+EnumFieldUtil.genFieldStr("processDefinition.flag", Flag.class));
SET("`user_id`=#{processDefinition.userId}");

11
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessInstanceMapper.java

@ -97,6 +97,7 @@ public interface ProcessInstanceMapper {
@Result(property = "queue", column = "queue", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "workerGroupId", column = "worker_group_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "processInstancePriority", column = "process_instance_priority", javaType = Priority.class, typeHandler = EnumOrdinalTypeHandler.class, jdbcType = JdbcType.TINYINT)
})
@SelectProvider(type = ProcessInstanceMapperProvider.class, method = "queryDetailById")
@ -136,6 +137,7 @@ public interface ProcessInstanceMapper {
@Result(property = "dependenceScheduleTimes", column = "dependence_schedule_times", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "workerGroupId", column = "worker_group_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "processInstancePriority", column = "process_instance_priority", javaType = Priority.class, typeHandler = EnumOrdinalTypeHandler.class, jdbcType = JdbcType.TINYINT)
})
@SelectProvider(type = ProcessInstanceMapperProvider.class, method = "queryById")
@ -175,6 +177,7 @@ public interface ProcessInstanceMapper {
@Result(property = "processInstanceJson", column = "process_instance_json", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "workerGroupId", column = "worker_group_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "processInstancePriority", column = "process_instance_priority", javaType = Priority.class, typeHandler = EnumOrdinalTypeHandler.class, jdbcType = JdbcType.TINYINT)
})
@ -214,6 +217,7 @@ public interface ProcessInstanceMapper {
@Result(property = "processInstanceJson", column = "process_instance_json", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "workerGroupId", column = "worker_group_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "processInstancePriority", column = "process_instance_priority", javaType = Priority.class, typeHandler = EnumOrdinalTypeHandler.class, jdbcType = JdbcType.TINYINT)
})
@ -262,6 +266,7 @@ public interface ProcessInstanceMapper {
@Result(property = "processInstanceJson", column = "process_instance_json", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "workerGroupId", column = "worker_group_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "processInstancePriority", column = "process_instance_priority", javaType = Priority.class, typeHandler = EnumOrdinalTypeHandler.class, jdbcType = JdbcType.TINYINT)
})
@ -359,6 +364,7 @@ public interface ProcessInstanceMapper {
@Result(property = "processInstanceJson", column = "process_instance_json", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "workerGroupId", column = "worker_group_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "processInstancePriority", column = "process_instance_priority", javaType = Priority.class, typeHandler = EnumOrdinalTypeHandler.class, jdbcType = JdbcType.TINYINT)
})
@ -452,6 +458,7 @@ public interface ProcessInstanceMapper {
@Result(property = "processInstanceJson", column = "process_instance_json", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "workerGroupId", column = "worker_group_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "processInstancePriority", column = "process_instance_priority", javaType = Priority.class, typeHandler = EnumOrdinalTypeHandler.class, jdbcType = JdbcType.TINYINT)
})
@ -497,6 +504,7 @@ public interface ProcessInstanceMapper {
@Result(property = "processInstanceJson", column = "process_instance_json", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "workerGroupId", column = "worker_group_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "processInstancePriority", column = "process_instance_priority", javaType = Priority.class, typeHandler = EnumOrdinalTypeHandler.class, jdbcType = JdbcType.TINYINT)
})
@ -542,6 +550,7 @@ public interface ProcessInstanceMapper {
@Result(property = "processInstanceJson", column = "process_instance_json", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "workerGroupId", column = "worker_group_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "processInstancePriority", column = "process_instance_priority", javaType = Priority.class, typeHandler = EnumOrdinalTypeHandler.class, jdbcType = JdbcType.TINYINT)
})
@ -585,6 +594,7 @@ public interface ProcessInstanceMapper {
@Result(property = "processInstanceJson", column = "process_instance_json", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "workerGroupId", column = "worker_group_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "processInstancePriority", column = "process_instance_priority", javaType = Priority.class, typeHandler = EnumOrdinalTypeHandler.class, jdbcType = JdbcType.TINYINT)
})
@SelectProvider(type = ProcessInstanceMapperProvider.class, method = "queryLastRunningProcess")
@ -628,6 +638,7 @@ public interface ProcessInstanceMapper {
@Result(property = "processInstanceJson", column = "process_instance_json", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "workerGroupId", column = "worker_group_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "timeout", column = "timeout", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "tenantId", column = "tenant_id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "processInstancePriority", column = "process_instance_priority", javaType = Priority.class, typeHandler = EnumOrdinalTypeHandler.class, jdbcType = JdbcType.TINYINT)
})
@SelectProvider(type = ProcessInstanceMapperProvider.class, method = "queryLastManualProcess")

15
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ProcessInstanceMapperProvider.java

@ -69,6 +69,7 @@ public class ProcessInstanceMapperProvider {
VALUES("`executor_id`", "#{processInstance.executorId}");
VALUES("`worker_group_id`", "#{processInstance.workerGroupId}");
VALUES("`timeout`", "#{processInstance.timeout}");
VALUES("`tenant_id`", "#{processInstance.tenantId}");
VALUES("`process_instance_priority`", EnumFieldUtil.genFieldStr("processInstance.processInstancePriority", Priority.class));
}
}.toString();
@ -141,6 +142,7 @@ public class ProcessInstanceMapperProvider {
SET("`dependence_schedule_times`=#{processInstance.dependenceScheduleTimes}");
SET("`is_sub_process`="+EnumFieldUtil.genFieldStr("processInstance.isSubProcess", Flag.class));
SET("`executor_id`=#{processInstance.executorId}");
SET("`tenant_id`=#{processInstance.tenantId}");
SET("`worker_group_id`=#{processInstance.workerGroupId}");
SET("`timeout`=#{processInstance.timeout}");
@ -220,11 +222,11 @@ public class ProcessInstanceMapperProvider {
public String queryDetailById(Map<String, Object> parameter) {
return new SQL() {
{
SELECT("inst.*,q.queue_name as queue,t.tenant_code,UNIX_TIMESTAMP(inst.end_time)-UNIX_TIMESTAMP(inst.start_time) as duration");
SELECT("inst.*,UNIX_TIMESTAMP(inst.end_time)-UNIX_TIMESTAMP(inst.start_time) as duration");
FROM(TABLE_NAME + " inst, t_escheduler_user u,t_escheduler_tenant t,t_escheduler_queue q");
FROM(TABLE_NAME + " inst");
WHERE("inst.executor_id = u.id AND u.tenant_id = t.id AND t.queue_id = q.id AND inst.id = #{processId}");
WHERE("inst.id = #{processId}");
}
}.toString();
}
@ -402,7 +404,12 @@ public class ProcessInstanceMapperProvider {
FROM(TABLE_NAME);
WHERE("`host` = #{host} and `state` in (" + strStates.toString() +")");
Object host = parameter.get("host");
if(host != null && StringUtils.isNotEmpty(host.toString())){
WHERE("`host` = #{host} ");
}
WHERE("`state` in (" + strStates.toString() +")");
ORDER_BY("`id` asc");

18
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ResourceMapper.java

@ -274,5 +274,21 @@ public interface ResourceMapper {
@SelectProvider(type = ResourceMapperProvider.class, method = "queryTenantCodeByResourceName")
String queryTenantCodeByResourceName(@Param("resName") String resName);
/**
* query all resources of the given type
* @param type
* @return
*/
@Results(value = {@Result(property = "id", column = "id", id = true, javaType = int.class, jdbcType = JdbcType.INTEGER),
@Result(property = "alias", column = "alias", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "fileName", column = "file_name", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "desc", column = "desc", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "userId", column = "user_id", javaType = int.class, jdbcType = JdbcType.INTEGER),
@Result(property = "type", column = "type", typeHandler = EnumOrdinalTypeHandler.class, javaType = ResourceType.class, jdbcType = JdbcType.TINYINT),
@Result(property = "size", column = "size", javaType = Long.class, jdbcType = JdbcType.BIGINT),
@Result(property = "createTime", column = "create_time", javaType = Timestamp.class, jdbcType = JdbcType.DATE),
@Result(property = "updateTime", column = "update_time", javaType = Timestamp.class, jdbcType = JdbcType.DATE)
})
@SelectProvider(type = ResourceMapperProvider.class, method = "listAllResourceByType")
List<Resource> listAllResourceByType(@Param("type") Integer type);
}

15
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/ResourceMapperProvider.java

@ -295,4 +295,19 @@ public class ResourceMapperProvider {
WHERE("type = #{type} and user_id = #{userId}");
}}.toString();
}
/**
* list all resource by type
*
* @param parameter
* @return
*/
public String listAllResourceByType(Map<String, Object> parameter) {
return new SQL() {{
SELECT("*");
FROM(TABLE_NAME);
WHERE("type = #{type}");
}}.toString();
}
}

7
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/TaskInstanceMapperProvider.java

@ -228,7 +228,12 @@ public class TaskInstanceMapperProvider {
SELECT("*, UNIX_TIMESTAMP(end_time)-UNIX_TIMESTAMP(start_time) as duration");
FROM(TABLE_NAME);
WHERE("`host` = #{host} and `state` in (" + strStates.toString() +")");
Object host = parameter.get("host");
if(host != null && StringUtils.isNotEmpty(host.toString())){
WHERE("`host` = #{host} ");
}
WHERE("`state` in (" + strStates.toString() +")");
ORDER_BY("`id` asc");
}
}.toString();

2
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/UserMapperProvider.java

@ -187,7 +187,6 @@ public class UserMapperProvider {
return new SQL() {{
SELECT("count(0)");
FROM(TABLE_NAME);
WHERE("user_type = 1");
Object searchVal = parameter.get("searchVal");
if(searchVal != null && StringUtils.isNotEmpty(searchVal.toString())){
WHERE( " user_name like concat('%', #{searchVal}, '%') ");
@ -209,7 +208,6 @@ public class UserMapperProvider {
FROM(TABLE_NAME + " u ");
LEFT_OUTER_JOIN("t_escheduler_tenant t on u.tenant_id = t.id");
LEFT_OUTER_JOIN("t_escheduler_queue q on t.queue_id = q.id");
WHERE("u.user_type = 1");
Object searchVal = parameter.get("searchVal");
if(searchVal != null && StringUtils.isNotEmpty(searchVal.toString())){
WHERE( " u.user_name like concat('%', #{searchVal}, '%') ");

17
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/WorkerServerMapper.java

@ -42,6 +42,23 @@ public interface WorkerServerMapper {
@SelectProvider(type = WorkerServerMapperProvider.class, method = "queryAllWorker")
List<WorkerServer> queryAllWorker();
/**
* query worker list by host
*
* @return
*/
@Results(value = {
@Result(property = "id", column = "id", javaType = Integer.class, jdbcType = JdbcType.INTEGER),
@Result(property = "host", column = "host", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "port", column = "port", javaType = int.class, jdbcType = JdbcType.INTEGER),
@Result(property = "zkDirectory", column = "zk_directory", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "resInfo", column = "res_info", javaType = String.class, jdbcType = JdbcType.VARCHAR),
@Result(property = "createTime", column = "create_time", javaType = Date.class, jdbcType = JdbcType.TIMESTAMP),
@Result(property = "lastHeartbeatTime", column = "last_heartbeat_time", javaType = Date.class, jdbcType = JdbcType.TIMESTAMP)
})
@SelectProvider(type = WorkerServerMapperProvider.class, method = "queryWorkerByHost")
List<WorkerServer> queryWorkerByHost(@Param("host") String host);
/**
* insert worker server
*

15
escheduler-dao/src/main/java/cn/escheduler/dao/mapper/WorkerServerMapperProvider.java

@ -37,6 +37,21 @@ public class WorkerServerMapperProvider {
}}.toString();
}
/**
* query worker list by host
* @return
*/
public String queryWorkerByHost(Map<String, Object> parameter) {
return new SQL() {{
SELECT("*");
FROM(TABLE_NAME);
WHERE("host = #{host}");
}}.toString();
}
/**
* insert worker server
* @param parameter

10
escheduler-dao/src/main/java/cn/escheduler/dao/model/ProcessData.java

@ -39,6 +39,8 @@ public class ProcessData {
private int timeout;
private int tenantId;
public ProcessData() {
}
@ -92,4 +94,12 @@ public class ProcessData {
public void setTimeout(int timeout) {
this.timeout = timeout;
}
public int getTenantId() {
return tenantId;
}
public void setTenantId(int tenantId) {
this.tenantId = tenantId;
}
}

13
escheduler-dao/src/main/java/cn/escheduler/dao/model/ProcessDefinition.java

@ -141,6 +141,11 @@ public class ProcessDefinition {
*/
private int timeout;
/**
* tenant id
*/
private int tenantId;
public String getName() {
return name;
@ -354,7 +359,15 @@ public class ProcessDefinition {
", receiversCc='" + receiversCc + '\'' +
", scheduleReleaseState=" + scheduleReleaseState +
", timeout=" + timeout +
", tenantId=" + tenantId +
'}';
}
public int getTenantId() {
return tenantId;
}
public void setTenantId(int tenantId) {
this.tenantId = tenantId;
}
}

53
escheduler-dao/src/main/java/cn/escheduler/dao/model/ProcessInstance.java

@ -188,6 +188,27 @@ public class ProcessInstance {
*/
private int timeout;
/**
* tenant id
*/
private int tenantId;
/**
* worker group name. for api.
*/
private String workerGroupName;
/**
* receivers for api
*/
private String receivers;
/**
* receivers cc for api
*/
private String receiversCc;
public ProcessInstance(){
}
@ -534,6 +555,7 @@ public class ProcessInstance {
", processInstanceJson='" + processInstanceJson + '\'' +
", executorId=" + executorId +
", tenantCode='" + tenantCode + '\'' +
", tenantId='" + tenantId + '\'' +
", queue='" + queue + '\'' +
", isSubProcess=" + isSubProcess +
", locations='" + locations + '\'' +
@ -546,4 +568,35 @@ public class ProcessInstance {
'}';
}
public void setTenantId(int tenantId) {
this.tenantId = tenantId;
}
public int getTenantId() {
return this.tenantId ;
}
public String getWorkerGroupName() {
return workerGroupName;
}
public void setWorkerGroupName(String workerGroupName) {
this.workerGroupName = workerGroupName;
}
public String getReceivers() {
return receivers;
}
public void setReceivers(String receivers) {
this.receivers = receivers;
}
public String getReceiversCc() {
return receiversCc;
}
public void setReceiversCc(String receiversCc) {
this.receiversCc = receiversCc;
}
}

8
escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/CreateEscheduler.java

@ -30,13 +30,15 @@ public class CreateEscheduler {
public static void main(String[] args) {
EschedulerManager eschedulerManager = new EschedulerManager();
eschedulerManager.initEscheduler();
logger.info("init escheduler finished");
try {
eschedulerManager.initEscheduler();
logger.info("init escheduler finished");
eschedulerManager.upgradeEscheduler();
logger.info("upgrade escheduler finished");
logger.info("create escheduler success");
} catch (Exception e) {
logger.error("upgrade escheduler failed",e);
logger.error("create escheduler failed",e);
}
}

2
escheduler-dao/src/main/java/cn/escheduler/dao/upgrade/shell/UpgradeEscheduler.java

@ -30,7 +30,7 @@ public class UpgradeEscheduler {
EschedulerManager eschedulerManager = new EschedulerManager();
try {
eschedulerManager.upgradeEscheduler();
logger.info("upgrade escheduler finished");
logger.info("upgrade escheduler success");
} catch (Exception e) {
logger.error(e.getMessage(),e);
logger.info("Upgrade escheduler failed");

125
escheduler-dao/src/main/java/cn/escheduler/dao/utils/DagHelper.java

@ -18,16 +18,20 @@ package cn.escheduler.dao.utils;
import cn.escheduler.common.enums.TaskDependType;
import cn.escheduler.common.graph.DAG;
import cn.escheduler.common.model.TaskNode;
import cn.escheduler.common.model.TaskNodeRelation;
import cn.escheduler.common.process.ProcessDag;
import cn.escheduler.common.utils.JSONUtils;
import cn.escheduler.dao.model.ProcessData;
import cn.escheduler.dao.model.TaskInstance;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
/**
* dag tools
@ -105,8 +109,7 @@ public class DagHelper {
}
for (TaskNode taskNode : tmpTaskNodeList) {
if ( !taskNode.isForbidden()
&& null == findNodeByName(destTaskNodeList, taskNode.getName())) {
if (null == findNodeByName(destTaskNodeList, taskNode.getName())) {
destTaskNodeList.add(taskNode);
}
}
@ -193,6 +196,24 @@ public class DagHelper {
return processDag;
}
/**
* parse the forbidden task nodes in process definition.
* @param processDefinitionJson
* @return
*/
public static Map<String, TaskNode> getForbiddenTaskNodeMaps(String processDefinitionJson){
Map<String, TaskNode> forbidTaskNodeMap = new ConcurrentHashMap<>();
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
List<TaskNode> taskNodeList = processData.getTasks();
for(TaskNode node : taskNodeList){
if(node.isForbidden()){
forbidTaskNodeMap.putIfAbsent(node.getName(), node);
}
}
return forbidTaskNodeMap;
}
/**
* find node by node name
@ -210,4 +231,100 @@ public class DagHelper {
}
return null;
}
/**
* get start vertex in one dag
* if a start vertex is forbidden to run, its post nodes are searched instead
* @param parentNodeName the previous node
* @param dag
* @param completeTaskList
* @return
*/
public static Collection<String> getStartVertex(String parentNodeName, DAG<String, TaskNode, TaskNodeRelation> dag,
Map<String, TaskInstance> completeTaskList){
if(completeTaskList == null){
completeTaskList = new HashMap<>();
}
Collection<String> startVertexs = null;
if(StringUtils.isNotEmpty(parentNodeName)){
startVertexs = dag.getSubsequentNodes(parentNodeName);
}else{
startVertexs = dag.getBeginNode();
}
List<String> tmpStartVertexs = new ArrayList<>();
if(startVertexs!= null){
tmpStartVertexs.addAll(startVertexs);
}
for(String start : startVertexs){
TaskNode startNode = dag.getNode(start);
if(!startNode.isForbidden() && !completeTaskList.containsKey(start)){
continue;
}
Collection<String> postNodes = getStartVertex(start, dag, completeTaskList);
for(String post : postNodes){
if(checkForbiddenPostCanSubmit(post, dag)){
tmpStartVertexs.add(post);
}
}
tmpStartVertexs.remove(start);
}
return tmpStartVertexs;
}
/**
*
* @param postNodeName
* @param dag
* @return
*/
private static boolean checkForbiddenPostCanSubmit(String postNodeName, DAG<String, TaskNode, TaskNodeRelation> dag){
TaskNode postNode = dag.getNode(postNodeName);
List<String> dependList = postNode.getDepList();
for(String dependNodeName : dependList){
TaskNode dependNode = dag.getNode(dependNodeName);
if(!dependNode.isForbidden()){
return false;
}
}
return true;
}
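To make the recursion concrete, here is a trace on an assumed three-node flow (A -> B -> C, with B forbidden and C depending only on B):

// completeTaskList empty:
//   getStartVertex(null, dag, {})    returns [A]          — the normal begin node
// after A completes:
//   getStartVertex("A", dag, {A})    reaches forbidden B, recurses, and asks
//   checkForbiddenPostCanSubmit("C") — true, because C's only dependency (B)
//   is forbidden — so [C] is returned and the flow does not stall on B.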
/***
* generate dag graph
* @param processDag
* @return
*/
public static DAG<String, TaskNode, TaskNodeRelation> buildDagGraph(ProcessDag processDag) {
DAG<String,TaskNode,TaskNodeRelation> dag = new DAG<>();
/**
* add vertex
*/
if (CollectionUtils.isNotEmpty(processDag.getNodes())){
for (TaskNode node : processDag.getNodes()){
dag.addNode(node.getName(),node);
}
}
/**
* add edge
*/
if (CollectionUtils.isNotEmpty(processDag.getEdges())){
for (TaskNodeRelation edge : processDag.getEdges()){
dag.addEdge(edge.getStartNode(),edge.getEndNode());
}
}
return dag;
}
}

6
escheduler-dao/src/main/resources/dao/data_source.properties

@ -1,9 +1,9 @@
# base spring data source configuration
spring.datasource.type=com.alibaba.druid.pool.DruidDataSource
spring.datasource.driver-class-name=com.mysql.jdbc.Driver
spring.datasource.url=jdbc:mysql://192.168.xx.xx:3306/escheduler?characterEncoding=UTF-8
spring.datasource.username=xx
spring.datasource.password=xx
spring.datasource.url=jdbc:mysql://192.168.220.188:3306/escheduler_new?characterEncoding=UTF-8
spring.datasource.username=root
spring.datasource.password=root@123
# connection configuration
spring.datasource.initialSize=5

2
escheduler-rpc/pom.xml

@ -4,7 +4,7 @@
<parent>
<artifactId>escheduler</artifactId>
<groupId>cn.analysys</groupId>
<version>1.0.4-SNAPSHOT</version>
<version>1.1.0-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>

2
escheduler-server/pom.xml

@ -3,7 +3,7 @@
<parent>
<artifactId>escheduler</artifactId>
<groupId>cn.analysys</groupId>
<version>1.0.4-SNAPSHOT</version>
<version>1.1.0-SNAPSHOT</version>
</parent>
<artifactId>escheduler-server</artifactId>
<name>escheduler-server</name>

35
escheduler-server/src/main/java/cn/escheduler/server/ResInfo.java

@ -17,8 +17,12 @@
package cn.escheduler.server;
import cn.escheduler.common.Constants;
import cn.escheduler.common.utils.DateUtils;
import cn.escheduler.common.utils.JSONUtils;
import cn.escheduler.common.utils.OSUtils;
import cn.escheduler.dao.model.MasterServer;
import java.util.Date;
/**
* heartbeat for ZK register res info
@ -98,6 +102,16 @@ public class ResInfo {
}
public static String getHeartBeatInfo(Date now){
return buildHeartbeatForZKInfo(OSUtils.getHost(),
OSUtils.getProcessID(),
OSUtils.cpuUsage(),
OSUtils.memoryUsage(),
DateUtils.dateToString(now),
DateUtils.dateToString(now));
}
/**
* build heartbeat info for zk
* @param host
@ -119,4 +133,25 @@ public class ResInfo {
+ lastHeartbeatTime;
}
/**
* parse heartbeat info for zk
* @param heartBeatInfo
* @return
*/
public static MasterServer parseHeartbeatForZKInfo(String heartBeatInfo){
MasterServer masterServer = null;
String[] masterArray = heartBeatInfo.split(Constants.COMMA);
if(masterArray.length != 6){
return masterServer;
}
masterServer = new MasterServer();
masterServer.setHost(masterArray[0]);
masterServer.setPort(Integer.parseInt(masterArray[1]));
masterServer.setResInfo(getResInfoJson(Double.parseDouble(masterArray[2]), Double.parseDouble(masterArray[3])));
masterServer.setCreateTime(DateUtils.stringToDate(masterArray[4]));
masterServer.setLastHeartbeatTime(DateUtils.stringToDate(masterArray[5]));
return masterServer;
}
}
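An assumed round trip through the two helpers above; the field order is host, process id, cpu usage, memory usage, create time, last heartbeat, and the timestamp format shown is an assumption:

String payload = "192.168.1.10,4120,0.35,0.62,2019-06-01 10:00:00,2019-06-01 10:05:00";
MasterServer server = ResInfo.parseHeartbeatForZKInfo(payload);
// host=192.168.1.10, port=4120 (the process id), resInfo carries the two usage
// figures as JSON; a payload with any other field count yields null, so callers
// must null-check before use.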

51
escheduler-server/src/main/java/cn/escheduler/server/master/runner/MasterExecThread.java

@ -79,6 +79,7 @@ public class MasterExecThread implements Runnable {
private Map<String, TaskInstance> completeTaskList = new ConcurrentHashMap<>();
private Map<String, TaskInstance> readyToSubmitTaskList = new ConcurrentHashMap<>();
private Map<String, TaskInstance> dependFailedTask = new ConcurrentHashMap<>();
private Map<String, TaskNode> forbiddenTaskList = new ConcurrentHashMap<>();
private List<TaskInstance> recoverToleranceFaultTaskList = new ArrayList<>();
private AlertManager alertManager = new AlertManager();
@ -269,6 +270,7 @@ public class MasterExecThread implements Runnable {
private void buildFlowDag() throws Exception {
recoverNodeIdList = getStartTaskInstanceList(processInstance.getCommandParam());
forbiddenTaskList = DagHelper.getForbiddenTaskNodeMaps(processInstance.getProcessInstanceJson());
// generate process to get DAG info
List<String> recoveryNameList = getRecoveryNodeNameList();
List<String> startNodeNameList = parseStartNodeName(processInstance.getCommandParam());
@ -279,7 +281,8 @@ public class MasterExecThread implements Runnable {
return;
}
// generate process dag
dag = buildDagGraph(processDag);
dag = DagHelper.buildDagGraph(processDag);
}
private void initTaskQueue(){
@ -411,6 +414,8 @@ public class MasterExecThread implements Runnable {
return taskInstance;
}
/**
* get post task instance by node
*
@ -421,14 +426,12 @@ public class MasterExecThread implements Runnable {
private List<TaskInstance> getPostTaskInstanceByNode(DAG<String, TaskNode, TaskNodeRelation> dag, String parentNodeName){
List<TaskInstance> postTaskList = new ArrayList<>();
Collection<String> startVertex = null;
if(StringUtils.isNotEmpty(parentNodeName)){
startVertex = dag.getSubsequentNodes(parentNodeName);
}else{
startVertex = dag.getBeginNode();
Collection<String> startVertex = DagHelper.getStartVertex(parentNodeName, dag, completeTaskList);
if(startVertex == null){
return postTaskList;
}
for (String nodeName : startVertex){
// encapsulation task instance
TaskInstance taskInstance = createTaskInstance(processInstance, nodeName ,
dag.getNode(nodeName),parentNodeName);
@ -517,7 +520,10 @@ public class MasterExecThread implements Runnable {
List<String> depsNameList = taskNode.getDepList();
for(String depsNode : depsNameList ){
// dependencies must be all complete
if(forbiddenTaskList.containsKey(depsNode)){
continue;
}
// dependencies must be fully completed
if(!completeTaskList.containsKey(depsNode)){
return DependResult.WAITING;
}
@ -904,35 +910,6 @@ public class MasterExecThread implements Runnable {
}
}
/***
* generate dag graph
* @param processDag
* @return
*/
public DAG<String, TaskNode, TaskNodeRelation> buildDagGraph(ProcessDag processDag) {
DAG<String,TaskNode,TaskNodeRelation> dag = new DAG<>();
/**
* add vertex
*/
if (CollectionUtils.isNotEmpty(processDag.getNodes())){
for (TaskNode node : processDag.getNodes()){
dag.addNode(node.getName(),node);
}
}
/**
* add edge
*/
if (CollectionUtils.isNotEmpty(processDag.getEdges())){
for (TaskNodeRelation edge : processDag.getEdges()){
dag.addEdge(edge.getStartNode(),edge.getEndNode());
}
}
return dag;
}
/**
* whether the retry interval is timed out
* @param taskInstance

3
escheduler-server/src/main/java/cn/escheduler/server/utils/LoggerUtils.java

@ -16,6 +16,7 @@
*/
package cn.escheduler.server.utils;
import cn.escheduler.common.Constants;
import org.slf4j.Logger;
import java.util.ArrayList;
@ -31,7 +32,7 @@ public class LoggerUtils {
/**
* rules for extracting application ID
*/
private static final Pattern APPLICATION_REGEX = Pattern.compile("\\d+_\\d+");
private static final Pattern APPLICATION_REGEX = Pattern.compile(Constants.APPLICATION_REGEX);
/**
* build job id

5
escheduler-server/src/main/java/cn/escheduler/server/utils/ProcessUtils.java

@ -294,9 +294,8 @@ public class ProcessUtils {
/**
* find logs and kill yarn tasks
* @param taskInstance
* @throws IOException
*/
public static void killYarnJob(TaskInstance taskInstance) throws Exception {
public static void killYarnJob(TaskInstance taskInstance) {
try {
Thread.sleep(Constants.SLEEP_TIME_MILLIS);
LogClient logClient = new LogClient(taskInstance.getHost(), Constants.RPC_PORT);
@ -316,7 +315,7 @@ public class ProcessUtils {
} catch (Exception e) {
logger.error("kill yarn job failed : " + e.getMessage(),e);
throw new RuntimeException("kill yarn job fail");
// throw new RuntimeException("kill yarn job fail");
}
}
}

21
escheduler-server/src/main/java/cn/escheduler/server/worker/runner/FetchTaskThread.java

@ -23,10 +23,7 @@ import cn.escheduler.common.thread.ThreadUtils;
import cn.escheduler.common.utils.FileUtils;
import cn.escheduler.common.utils.OSUtils;
import cn.escheduler.dao.ProcessDao;
import cn.escheduler.dao.model.ProcessDefinition;
import cn.escheduler.dao.model.ProcessInstance;
import cn.escheduler.dao.model.TaskInstance;
import cn.escheduler.dao.model.WorkerGroup;
import cn.escheduler.dao.model.*;
import cn.escheduler.server.zk.ZKWorkerClient;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.lang3.StringUtils;
@ -156,7 +153,7 @@ public class FetchTaskThread implements Runnable{
}
String[] taskStringArray = taskQueueStr.split(Constants.UNDERLINE);
String taskInstIdStr = taskStringArray[taskStringArray.length - 1];
String taskInstIdStr = taskStringArray[3];
Date now = new Date();
Integer taskId = Integer.parseInt(taskInstIdStr);
@ -211,9 +208,17 @@ public class FetchTaskThread implements Runnable{
// set task execute path
taskInstance.setExecutePath(execLocalPath);
// check and create Linux users
FileUtils.createWorkDirAndUserIfAbsent(execLocalPath,
processInstance.getTenantCode(), logger);
Tenant tenant = processDao.getTenantForProcess(processInstance.getTenantId(),
processDefine.getUserId());
if(tenant == null){
logger.error("cannot find suitable tenant for the task:{}, process instance tenant:{}, process definition tenant:{}",
taskInstance.getName(),processInstance.getTenantId(), processDefine.getTenantId());
continue;
}
// check and create Linux users
FileUtils.createWorkDirAndUserIfAbsent(execLocalPath,
tenant.getTenantCode(), logger);
logger.info("task : {} ready to submit to task scheduler thread",taskId);
// submit task
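Why the fixed index matters, using the element format from TaskQueueImplTest earlier in this diff:

String taskQueueStr = "1_1_1_1_2130706433,3232236775";
String[] arr = taskQueueStr.split("_");
int taskId = Integer.parseInt(arr[3]);   // the fourth field is always the task id
// the old arr[arr.length - 1] would read "2130706433,3232236775" here and throw
// NumberFormatException once the executor-ip suffix was appended to the element.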

144
escheduler-server/src/main/java/cn/escheduler/server/worker/runner/TaskScheduleThread.java

@ -34,8 +34,10 @@ import cn.escheduler.common.task.sql.SqlParameters;
import cn.escheduler.common.utils.*;
import cn.escheduler.dao.ProcessDao;
import cn.escheduler.dao.TaskRecordDao;
import cn.escheduler.dao.model.ProcessDefinition;
import cn.escheduler.dao.model.ProcessInstance;
import cn.escheduler.dao.model.TaskInstance;
import cn.escheduler.dao.model.Tenant;
import cn.escheduler.server.utils.LoggerUtils;
import cn.escheduler.server.utils.ParamUtils;
import cn.escheduler.server.worker.log.TaskLogger;
@ -160,82 +162,94 @@ public class TaskScheduleThread implements Callable<Boolean> {
// set task params
taskProps.setTaskParams(taskNode.getParams());
// set tenant code , execute task linux user
taskProps.setTenantCode(taskInstance.getProcessInstance().getTenantCode());
ProcessInstance processInstance = processDao.findProcessInstanceByTaskId(taskInstance.getId());
String queue = processDao.queryQueueByProcessInstanceId(processInstance.getId());
taskProps.setScheduleTime(processInstance.getScheduleTime());
taskProps.setNodeName(taskInstance.getName());
taskProps.setTaskInstId(taskInstance.getId());
taskProps.setEnvFile(CommonUtils.getSystemEnvPath());
// set queue
if (StringUtils.isEmpty(queue)){
taskProps.setQueue(taskInstance.getProcessInstance().getQueue());
}else {
taskProps.setQueue(queue);
}
taskProps.setTaskStartTime(taskInstance.getStartTime());
taskProps.setDefinedParams(allParamMap);
// set task timeout
setTaskTimeout(taskProps, taskNode);
taskProps.setDependence(taskInstance.getDependency());
taskProps.setTaskAppId(String.format("%s_%s_%s",
taskInstance.getProcessDefine().getId(),
taskInstance.getProcessInstance().getId(),
taskInstance.getId()));
// custom logger
TaskLogger taskLogger = new TaskLogger(LoggerUtils.buildTaskId(TASK_PREFIX,
taskInstance.getProcessDefine().getId(),
taskInstance.getProcessInstance().getId(),
taskInstance.getId()));
task = TaskManager.newTask(taskInstance.getTaskType(), taskProps, taskLogger);
// job init
task.init();
// job handle
task.handle();
logger.info("task : {} exit status code : {}", taskProps.getTaskAppId(),task.getExitStatusCode());
if (task.getExitStatusCode() == Constants.EXIT_CODE_SUCCESS){
status = ExecutionStatus.SUCCESS;
// task record flag : if true , start up qianfan
if (TaskRecordDao.getTaskRecordFlag()
&& TaskType.typeIsNormalTask(taskInstance.getTaskType())){
AbstractParameters params = (AbstractParameters) JSONUtils.parseObject(taskProps.getTaskParams(), getCurTaskParamsClass());
// replace placeholder
Map<String, Property> paramsMap = ParamUtils.convert(taskProps.getUserDefParamsMap(),
taskProps.getDefinedParams(),
params.getLocalParametersMap(),
processInstance.getCmdTypeIfComplement(),
processInstance.getScheduleTime());
if (paramsMap != null && !paramsMap.isEmpty()
&& paramsMap.containsKey("v_proc_date")){
String vProcDate = paramsMap.get("v_proc_date").getValue();
if (!StringUtils.isEmpty(vProcDate)){
TaskRecordStatus taskRecordState = TaskRecordDao.getTaskRecordState(taskInstance.getName(), vProcDate);
logger.info("task record status : {}",taskRecordState);
if (taskRecordState == TaskRecordStatus.FAILURE){
status = ExecutionStatus.FAILURE;
ProcessDefinition processDefine = processDao.findProcessDefineById(processInstance.getProcessDefinitionId());
Tenant tenant = processDao.getTenantForProcess(processInstance.getTenantId(),
processDefine.getUserId());
if(tenant == null){
logger.error("cannot find the tenant, process definition id:{}, tenant id:{}, user id:{}",
processDefine.getId(), processDefine.getTenantId(), processDefine.getUserId()
);
status = ExecutionStatus.FAILURE;
}else{
taskProps.setTenantCode(tenant.getTenantCode());
String queue = processDao.queryQueueByProcessInstanceId(processInstance.getId());
// set queue
if (StringUtils.isEmpty(queue)){
taskProps.setQueue(taskInstance.getProcessInstance().getQueue());
}else {
taskProps.setQueue(tenant.getQueueName());
}
taskProps.setTaskStartTime(taskInstance.getStartTime());
taskProps.setDefinedParams(allParamMap);
// set task timeout
setTaskTimeout(taskProps, taskNode);
taskProps.setDependence(taskInstance.getDependency());
taskProps.setTaskAppId(String.format("%s_%s_%s",
taskInstance.getProcessDefine().getId(),
taskInstance.getProcessInstance().getId(),
taskInstance.getId()));
// custom logger
TaskLogger taskLogger = new TaskLogger(LoggerUtils.buildTaskId(TASK_PREFIX,
taskInstance.getProcessDefine().getId(),
taskInstance.getProcessInstance().getId(),
taskInstance.getId()));
task = TaskManager.newTask(taskInstance.getTaskType(), taskProps, taskLogger);
// job init
task.init();
// job handle
task.handle();
logger.info("task : {} exit status code : {}", taskProps.getTaskAppId(),task.getExitStatusCode());
if (task.getExitStatusCode() == Constants.EXIT_CODE_SUCCESS){
status = ExecutionStatus.SUCCESS;
// task record flag : if true, start up qianfan
if (TaskRecordDao.getTaskRecordFlag()
&& TaskType.typeIsNormalTask(taskInstance.getTaskType())){
AbstractParameters params = (AbstractParameters) JSONUtils.parseObject(taskProps.getTaskParams(), getCurTaskParamsClass());
// replace placeholder
Map<String, Property> paramsMap = ParamUtils.convert(taskProps.getUserDefParamsMap(),
taskProps.getDefinedParams(),
params.getLocalParametersMap(),
processInstance.getCmdTypeIfComplement(),
processInstance.getScheduleTime());
if (paramsMap != null && !paramsMap.isEmpty()
&& paramsMap.containsKey("v_proc_date")){
String vProcDate = paramsMap.get("v_proc_date").getValue();
if (!StringUtils.isEmpty(vProcDate)){
TaskRecordStatus taskRecordState = TaskRecordDao.getTaskRecordState(taskInstance.getName(), vProcDate);
logger.info("task record status : {}",taskRecordState);
if (taskRecordState == TaskRecordStatus.FAILURE){
status = ExecutionStatus.FAILURE;
}
}
}
}
}
}else if (task.getExitStatusCode() == Constants.EXIT_CODE_KILL){
status = ExecutionStatus.KILL;
}else {
status = ExecutionStatus.FAILURE;
}else if (task.getExitStatusCode() == Constants.EXIT_CODE_KILL){
status = ExecutionStatus.KILL;
}else {
status = ExecutionStatus.FAILURE;
}
}
}catch (Exception e){
logger.error("task escheduler failure : " + e.getMessage(),e);

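A note on the tenant lookup above: ProcessDao.getTenantForProcess is not shown in this diff, but the calling code implies a two-step fallback (tenant bound to the process instance first, then the tenant of the definition owner). A minimal sketch of that contract follows; TenantLookup is a hypothetical stand-in for the real mappers, not escheduler API, and Tenant refers to cn.escheduler.dao.model.Tenant:

// Hypothetical sketch only: TenantLookup stands in for the real tenant/user mappers.
interface TenantLookup {
    Tenant byTenantId(int tenantId);   // tenant bound to the process instance
    Tenant byUserId(int userId);       // tenant of the process definition owner
}

static Tenant getTenantForProcess(TenantLookup lookup, int tenantId, int userId) {
    Tenant tenant = tenantId >= 1 ? lookup.byTenantId(tenantId) : null;
    if (tenant == null) {
        // fall back to the definition owner's tenant
        tenant = lookup.byUserId(userId);
    }
    return tenant;
}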
9
escheduler-server/src/main/java/cn/escheduler/server/worker/task/PythonCommandExecutor.java

@ -18,6 +18,7 @@ package cn.escheduler.server.worker.task;
import cn.escheduler.common.Constants;
import cn.escheduler.common.utils.FileUtils;
import cn.escheduler.common.utils.PropertyUtils;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -71,11 +72,11 @@ public class PythonCommandExecutor extends AbstractCommandExecutor {
if (!Files.exists(Paths.get(commandFile))) {
logger.info("generate command file:{}", commandFile);
StringBuilder sb = new StringBuilder(200);
StringBuilder sb = new StringBuilder();
sb.append("#-*- encoding=utf8 -*-\n");
sb.append("\n\n");
sb.append(String.format("import py_%s_node\n",taskAppId));
sb.append(execCommand);
logger.info(sb.toString());
// write data to file
@ -86,8 +87,8 @@ public class PythonCommandExecutor extends AbstractCommandExecutor {
@Override
protected String commandType() {
String envPath = System.getProperty("user.dir") + Constants.SINGLE_SLASH + "conf"+
Constants.SINGLE_SLASH +"env" + Constants.SINGLE_SLASH + Constants.ESCHEDULER_ENV_SH;
String envPath = PropertyUtils.getString(Constants.ESCHEDULER_ENV_PATH);
String pythonHome = getPythonHome(envPath);
if (StringUtils.isEmpty(pythonHome)){
return PYTHON;

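The new commandType() resolves the python interpreter from the env file referenced by Constants.ESCHEDULER_ENV_PATH. getPythonHome is not shown in the hunk; a plausible sketch, assuming the env file carries a line like export PYTHON_HOME=/opt/python (the parsing details here are illustrative, not the actual implementation):

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;

// Hypothetical sketch of the PYTHON_HOME lookup behind commandType().
public class PythonHomeSketch {
    static String getPythonHome(String envPath) {
        try (BufferedReader br = new BufferedReader(new FileReader(envPath))) {
            String line;
            while ((line = br.readLine()) != null) {
                if (line.contains("PYTHON_HOME")) {
                    int idx = line.indexOf('=');   // "export PYTHON_HOME=/opt/python"
                    if (idx > 0) {
                        return line.substring(idx + 1).trim();
                    }
                }
            }
        } catch (IOException e) {
            // ignore: the caller falls back to the bare "python" command
        }
        return null;
    }
}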
29
escheduler-server/src/main/java/cn/escheduler/server/worker/task/python/PythonTask.java

@ -112,14 +112,14 @@ public class PythonTask extends AbstractTask {
*/
private String buildCommand() throws Exception {
// generate scripts
String fileName = String.format("%s/py_%s_node.py", taskDir, taskProps.getTaskAppId());
Path path = new File(fileName).toPath();
// String fileName = String.format("%s/py_%s_node.py", taskDir, taskProps.getTaskAppId());
// Path path = new File(fileName).toPath();
if (Files.exists(path)) {
return fileName;
}
// if (Files.exists(path)) {
// return fileName;
// }
String rawScript = pythonParameters.getRawScript().replaceAll("\\r\\n", "\n");
@ -140,19 +140,20 @@ public class PythonTask extends AbstractTask {
}
pythonParameters.setRawScript(rawScript);
// pythonParameters.setRawScript(rawScript);
logger.info("raw script : {}", pythonParameters.getRawScript());
logger.info("task dir : {}", taskDir);
Set<PosixFilePermission> perms = PosixFilePermissions.fromString("rwxr-xr-x");
FileAttribute<Set<PosixFilePermission>> attr = PosixFilePermissions.asFileAttribute(perms);
Files.createFile(path, attr);
Files.write(path, pythonParameters.getRawScript().getBytes(), StandardOpenOption.APPEND);
return fileName;
// Set<PosixFilePermission> perms = PosixFilePermissions.fromString("rwxr-xr-x");
// FileAttribute<Set<PosixFilePermission>> attr = PosixFilePermissions.asFileAttribute(perms);
//
// Files.createFile(path, attr);
//
// Files.write(path, pythonParameters.getRawScript().getBytes(), StandardOpenOption.APPEND);
//
// return fileName;
return rawScript;
}
@Override

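The net effect of the commented-out block above is a contract change: buildCommand() no longer writes py_<taskAppId>_node.py to disk and returns its path; it returns the substituted script text and leaves persistence to the command executor. A condensed sketch of the resulting body, with the parameter substitution elided:

private String buildCommand() throws Exception {
    // normalize line endings, then substitute scheduling parameters (substitution elided here)
    String rawScript = pythonParameters.getRawScript().replaceAll("\\r\\n", "\n");
    logger.info("raw script : {}", rawScript);
    logger.info("task dir : {}", taskDir);
    // the executor now receives the script text itself, not a file path
    return rawScript;
}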
17
escheduler-server/src/main/java/cn/escheduler/server/worker/task/sql/SqlTask.java

@ -29,6 +29,7 @@ import cn.escheduler.common.task.sql.SqlBinds;
import cn.escheduler.common.task.sql.SqlParameters;
import cn.escheduler.common.task.sql.SqlType;
import cn.escheduler.common.utils.CollectionUtils;
import cn.escheduler.common.utils.CommonUtils;
import cn.escheduler.common.utils.ParameterUtils;
import cn.escheduler.dao.AlertDao;
import cn.escheduler.dao.DaoFactory;
@ -43,6 +44,8 @@ import com.alibaba.fastjson.JSONObject;
import com.alibaba.fastjson.serializer.SerializerFeature;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang3.EnumUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger;
import java.sql.*;
@ -51,6 +54,8 @@ import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import static cn.escheduler.common.utils.PropertyUtils.getString;
/**
* sql task
*/
@ -228,7 +233,15 @@ public class SqlTask extends AbstractTask {
List<String> createFuncs){
Connection connection = null;
try {
if (CommonUtils.getKerberosStartupState()) {
System.setProperty(cn.escheduler.common.Constants.JAVA_SECURITY_KRB5_CONF,
getString(cn.escheduler.common.Constants.JAVA_SECURITY_KRB5_CONF_PATH));
Configuration configuration = new Configuration();
configuration.set(cn.escheduler.common.Constants.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
UserGroupInformation.setConfiguration(configuration);
UserGroupInformation.loginUserFromKeytab(getString(cn.escheduler.common.Constants.LOGIN_USER_KEY_TAB_USERNAME),
getString(cn.escheduler.common.Constants.LOGIN_USER_KEY_TAB_PATH));
}
if (DbType.HIVE.name().equals(sqlParameters.getType())) {
Properties paramProp = new Properties();
paramProp.setProperty("user", baseDataSource.getUser());
@ -278,7 +291,7 @@ public class SqlTask extends AbstractTask {
array.add(mapOfColValues);
}
logger.info("execute sql : {}", JSONObject.toJSONString(array, SerializerFeature.WriteMapNullValue));
logger.debug("execute sql : {}", JSONObject.toJSONString(array, SerializerFeature.WriteMapNullValue));
// send as an attachment
if (StringUtils.isEmpty(sqlParameters.getShowType())) {

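For reference, the Kerberos bootstrap added above reduces to the standard UserGroupInformation login sequence. A self-contained sketch; the three argument values are read from common.properties via the Constants keys shown in the hunk (JAVA_SECURITY_KRB5_CONF_PATH, LOGIN_USER_KEY_TAB_USERNAME, LOGIN_USER_KEY_TAB_PATH):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

// Minimal, self-contained version of the Kerberos bootstrap added in SqlTask.
public class KerberosLoginSketch {
    static void login(String krb5Conf, String principal, String keytab) throws Exception {
        // point the JVM at the krb5.conf to use
        System.setProperty("java.security.krb5.conf", krb5Conf);
        Configuration conf = new Configuration();
        conf.set("hadoop.security.authentication", "kerberos");
        UserGroupInformation.setConfiguration(conf);
        // log in from the configured keytab before opening any JDBC connection
        UserGroupInformation.loginUserFromKeytab(principal, keytab);
    }
}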
224
escheduler-server/src/main/java/cn/escheduler/server/zk/ZKMasterClient.java

@ -18,6 +18,7 @@ package cn.escheduler.server.zk;
import cn.escheduler.common.Constants;
import cn.escheduler.common.enums.ExecutionStatus;
import cn.escheduler.common.enums.ZKNodeType;
import cn.escheduler.common.utils.CollectionUtils;
import cn.escheduler.common.utils.DateUtils;
import cn.escheduler.common.utils.OSUtils;
@ -28,10 +29,11 @@ import cn.escheduler.dao.ProcessDao;
import cn.escheduler.dao.ServerDao;
import cn.escheduler.dao.model.ProcessInstance;
import cn.escheduler.dao.model.TaskInstance;
import cn.escheduler.dao.model.WorkerServer;
import cn.escheduler.server.ResInfo;
import cn.escheduler.server.utils.ProcessUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.imps.CuratorFrameworkState;
import org.apache.curator.framework.recipes.cache.PathChildrenCache;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener;
@ -134,7 +136,9 @@ public class ZKMasterClient extends AbstractZKClient {
// check whether fault tolerance (failover) is required
if (getActiveMasterNum() == 1) {
processDao.masterStartupFaultTolerant();
failoverWorker(null, true);
// processDao.masterStartupFaultTolerant();
failoverMaster(null);
}
}catch (Exception e){
@ -190,31 +194,20 @@ public class ZKMasterClient extends AbstractZKClient {
Date now = new Date();
createTime = now ;
try {
String osHost = OSUtils.getHost();
// build the master znode path
masterZNode = masterZNodeParentPath + "/" + OSUtils.getHost() + "_";
List<String> masterZNodeList = zkClient.getChildren().forPath(masterZNodeParentPath);
if (CollectionUtils.isNotEmpty(masterZNodeList)){
boolean flag = false;
for (String masterZNode : masterZNodeList){
if (masterZNode.startsWith(OSUtils.getHost())){
flag = true;
break;
}
}
if (flag){
logger.error("register failure , master already started on host : {}" , OSUtils.getHost());
// exit system
System.exit(-1);
}
// zookeeper node exists, cannot start a new one.
if(checkZKNodeExists(osHost, ZKNodeType.MASTER)){
logger.error("register failure , master already started on host : {}" , osHost);
// exit system
System.exit(-1);
}
// specify the format of stored data in ZK nodes
String heartbeatZKInfo = getOsInfo(now);
String heartbeatZKInfo = ResInfo.getHeartBeatInfo(now);
// create temporary sequence nodes for master znode
masterZNode = zkClient.create().withMode(CreateMode.EPHEMERAL_SEQUENTIAL).forPath(masterZNode, heartbeatZKInfo.getBytes());
masterZNode = zkClient.create().withMode(CreateMode.EPHEMERAL_SEQUENTIAL).forPath(
masterZNodeParentPath + "/" + OSUtils.getHost() + "_", heartbeatZKInfo.getBytes());
logger.info("register master node {} success" , masterZNode);
@ -238,6 +231,46 @@ public class ZKMasterClient extends AbstractZKClient {
}
/**
* check whether the zookeeper node already exists
* @param host
* @param zkNodeType
* @return
* @throws Exception
*/
private boolean checkZKNodeExists(String host, ZKNodeType zkNodeType) throws Exception {
String path = null;
switch (zkNodeType){
case MASTER:
path = masterZNodeParentPath;
break;
case WORKER:
path = workerZNodeParentPath;
break;
case DEAD_SERVER:
path = deadServerZNodeParentPath;
break;
default:
break;
}
if(StringUtils.isEmpty(path)){
logger.error("check zk node exists error, host:{}, zk node type:{}", host, zkNodeType.toString());
return false;
}
List<String> serverList = null;
serverList = zkClient.getChildren().forPath(path);
if (CollectionUtils.isNotEmpty(serverList)){
for (String masterZNode : serverList){
if (masterZNode.startsWith(host)){
return true;
}
}
}
return false;
}
/**
* monitor master
*/
@ -279,17 +312,9 @@ public class ZKMasterClient extends AbstractZKClient {
for (int i = 0; i < Constants.ESCHEDULER_WARN_TIMES_FAILOVER;i++) {
alertDao.sendServerStopedAlert(1, masterHost, "Master-Server");
}
logger.info("start master failover ...");
List<ProcessInstance> needFailoverProcessInstanceList = processDao.queryNeedFailoverProcessInstances(masterHost);
// set the process instance host to null and insert a failover command
for(ProcessInstance processInstance : needFailoverProcessInstanceList){
processDao.processNeedFailoverProcessInstances(processInstance);
if(StringUtils.isNotEmpty(masterHost)){
failoverMaster(masterHost);
}
logger.info("master failover end");
}catch (Exception e){
logger.error("master failover failed : " + e.getMessage(),e);
}finally {
@ -331,6 +356,8 @@ public class ZKMasterClient extends AbstractZKClient {
}
/**
* monitor worker
*/
@ -369,23 +396,9 @@ public class ZKMasterClient extends AbstractZKClient {
alertDao.sendServerStopedAlert(1, workerHost, "Worker-Server");
}
logger.info("start worker failover ...");
List<TaskInstance> needFailoverTaskInstanceList = processDao.queryNeedFailoverTaskInstances(workerHost);
for(TaskInstance taskInstance : needFailoverTaskInstanceList){
ProcessInstance instance = processDao.findProcessInstanceDetailById(taskInstance.getProcessInstanceId());
if(instance!=null){
taskInstance.setProcessInstance(instance);
}
// only kill the yarn job if one exists; the local thread has already exited
ProcessUtils.killYarnJob(taskInstance);
}
//updateProcessInstance state value is NEED_FAULT_TOLERANCE
processDao.updateNeedFailoverTaskInstances(workerHost);
logger.info("worker failover end");
if(StringUtils.isNotEmpty(workerHost)){
failoverWorker(workerHost, true);
}
}catch (Exception e){
logger.error("worker failover failed : " + e.getMessage(),e);
}
@ -410,22 +423,6 @@ public class ZKMasterClient extends AbstractZKClient {
}
/**
* get os info
* @param now
* @return
*/
private String getOsInfo(Date now) {
return ResInfo.buildHeartbeatForZKInfo(OSUtils.getHost(),
OSUtils.getProcessID(),
OSUtils.cpuUsage(),
OSUtils.memoryUsage(),
DateUtils.dateToString(now),
DateUtils.dateToString(now));
}
/**
* get master znode
* @return
@ -435,50 +432,100 @@ public class ZKMasterClient extends AbstractZKClient {
}
/**
* get master lock path
* @return
*/
public String getMasterLockPath(){
return conf.getString(Constants.ZOOKEEPER_ESCHEDULER_LOCK_MASTERS);
}
/**
* get master start up lock path
* the task needs failover if it started before the worker started
*
* @param taskInstance
* @return
*/
public String getMasterStartUpLockPath(){
return conf.getString(Constants.ZOOKEEPER_ESCHEDULER_LOCK_FAILOVER_STARTUP_MASTERS);
private boolean checkTaskInstanceNeedFailover(TaskInstance taskInstance) throws Exception {
boolean taskNeedFailover = true;
// if the worker node exists in zookeeper, check whether the task started after the worker
if(checkZKNodeExists(taskInstance.getHost(), ZKNodeType.WORKER)){
// if the task started after the worker started, there is no need to fail over the task
if(checkTaskAfterWorkerStart(taskInstance)){
taskNeedFailover = false;
}
}
return taskNeedFailover;
}
/**
* get master failover lock path
* check whether the task started after the worker server started.
* @param taskInstance
* @return
*/
public String getMasterFailoverLockPath(){
return conf.getString(Constants.ZOOKEEPER_ESCHEDULER_LOCK_FAILOVER_MASTERS);
private boolean checkTaskAfterWorkerStart(TaskInstance taskInstance) {
Date workerServerStartDate = null;
List<WorkerServer> workerServers = processDao.queryWorkerServerByHost(taskInstance.getHost());
if(workerServers.size() > 0){
workerServerStartDate = workerServers.get(0).getCreateTime();
}
if(workerServerStartDate != null){
return taskInstance.getStartTime().after(workerServerStartDate);
}else{
return false;
}
}
/**
* get worker failover lock path
* @return
* failover worker tasks:
* 1. kill the yarn job if the task has one.
* 2. change the task state from running to NEED_FAULT_TOLERANCE.
* @param workerHost
*/
public String getWorkerFailoverLockPath(){
return conf.getString(Constants.ZOOKEEPER_ESCHEDULER_LOCK_FAILOVER_WORKERS);
private void failoverWorker(String workerHost, boolean needCheckWorkerAlive) throws Exception {
logger.info("start worker[{}] failover ...", workerHost);
List<TaskInstance> needFailoverTaskInstanceList = processDao.queryNeedFailoverTaskInstances(workerHost);
for(TaskInstance taskInstance : needFailoverTaskInstanceList){
if(needCheckWorkerAlive){
if(!checkTaskInstanceNeedFailover(taskInstance)){
continue;
}
}
ProcessInstance instance = processDao.findProcessInstanceDetailById(taskInstance.getProcessInstanceId());
if(instance!=null){
taskInstance.setProcessInstance(instance);
}
// only kill the yarn job if one exists; the local thread has already exited
ProcessUtils.killYarnJob(taskInstance);
taskInstance.setState(ExecutionStatus.NEED_FAULT_TOLERANCE);
processDao.saveTaskInstance(taskInstance);
}
// update the task instance state to NEED_FAULT_TOLERANCE
// processDao.updateNeedFailoverTaskInstances(workerHost);
logger.info("end worker[{}] failover ...", workerHost);
}
/**
* get zkclient
* @return
* failover master tasks
* @param masterHost
*/
public CuratorFramework getZkClient() {
return zkClient;
}
private void failoverMaster(String masterHost) {
logger.info("start master failover ...");
List<ProcessInstance> needFailoverProcessInstanceList = processDao.queryNeedFailoverProcessInstances(masterHost);
// set the process instance host to null and insert a failover command
for(ProcessInstance processInstance : needFailoverProcessInstanceList){
processDao.processNeedFailoverProcessInstances(processInstance);
}
logger.info("master failover end");
}
/**
* get host ip
* get host ip, string format: masterParentPath/ip_000001/value
* @param path
* @return
*/
@ -488,6 +535,7 @@ public class ZKMasterClient extends AbstractZKClient {
if(startIndex >= endIndex){
logger.error("parse ip error");
return "";
}
return path.substring(startIndex, endIndex);
}

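Both register paths now build their heartbeat payload via ResInfo.getHeartBeatInfo. Judging by the removed getOsInfo helper, the payload carries host, process id, cpu usage, memory usage and two timestamps; a hypothetical equivalent builder follows (field order and the comma separator are inferred from the old helper's argument order, not confirmed by this diff):

import java.text.SimpleDateFormat;
import java.util.Date;

// Hypothetical equivalent of the removed getOsInfo()/buildHeartbeatForZKInfo pair.
public class HeartbeatSketch {
    static String build(String host, int pid, double cpu, double mem, Date now) {
        String ts = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(now);
        // host, pid, cpu usage, memory usage, create time, last heartbeat time
        return host + "," + pid + "," + cpu + "," + mem + "," + ts + "," + ts;
    }
}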
31
escheduler-server/src/main/java/cn/escheduler/server/zk/ZKWorkerClient.java

@ -116,11 +116,10 @@ public class ZKWorkerClient extends AbstractZKClient {
public String initWorkZNode() throws Exception {
Date now = new Date();
String heartbeatZKInfo = getOsInfo(now);
String heartbeatZKInfo = ResInfo.getHeartBeatInfo(new Date());
workerZNode = workerZNodeParentPath + "/" + OSUtils.getHost() + "_";
workerZNode = zkClient.create().withMode(CreateMode.EPHEMERAL_SEQUENTIAL).forPath(workerZNode,
heartbeatZKInfo.getBytes());
logger.info("register worker node {} success", workerZNode);
@ -141,7 +140,6 @@ public class ZKWorkerClient extends AbstractZKClient {
workerZNode = workerZNodeParentPath + "/" + OSUtils.getHost() + "_";
List<String> workerZNodeList = zkClient.getChildren().forPath(workerZNodeParentPath);
if (CollectionUtils.isNotEmpty(workerZNodeList)){
boolean flag = false;
for (String workerZNode : workerZNodeList){
@ -241,21 +239,6 @@ public class ZKWorkerClient extends AbstractZKClient {
}
/**
* get os info
* @param now
* @return
*/
private String getOsInfo(Date now) {
return ResInfo.buildHeartbeatForZKInfo(OSUtils.getHost(),
OSUtils.getProcessID(),
OSUtils.cpuUsage(),
OSUtils.memoryUsage(),
DateUtils.dateToString(now),
DateUtils.dateToString(now));
}
/**
* get worker znode
* @return
@ -264,16 +247,6 @@ public class ZKWorkerClient extends AbstractZKClient {
return workerZNode;
}
/**
* get zkclient
* @return
*/
public CuratorFramework getZkClient() {
return zkClient;
}
/**
* get worker lock path
* @return

42
escheduler-server/src/test/java/cn/escheduler/server/master/MasterCommandTest.java

@ -18,15 +18,27 @@ package cn.escheduler.server.master;
import cn.escheduler.common.enums.CommandType;
import cn.escheduler.common.enums.FailureStrategy;
import cn.escheduler.common.enums.TaskDependType;
import cn.escheduler.common.enums.WarningType;
import cn.escheduler.common.graph.DAG;
import cn.escheduler.common.model.TaskNode;
import cn.escheduler.common.model.TaskNodeRelation;
import cn.escheduler.common.process.ProcessDag;
import cn.escheduler.dao.datasource.ConnectionFactory;
import cn.escheduler.dao.mapper.CommandMapper;
import cn.escheduler.dao.mapper.ProcessDefinitionMapper;
import cn.escheduler.dao.model.Command;
import cn.escheduler.dao.model.ProcessDefinition;
import cn.escheduler.dao.utils.DagHelper;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Map;
/**
* master test
*/
@ -36,9 +48,14 @@ public class MasterCommandTest {
private CommandMapper commandMapper;
private ProcessDefinitionMapper processDefinitionMapper;
@Before
public void before(){
commandMapper = ConnectionFactory.getSqlSession().getMapper(CommandMapper.class);
processDefinitionMapper = ConnectionFactory.getSqlSession().getMapper(ProcessDefinitionMapper.class);
}
@ -104,4 +121,29 @@ public class MasterCommandTest {
}
@Test
public void testDagHelper(){
ProcessDefinition processDefinition = processDefinitionMapper.queryByDefineId(19);
try {
ProcessDag processDag = DagHelper.generateFlowDag(processDefinition.getProcessDefinitionJson(),
new ArrayList<>(), new ArrayList<>(), TaskDependType.TASK_POST);
DAG<String,TaskNode,TaskNodeRelation> dag = DagHelper.buildDagGraph(processDag);
Collection<String> start = DagHelper.getStartVertex("1", dag, null);
System.out.println(start.toString());
Map<String, TaskNode> forbidden = DagHelper.getForbiddenTaskNodeMaps(processDefinition.getProcessDefinitionJson());
System.out.println(forbidden);
} catch (Exception e) {
e.printStackTrace();
}
}
}

5
escheduler-server/src/test/java/cn/escheduler/server/zk/ZKWorkerClientTest.java

@ -1,6 +1,7 @@
package cn.escheduler.server.zk;
import cn.escheduler.common.Constants;
import cn.escheduler.common.zk.AbstractZKClient;
import org.junit.Test;
import java.util.Arrays;
@ -17,8 +18,8 @@ public class ZKWorkerClientTest {
public void getZKWorkerClient() throws Exception {
ZKWorkerClient zkWorkerClient = ZKWorkerClient.getZKWorkerClient();
zkWorkerClient.removeDeadServerByHost("127.0.0.1", Constants.WORKER_PREFIX);
// ZKWorkerClient zkWorkerClient = ZKWorkerClient.getZKWorkerClient();
// zkWorkerClient.removeDeadServerByHost("127.0.0.1", Constants.WORKER_PREFIX);
}

2
escheduler-ui/src/js/conf/home/pages/dag/_source/config.js

@ -26,7 +26,7 @@ import Permissions from '@/module/permissions'
* @desc tooltip
*/
const toolOper = (dagThis) => {
let disabled = Permissions.getAuth() === false ? false : !dagThis.$store.state.dag.isDetails
let disabled = !dagThis.$store.state.dag.isDetails // Permissions.getAuth() === false ? false : !dagThis.$store.state.dag.isDetails
return [
{
code: 'pointer',

42
escheduler-ui/src/js/conf/home/pages/dag/_source/dag.vue

@ -8,7 +8,7 @@
:id="v"
v-for="(item,v) in tasksTypeList"
@mousedown="_getDagId(v)">
<div data-toggle="tooltip" :title="item.desc" :class="_isDetails">
<div data-toggle="tooltip" :title="item.desc">
<div class="icos" :class="'icos-' + v" ></div>
</div>
</div>
@ -28,6 +28,17 @@
@click="_toggleView"
icon="fa fa-code">
</x-button>
<x-button
style="vertical-align: middle;"
data-toggle="tooltip"
:title="$t('Startup parameter')"
data-container="body"
type="primary"
size="xsmall"
:disabled="$route.name !== 'projects-instance-details'"
@click="_toggleParam"
icon="fa fa-chevron-circle-right">
</x-button>
<span class="name">{{name}}</span>
&nbsp;
<span v-if="name" class="copy-name" @click="_copyName" :data-clipboard-text="name"><i class="iconfont" data-container="body" data-toggle="tooltip" title="复制名称" >&#xe61e;</i></span>
@ -68,10 +79,9 @@
type="primary"
size="xsmall"
:loading="spinnerLoading"
v-ps="['GENERAL_USER']"
@click="_saveChart"
icon="fa fa-save"
:disabled="isDetails">
>
{{spinnerLoading ? 'Loading...' : $t('Save')}}
</x-button>
</div>
@ -205,9 +215,9 @@
* @param item
*/
_getDagId (v) {
if (this.isDetails) {
return
}
// if (this.isDetails) {
// return
// }
this.dagBarId = v
},
/**
@ -239,11 +249,12 @@
})
},
_operationClass (item) {
if (item.disable) {
return this.toolOperCode === item.code ? 'active' : ''
} else {
return 'disable'
}
return this.toolOperCode === item.code ? 'active' : ''
// if (item.disable) {
// return this.toolOperCode === item.code ? 'active' : ''
// } else {
// return 'disable'
// }
},
/**
* Storage interface
@ -383,6 +394,13 @@
_toggleView () {
findComponentDownward(this.$root, `assist-dag-index`)._toggleView()
},
/**
* Starting parameters
*/
_toggleParam () {
findComponentDownward(this.$root, `starting-params-dag-index`)._toggleParam()
},
/**
* Create a node popup layer
* @param Object id
@ -441,8 +459,6 @@
'tasks': {
deep: true,
handler (o) {
console.log('+++++ save dag params +++++')
console.log(o)
// Edit state does not allow deletion of node a...
this.setIsEditDag(true)

2
escheduler-ui/src/js/conf/home/pages/dag/_source/formModel/formModel.vue

@ -165,7 +165,7 @@
<div class="bottom-box">
<div class="submit" style="background: #fff;">
<x-button type="text" @click="close()"> {{$t('Cancel')}} </x-button>
<x-button type="primary" shape="circle" :loading="spinnerLoading" @click="ok()" :disabled="isDetails" v-ps="['GENERAL_USER']">{{spinnerLoading ? 'Loading...' : $t('Confirm add')}} </x-button>
<x-button type="primary" shape="circle" :loading="spinnerLoading" @click="ok()" :disabled="isDetails">{{spinnerLoading ? 'Loading...' : $t('Confirm add')}} </x-button>
</div>
</div>
</div>

2
escheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/mr.vue

@ -3,7 +3,7 @@
<m-list-box>
<div slot="text">{{$t('Program Type')}}</div>
<div slot="content">
<x-select v-model="programType" :disabled="isDetails" style="width: 100px;">
<x-select v-model="programType" :disabled="isDetails" style="width: 110px;">
<x-option
v-for="city in programTypeList"
:key="city.code"

2
escheduler-ui/src/js/conf/home/pages/dag/_source/plugIn/jsPlumbHandle.js

@ -71,7 +71,7 @@ JSP.prototype.init = function ({ dag, instance }) {
this.setConfig({
isDrag: !store.state.dag.isDetails,
isAttachment: false,
isNewNodes: Permissions.getAuth() === false ? false : !store.state.dag.isDetails,
isNewNodes: !store.state.dag.isDetails, // Permissions.getAuth() === false ? false : !store.state.dag.isDetails
isDblclick: true,
isContextmenu: true,
isClick: false

114
escheduler-ui/src/js/conf/home/pages/dag/_source/startingParam/index.vue

@ -0,0 +1,114 @@
<template>
<div class="starting-params-dag-index">
<template v-if="isView && isActive">
<div class="box">
<p class="box-hd"><i class="fa fa-chevron-circle-right"></i><b>{{$t('Startup parameter')}}</b></p>
<ul class="box-bd">
<li><span>{{$t('Startup type')}}</span><span>{{_rtRunningType(startupParam.commandType)}}</span></li>
<li><span>{{$t('Complement range')}}</span><span v-if="startupParam.commandParam && startupParam.commandParam.complementStartDate">{{startupParam.commandParam.complementStartDate}}-{{startupParam.commandParam.complementEndDate}}</span><span v-else>-</span></li>
<li><span>{{$t('Failure Strategy')}}</span><span>{{startupParam.failureStrategy === 'END' ? $t('End') : $t('Continue')}}</span></li>
<li><span>{{$t('Process priority')}}</span><span>{{startupParam.processInstancePriority}}</span></li>
<li><span>{{$t('Worker group')}}</span><span v-if="workerGroupList.length">{{_rtWorkerGroupName(startupParam.workerGroupId)}}</span></li>
<li><span>{{$t('Notification strategy')}}</span><span>{{_rtWarningType(startupParam.warningType)}}</span></li>
<li><span>{{$t('Notification group')}}</span><span v-if="notifyGroupList.length">{{_rtNotifyGroupName(startupParam.warningGroupId)}}</span></li>
<li><span>{{$t('Recipient')}}</span><span>{{startupParam.receivers || '-'}}</span></li>
<li><span>{{$t('Cc')}}</span><span>{{startupParam.receiversCc || '-'}}</span></li>
</ul>
</div>
</template>
</div>
</template>
<script>
import store from '@/conf/home/store'
import _ from 'lodash'
import { runningType } from '@/conf/home/pages/dag/_source/config'
import { warningTypeList } from '@/conf/home/pages/projects/pages/definition/pages/list/_source/util'
export default {
name: 'starting-params-dag-index',
data () {
return {
store,
startupParam: store.state.dag.startup,
isView: false,
isActive: true,
notifyGroupList: [],
workerGroupList: []
}
},
methods: {
_toggleParam () {
this.isView = !this.isView
},
_rtRunningType (code) {
return _.filter(runningType, v => v.code === code)[0].desc
},
_rtWarningType (id) {
return _.filter(warningTypeList, v => v.id === id)[0].code
},
_rtNotifyGroupName (id) {
let o = _.filter(this.notifyGroupList, v => v.id === id)
if (o && o.length) {
return o[0].code
}
return '-'
},
_rtWorkerGroupName (id) {
let o = _.filter(this.workerGroupList, v => v.id === id)
if (o && o.length) {
return o[0].name
}
return '-'
},
_getNotifyGroupList () {
let notifyGroupListS = _.cloneDeep(this.store.state.dag.notifyGroupListS) || []
if (!notifyGroupListS.length) {
this.store.dispatch('dag/getNotifyGroupList').then(res => {
this.notifyGroupList = res
})
} else {
this.notifyGroupList = notifyGroupListS
}
},
_getWorkerGroupList () {
let stateWorkerGroupsList = this.store.state.security.workerGroupsListAll || []
if (!stateWorkerGroupsList.length) {
this.store.dispatch('security/getWorkerGroupsAll').then(res => {
this.workerGroupList = res
})
} else {
this.workerGroupList = stateWorkerGroupsList
}
}
},
watch: {
'$route': {
deep: true,
handler () {
this.isActive = false
this.$nextTick(() => (this.isActive = true))
}
}
},
mounted () {
this._getNotifyGroupList()
this._getWorkerGroupList()
}
}
</script>
<style lang="scss">
.starting-params-dag-index {
.box {
padding: 5px 10px 10px;
.box-hd {
.fa {
color: #0097e0;
margin-right: 4px;
}
font-size: 16px;
}
.box-bd {
margin-left: 20px;
}
}
}
</style>

56
escheduler-ui/src/js/conf/home/pages/dag/_source/udp/_source/selectTenant.vue

@ -0,0 +1,56 @@
<template>
<x-select
:disabled="isDetails"
@on-change="_onChange"
v-model="value"
style="width: 180px">
<x-option
v-for="item in itemList"
:key="item.id"
:value="item.id"
:label="item.tenantName">
</x-option>
</x-select>
</template>
<script>
import disabledState from '@/module/mixin/disabledState'
export default {
name: 'form-tenant',
data () {
return {
itemList: []
}
},
mixins: [disabledState],
props: {
value: {
type: Number,
default: -1
}
},
model: {
prop: 'value',
event: 'tenantSelectEvent'
},
methods: {
_onChange (o) {
this.value = o.value
this.$emit('tenantSelectEvent', o.value)
}
},
watch: {
},
created () {
let stateTenantAllList = this.store.state.security.tenantAllList || []
if (stateTenantAllList.length) {
this.itemList = stateTenantAllList
} else {
this.store.dispatch('security/getTenantList').then(res => {
this.$nextTick(() => {
this.itemList = res
})
})
}
}
}
</script>

Some files were not shown because too many files have changed in this diff.