
Initial module escheduler-common commit

pull/1/head
ligang 5 years ago
parent commit e6f14cf33d
  1. 380  escheduler-common/pom.xml
  2. 815  escheduler-common/src/main/java/cn/escheduler/common/Constants.java
  3. 29  escheduler-common/src/main/java/cn/escheduler/common/IStoppable.java
  4. 27  escheduler-common/src/main/java/cn/escheduler/common/enums/AlertStatus.java
  5. 27  escheduler-common/src/main/java/cn/escheduler/common/enums/AlertType.java
  6. 40  escheduler-common/src/main/java/cn/escheduler/common/enums/CommandType.java
  7. 28  escheduler-common/src/main/java/cn/escheduler/common/enums/CycleEnum.java
  8. 35  escheduler-common/src/main/java/cn/escheduler/common/enums/DataType.java
  9. 30  escheduler-common/src/main/java/cn/escheduler/common/enums/DbType.java
  10. 31  escheduler-common/src/main/java/cn/escheduler/common/enums/DependResult.java
  11. 29  escheduler-common/src/main/java/cn/escheduler/common/enums/DependStrategy.java
  12. 25  escheduler-common/src/main/java/cn/escheduler/common/enums/DependentRelation.java
  13. 27  escheduler-common/src/main/java/cn/escheduler/common/enums/Direct.java
  14. 101  escheduler-common/src/main/java/cn/escheduler/common/enums/ExecutionStatus.java
  15. 30  escheduler-common/src/main/java/cn/escheduler/common/enums/FailureStrategy.java
  16. 33  escheduler-common/src/main/java/cn/escheduler/common/enums/Flag.java
  17. 31  escheduler-common/src/main/java/cn/escheduler/common/enums/Priority.java
  18. 30  escheduler-common/src/main/java/cn/escheduler/common/enums/ProgramType.java
  19. 40  escheduler-common/src/main/java/cn/escheduler/common/enums/ReleaseState.java
  20. 27  escheduler-common/src/main/java/cn/escheduler/common/enums/ResourceType.java
  21. 28  escheduler-common/src/main/java/cn/escheduler/common/enums/RunMode.java
  22. 29  escheduler-common/src/main/java/cn/escheduler/common/enums/SelfDependStrategy.java
  23. 34  escheduler-common/src/main/java/cn/escheduler/common/enums/ShowType.java
  24. 30  escheduler-common/src/main/java/cn/escheduler/common/enums/TaskDependType.java
  25. 67  escheduler-common/src/main/java/cn/escheduler/common/enums/TaskStateType.java
  26. 29  escheduler-common/src/main/java/cn/escheduler/common/enums/TaskTimeoutStrategy.java
  27. 34  escheduler-common/src/main/java/cn/escheduler/common/enums/TaskType.java
  28. 27  escheduler-common/src/main/java/cn/escheduler/common/enums/UdfType.java
  29. 28  escheduler-common/src/main/java/cn/escheduler/common/enums/UserType.java
  30. 31  escheduler-common/src/main/java/cn/escheduler/common/enums/WarningType.java
  31. 519  escheduler-common/src/main/java/cn/escheduler/common/graph/DAG.java
  32. 100  escheduler-common/src/main/java/cn/escheduler/common/job/db/BaseDataSource.java
  33. 50  escheduler-common/src/main/java/cn/escheduler/common/job/db/DataSourceFactory.java
  34. 77  escheduler-common/src/main/java/cn/escheduler/common/job/db/HiveDataSource.java
  35. 73  escheduler-common/src/main/java/cn/escheduler/common/job/db/MySQLDataSource.java
  36. 77  escheduler-common/src/main/java/cn/escheduler/common/job/db/PostgreDataSource.java
  37. 77  escheduler-common/src/main/java/cn/escheduler/common/job/db/SparkDataSource.java
  38. 62  escheduler-common/src/main/java/cn/escheduler/common/model/DateInterval.java
  39. 80  escheduler-common/src/main/java/cn/escheduler/common/model/DependentItem.java
  40. 44  escheduler-common/src/main/java/cn/escheduler/common/model/DependentTaskModel.java
  41. 308  escheduler-common/src/main/java/cn/escheduler/common/model/TaskNode.java
  42. 67  escheduler-common/src/main/java/cn/escheduler/common/model/TaskNodeRelation.java
  43. 85  escheduler-common/src/main/java/cn/escheduler/common/process/ProcessDag.java
  44. 143  escheduler-common/src/main/java/cn/escheduler/common/process/Property.java
  45. 37  escheduler-common/src/main/java/cn/escheduler/common/process/ResourceInfo.java
  46. 91  escheduler-common/src/main/java/cn/escheduler/common/queue/ITaskQueue.java
  47. 61  escheduler-common/src/main/java/cn/escheduler/common/queue/TaskQueueFactory.java
  48. 365  escheduler-common/src/main/java/cn/escheduler/common/queue/TaskQueueZkImpl.java
  49. 341  escheduler-common/src/main/java/cn/escheduler/common/shell/AbstractShell.java
  50. 175  escheduler-common/src/main/java/cn/escheduler/common/shell/ShellExecutor.java
  51. 69  escheduler-common/src/main/java/cn/escheduler/common/task/AbstractParameters.java
  52. 38  escheduler-common/src/main/java/cn/escheduler/common/task/IParameters.java
  53. 81  escheduler-common/src/main/java/cn/escheduler/common/task/TaskTimeoutParameter.java
  54. 58  escheduler-common/src/main/java/cn/escheduler/common/task/dependent/DependentParameters.java
  55. 145  escheduler-common/src/main/java/cn/escheduler/common/task/mr/MapreduceParameters.java
  56. 89  escheduler-common/src/main/java/cn/escheduler/common/task/procedure/ProcedureParameters.java
  57. 67  escheduler-common/src/main/java/cn/escheduler/common/task/python/PythonParameters.java
  58. 70  escheduler-common/src/main/java/cn/escheduler/common/task/shell/ShellParameters.java
  59. 220  escheduler-common/src/main/java/cn/escheduler/common/task/spark/SparkParameters.java
  60. 147  escheduler-common/src/main/java/cn/escheduler/common/task/sql/SqlParameters.java
  61. 27  escheduler-common/src/main/java/cn/escheduler/common/task/sql/SqlType.java
  62. 48  escheduler-common/src/main/java/cn/escheduler/common/task/subprocess/SubProcessParameters.java
  63. 39  escheduler-common/src/main/java/cn/escheduler/common/thread/Stopper.java
  64. 310  escheduler-common/src/main/java/cn/escheduler/common/thread/ThreadPoolExecutors.java
  65. 202  escheduler-common/src/main/java/cn/escheduler/common/thread/ThreadUtils.java
  66. 697  escheduler-common/src/main/java/cn/escheduler/common/utils/Bytes.java
  67. 292  escheduler-common/src/main/java/cn/escheduler/common/utils/CollectionUtils.java
  68. 73  escheduler-common/src/main/java/cn/escheduler/common/utils/CommonUtils.java
  69. 313  escheduler-common/src/main/java/cn/escheduler/common/utils/DateUtils.java
  70. 135  escheduler-common/src/main/java/cn/escheduler/common/utils/DependentUtils.java
  71. 37  escheduler-common/src/main/java/cn/escheduler/common/utils/EncryptionUtils.java
  72. 36  escheduler-common/src/main/java/cn/escheduler/common/utils/EnumFieldUtil.java
  73. 372  escheduler-common/src/main/java/cn/escheduler/common/utils/FileUtils.java
  74. 486  escheduler-common/src/main/java/cn/escheduler/common/utils/HadoopUtils.java
  75. 100  escheduler-common/src/main/java/cn/escheduler/common/utils/HttpUtils.java
  76. 258  escheduler-common/src/main/java/cn/escheduler/common/utils/JSONUtils.java
  77. 297  escheduler-common/src/main/java/cn/escheduler/common/utils/OSUtils.java
  78. 162  escheduler-common/src/main/java/cn/escheduler/common/utils/ParameterUtils.java
  79. 192  escheduler-common/src/main/java/cn/escheduler/common/utils/PropertyUtils.java
  80. 74  escheduler-common/src/main/java/cn/escheduler/common/utils/TaskParametersUtils.java
  81. 134  escheduler-common/src/main/java/cn/escheduler/common/utils/dependent/DependentDateUtils.java
  82. 65  escheduler-common/src/main/java/cn/escheduler/common/utils/placeholder/BusinessTimeUtils.java
  83. 99  escheduler-common/src/main/java/cn/escheduler/common/utils/placeholder/PlaceholderUtils.java
  84. 254  escheduler-common/src/main/java/cn/escheduler/common/utils/placeholder/PropertyPlaceholderHelper.java
  85. 512  escheduler-common/src/main/java/cn/escheduler/common/utils/placeholder/TimePlaceholderUtils.java
  86. 340  escheduler-common/src/main/java/cn/escheduler/common/zk/AbstractZKClient.java
  87. 110  escheduler-common/src/main/resources/bin/escheduler-daemon.sh
  88. 28  escheduler-common/src/main/resources/common/common.properties
  89. 8  escheduler-common/src/main/resources/common/hadoop/hadoop.properties
  90. 39  escheduler-common/src/main/resources/quartz.properties
  91. 24  escheduler-common/src/main/resources/zookeeper.properties
  92. 355  escheduler-common/src/test/java/cn/escheduler/common/graph/DAGTest.java
  93. 140  escheduler-common/src/test/java/cn/escheduler/common/os/OSUtilsTest.java
  94. 112  escheduler-common/src/test/java/cn/escheduler/common/os/OshiTest.java
  95. 112  escheduler-common/src/test/java/cn/escheduler/common/queue/TaskQueueImplTest.java
  96. 76  escheduler-common/src/test/java/cn/escheduler/common/shell/ShellExecutorTest.java
  97. 53  escheduler-common/src/test/java/cn/escheduler/common/threadutils/ThreadPoolExecutorsTest.java
  98. 63  escheduler-common/src/test/java/cn/escheduler/common/utils/CollectionUtilsTest.java
  99. 61  escheduler-common/src/test/java/cn/escheduler/common/utils/CommonUtilsTest.java
  100. 57  escheduler-common/src/test/java/cn/escheduler/common/utils/DateUtilsTest.java
Some files were not shown because too many files have changed in this diff.

380
escheduler-common/pom.xml

@@ -0,0 +1,380 @@
<?xml version="1.0"?>
<project
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<modelVersion>4.0.0</modelVersion>
<parent>
<artifactId>escheduler</artifactId>
<groupId>cn.analysys</groupId>
<version>1.0.0</version>
</parent>
<artifactId>escheduler-common</artifactId>
<name>escheduler-common</name>
<url>http://maven.apache.org</url>
<packaging>jar</packaging>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
</properties>
<dependencies>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>fastjson</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>commons-configuration</groupId>
<artifactId>commons-configuration</artifactId>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-annotations</artifactId>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-client</artifactId>
<version>2.12.0</version>
<exclusions>
<exclusion>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
</exclusion>
<exclusion>
<groupId>io.netty</groupId>
<artifactId>netty</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
<exclusion>
<groupId>jdk.tools</groupId>
<artifactId>jdk.tools</artifactId>
</exclusion>
<exclusion>
<artifactId>servlet-api</artifactId>
<groupId>javax.servlet</groupId>
</exclusion>
<exclusion>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
</exclusion>
<exclusion>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.curator</groupId>
<artifactId>curator-client</artifactId>
</exclusion>
<exclusion>
<groupId>commons-configuration</groupId>
<artifactId>commons-configuration</artifactId>
</exclusion>
<exclusion>
<groupId>io.grpc</groupId>
<artifactId>grpc-protobuf</artifactId>
</exclusion>
<exclusion>
<groupId>io.netty</groupId>
<artifactId>netty</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-core-asl</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-mapper-asl</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.protobuf</groupId>
<artifactId>jackson-mapper-asl</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.code.gson</groupId>
<artifactId>gson</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.commons</groupId>
<artifactId>commons-math3</artifactId>
</exclusion>
<exclusion>
<groupId>xmlenc</groupId>
<artifactId>xmlenc</artifactId>
</exclusion>
<exclusion>
<groupId>commons-net</groupId>
<artifactId>commons-net</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.avro</groupId>
<artifactId>avro</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
</exclusion>
<exclusion>
<artifactId>jsr305</artifactId>
<groupId>com.google.code.findbugs</groupId>
</exclusion>
<exclusion>
<groupId>javax.servlet.jsp</groupId>
<artifactId>jsp-api</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
<exclusion>
<artifactId>servlet-api</artifactId>
<groupId>javax.servlet</groupId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-jaxrs</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-xc</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-java</artifactId>
</exclusion>
<exclusion>
<groupId>org.fusesource.leveldbjni</groupId>
<artifactId>leveldbjni-all</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-shuffle</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>javax.servlet-api</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<exclusions>
<exclusion>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
</exclusion>
<exclusion>
<groupId>io.netty</groupId>
<artifactId>netty</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-java</artifactId>
</exclusion>
<exclusion>
<groupId>xmlenc</groupId>
<artifactId>xmlenc</artifactId>
</exclusion>
<exclusion>
<groupId>io.netty</groupId>
<artifactId>netty-all</artifactId>
</exclusion>
<exclusion>
<groupId>org.fusesource.leveldbjni</groupId>
<artifactId>leveldbjni-all</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
</dependency>
<dependency>
<groupId>org.postgresql</groupId>
<artifactId>postgresql</artifactId>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-jdbc</artifactId>
<exclusions>
<exclusion>
<artifactId>slf4j-log4j12</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
<exclusion>
<groupId>org.eclipse.jetty.aggregate</groupId>
<artifactId>jetty-all</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.ant</groupId>
<artifactId>ant</artifactId>
</exclusion>
<exclusion>
<groupId>io.dropwizard.metrics</groupId>
<artifactId>metrics-json</artifactId>
</exclusion>
<exclusion>
<groupId>io.dropwizard.metrics</groupId>
<artifactId>metrics-jvm</artifactId>
</exclusion>
<exclusion>
<groupId>com.github.joshelser</groupId>
<artifactId>dropwizard-metrics-hadoop-metrics2-reporter</artifactId>
</exclusion>
<exclusion>
<groupId>io.netty</groupId>
<artifactId>netty-all</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.code.gson</groupId>
<artifactId>gson</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.code.findbugs</groupId>
<artifactId>jsr305</artifactId>
</exclusion>
<exclusion>
<groupId>io.dropwizard.metrics</groupId>
<artifactId>metrics-core</artifactId>
</exclusion>
<exclusion>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.avro</groupId>
<artifactId>avro</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.commons</groupId>
<artifactId>commons-compress</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.curator</groupId>
<artifactId>curator-client</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-auth</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-core</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-api</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-jaxrs</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-xc</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-java</artifactId>
</exclusion>
<exclusion>
<groupId>org.json</groupId>
<artifactId>json</artifactId>
</exclusion>
<exclusion>
<artifactId>log4j-slf4j-impl</artifactId>
<groupId>org.apache.logging.log4j</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.mybatis</groupId>
<artifactId>mybatis</artifactId>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-classic</artifactId>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-core</artifactId>
</dependency>
<dependency>
<groupId>com.github.oshi</groupId>
<artifactId>oshi-core</artifactId>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<source>${java.version}</source>
<target>${java.version}</target>
<encoding>${project.build.sourceEncoding}</encoding>
</configuration>
</plugin>
</plugins>
</build>
</project>

815
escheduler-common/src/main/java/cn/escheduler/common/Constants.java

@@ -0,0 +1,815 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common;
import cn.escheduler.common.utils.OSUtils;
import java.util.regex.Pattern;
/**
* Constants
*/
public final class Constants {
/**
* zookeeper properties path
*/
public static final String ZOOKEEPER_PROPERTIES_PATH = "zookeeper.properties";
/**
* worker properties path
*/
public static final String WORKER_PROPERTIES_PATH = "worker.properties";
/**
* master properties path
*/
public static final String MASTER_PROPERTIES_PATH = "master.properties";
/**
* hadoop properties path
*/
public static final String HADOOP_PROPERTIES_PATH = "/common/hadoop/hadoop.properties";
/**
* common properties path
*/
public static final String COMMON_PROPERTIES_PATH = "/common/common.properties";
/**
* dao properties path
*/
public static final String DAO_PROPERTIES_PATH = "/dao/data_source.properties";
/**
* fs.defaultFS
*/
public static final String FS_DEFAULTFS = "fs.defaultFS";
/**
* yarn.resourcemanager.ha.rm.ids
*/
public static final String YARN_RESOURCEMANAGER_HA_RM_IDS = "yarn.resourcemanager.ha.rm.ids";
/**
* yarn.application.status.address
*/
public static final String YARN_APPLICATION_STATUS_ADDRESS = "yarn.application.status.address";
/**
* spring.redis.maxIdle
*/
public static final String SPRING_REDIS_MAXIDLE = "spring.redis.maxIdle";
/**
* spring.redis.maxTotal
*/
public static final String SPRING_REDIS_MAXTOTAL = "spring.redis.maxTotal";
/**
* spring.redis.host
*/
public static final String SPRING_REDIS_HOST = "spring.redis.host";
/**
* spring.redis.port
*/
public static final String SPRING_REDIS_PORT = "spring.redis.port";
/**
* hdfs configuration
* data.store2hdfs.basepath
*/
public static final String DATA_STORE_2_HDFS_BASEPATH = "data.store2hdfs.basepath";
/**
* data.basedir.path
*/
public static final String DATA_BASEDIR_PATH = "data.basedir.path";
/**
* data.download.basedir.path
*/
public static final String DATA_DOWNLOAD_BASEDIR_PATH = "data.download.basedir.path";
/**
* process.exec.basepath
*/
public static final String PROCESS_EXEC_BASEPATH = "process.exec.basepath";
/**
* escheduler.env.path
*/
public static final String ESCHEDULER_ENV_PATH = "escheduler.env.path";
/**
* escheduler.env.py
*/
public static final String ESCHEDULER_ENV_PY = "escheduler.env.py";
/**
* resource.view.suffixs
*/
public static final String RESOURCE_VIEW_SUFFIXS = "resource.view.suffixs";
/**
* development.state
*/
public static final String DEVELOPMENT_STATE = "development.state";
/**
* hdfs.startup.state
*/
public static final String HDFS_STARTUP_STATE = "hdfs.startup.state";
/**
* zookeeper quorum
*/
public static final String ZOOKEEPER_QUORUM = "zookeeper.quorum";
/**
* MasterServer directory registered in zookeeper
*/
public static final String ZOOKEEPER_ESCHEDULER_MASTERS = "zookeeper.escheduler.masters";
/**
* WorkerServer directory registered in zookeeper
*/
public static final String ZOOKEEPER_ESCHEDULER_WORKERS = "zookeeper.escheduler.workers";
/**
* all servers directory registered in zookeeper
*/
public static final String ZOOKEEPER_ESCHEDULER_DEAD_SERVERS = "zookeeper.escheduler.dead.servers";
/**
* MasterServer lock directory registered in zookeeper
*/
public static final String ZOOKEEPER_ESCHEDULER_LOCK_MASTERS = "zookeeper.escheduler.lock.masters";
/**
* WorkerServer lock directory registered in zookeeper
*/
public static final String ZOOKEEPER_ESCHEDULER_LOCK_WORKERS = "zookeeper.escheduler.lock.workers";
/**
* MasterServer failover directory registered in zookeeper
*/
public static final String ZOOKEEPER_ESCHEDULER_LOCK_FAILOVER_MASTERS = "zookeeper.escheduler.lock.failover.masters";
/**
* WorkerServer failover directory registered in zookeeper
*/
public static final String ZOOKEEPER_ESCHEDULER_LOCK_FAILOVER_WORKERS = "zookeeper.escheduler.lock.failover.workers";
/**
* need send warn times when master server or worker server failover
*/
public static final int ESCHEDULER_WARN_TIMES_FAILOVER = 3;
/**
* comma ,
*/
public static final String COMMA = ",";
/**
* COLON :
*/
public static final String COLON = ":";
/**
* SINGLE_SLASH /
*/
public static final String SINGLE_SLASH = "/";
/**
* DOUBLE_SLASH //
*/
public static final String DOUBLE_SLASH = "//";
/**
* SEMICOLON ;
*/
public static final String SEMICOLON = ";";
/**
* ZOOKEEPER_SESSION_TIMEOUT
*/
public static final String ZOOKEEPER_SESSION_TIMEOUT = "zookeeper.session.timeout";
public static final String ZOOKEEPER_CONNECTION_TIMEOUT = "zookeeper.connection.timeout";
public static final String ZOOKEEPER_RETRY_SLEEP = "zookeeper.retry.sleep";
public static final String ZOOKEEPER_RETRY_MAXTIME = "zookeeper.retry.maxtime";
public static final String MASTER_HEARTBEAT_INTERVAL = "master.heartbeat.interval";
public static final String MASTER_EXEC_THREADS = "master.exec.threads";
public static final String MASTER_EXEC_TASK_THREADS = "master.exec.task.number";
public static final String MASTER_COMMIT_RETRY_TIMES = "master.task.commit.retryTimes";
public static final String MASTER_COMMIT_RETRY_INTERVAL = "master.task.commit.interval";
public static final String WORKER_EXEC_THREADS = "worker.exec.threads";
public static final String WORKER_HEARTBEAT_INTERVAL = "worker.heartbeat.interval";
public static final String WORKER_FETCH_TASK_NUM = "worker.fetch.task.num";
public static final String WORKER_MAX_CPULOAD_AVG = "worker.max.cpuload.avg";
public static final String WORKER_RESERVED_MEMORY = "worker.reserved.memory";
public static final String MASTER_MAX_CPULOAD_AVG = "master.max.cpuload.avg";
public static final String MASTER_RESERVED_MEMORY = "master.reserved.memory";
/**
* escheduler tasks queue
*/
public static final String SCHEDULER_TASKS_QUEUE = "tasks_queue";
public static final String SCHEDULER_TASKS_KILL = "tasks_kill";
public static final String ZOOKEEPER_SCHEDULER_ROOT = "zookeeper.escheduler.root";
public static final String SCHEDULER_QUEUE_IMPL = "escheduler.queue.impl";
public static final String SCHEDULER_QUEUE_REDIS_IMPL = "redis";
/**
* date format of yyyy-MM-dd HH:mm:ss
*/
public static final String YYYY_MM_DD_HH_MM_SS = "yyyy-MM-dd HH:mm:ss";
/**
* date format of yyyyMMddHHmmss
*/
public static final String YYYYMMDDHHMMSS = "yyyyMMddHHmmss";
/**
* http connect time out
*/
public static final int HTTP_CONNECT_TIMEOUT = 60 * 1000;
/**
* http connect request time out
*/
public static final int HTTP_CONNECTION_REQUEST_TIMEOUT = 60 * 1000;
/**
* httpclient socket timeout
*/
public static final int SOCKET_TIMEOUT = 60 * 1000;
/**
* http header
*/
public static final String HTTP_HEADER_UNKNOWN = "unKnown";
/**
* http X-Forwarded-For
*/
public static final String HTTP_X_FORWARDED_FOR = "X-Forwarded-For";
/**
* http X-Real-IP
*/
public static final String HTTP_X_REAL_IP = "X-Real-IP";
/**
* UTF-8
*/
public static final String UTF_8 = "UTF-8";
/**
* user name regex
*/
public static final Pattern REGEX_USER_NAME = Pattern.compile("[a-zA-Z0-9]{3,20}");
/**
* email regex
*/
public static final Pattern REGEX_MAIL_NAME = Pattern.compile("^[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+(\\.[a-zA-Z0-9_-]+)+$");
/**
* read permission
*/
public static final int READ_PERMISSION = 2 * 1;
/**
* write permission
*/
public static final int WRITE_PERMISSION = 2 * 2;
/**
* execute permission
*/
public static final int EXECUTE_PERMISSION = 1;
/**
* default admin permission
*/
public static final int DEFAULT_ADMIN_PERMISSION = 7;
/**
* all permissions
*/
public static final int ALL_PERMISSIONS = READ_PERMISSION | WRITE_PERMISSION | EXECUTE_PERMISSION;
/**
* max task timeout
*/
public static final int MAX_TASK_TIMEOUT = 24 * 3600;
/**
* heartbeat threads number
*/
public static final int defaulWorkerHeartbeatThreadNum = 5;
/**
* heartbeat interval
*/
public static final int defaultWorkerHeartbeatInterval = 60;
/**
* worker fetch task number
*/
public static final int defaultWorkerFetchTaskNum = 1;
/**
* worker execute threads number
*/
public static final int defaultWorkerExecThreadNum = 10;
/**
* master cpu load
*/
public static final int defaultMasterCpuLoad = Runtime.getRuntime().availableProcessors() * 2;
/**
* master reserved memory
*/
public static final double defaultMasterReservedMemory = OSUtils.totalMemorySize() / 10;
/**
* worker cpu load
*/
public static final int defaultWorkerCpuLoad = Runtime.getRuntime().availableProcessors() * 2;
/**
* worker reserved memory
*/
public static final double defaultWorkerReservedMemory = OSUtils.totalMemorySize() / 10;
/**
* master execute threads number
*/
public static final int defaultMasterExecThreadNum = 100;
/**
* default master concurrent task execute num
*/
public static final int defaultMasterTaskExecNum = 20;
/**
* default log cache rows num, output when the row count is reached
*/
public static final int defaultLogRowsNum = 4 * 16;
/**
* log flush interval, output when the interval elapses
*/
public static final int defaultLogFlushInterval = 1000;
/**
* default master heartbeat thread number
*/
public static final int defaulMasterHeartbeatThreadNum = 5;
/**
* default master heartbeat interval
*/
public static final int defaultMasterHeartbeatInterval = 60;
/**
* default master commit retry times
*/
public static final int defaultMasterCommitRetryTimes = 5;
/**
* default master commit retry interval
*/
public static final int defaultMasterCommitRetryInterval = 100;
/**
* time unit, seconds to minutes
*/
public static final int SEC_2_MINUTES_TIME_UNIT = 60;
/**
* rpc port
*/
public static final int RPC_PORT = 50051;
/**
* forbid running task
*/
public static final String FLOWNODE_RUN_FLAG_FORBIDDEN = "FORBIDDEN";
/**
* task record configuration path
*/
public static final String TASK_RECORD_PROPERTIES_PATH = "dao/data_source.properties";
public static final String TASK_RECORD_URL = "task.record.datasource.url";
public static final String TASK_RECORD_FLAG = "task.record.flag";
public static final String TASK_RECORD_USER = "task.record.datasource.username";
public static final String TASK_RECORD_PWD = "task.record.datasource.password";
public static final String STATUS = "status";
/**
* command parameter keys
*/
public static final String CMDPARAM_RECOVER_PROCESS_ID_STRING = "ProcessInstanceId";
public static final String CMDPARAM_RECOVERY_START_NODE_STRING = "StartNodeIdList";
public static final String CMDPARAM_RECOVERY_WAITTING_THREAD = "WaittingThreadInstanceId";
public static final String CMDPARAM_SUB_PROCESS = "processInstanceId";
public static final String CMDPARAM_EMPTY_SUB_PROCESS = "0";
public static final String CMDPARAM_SUB_PROCESS_PARENT_INSTANCE_ID = "parentProcessInstanceId";
public static final String CMDPARAM_SUB_PROCESS_DEFINE_ID = "processDefinitionId";
public static final String CMDPARAM_START_NODE_NAMES = "StartNodeNameList";
/**
* complement data start date
*/
public static final String CMDPARAM_COMPLEMENT_DATA_START_DATE = "complementStartDate";
/**
* complement data end date
*/
public static final String CMDPARAM_COMPLEMENT_DATA_END_DATE = "complementEndDate";
/**
* hadoop configuration
*/
public static final String HADOOP_RM_STATE_ACTIVE = "ACTIVE";
public static final String HADOOP_RM_STATE_STANDBY = "STANDBY";
public static final String HADOOP_RESOURCE_MANAGER_HTTPADDRESS_PORT = "resource.manager.httpaddress.port";
/**
* data source config
*/
public static final String SPRING_DATASOURCE_DRIVER_CLASS_NAME = "spring.datasource.driver-class-name";
public static final String SPRING_DATASOURCE_URL = "spring.datasource.url";
public static final String SPRING_DATASOURCE_USERNAME = "spring.datasource.username";
public static final String SPRING_DATASOURCE_PASSWORD = "spring.datasource.password";
public static final String SPRING_DATASOURCE_VALIDATION_QUERY_TIMEOUT = "spring.datasource.validationQueryTimeout";
public static final String SPRING_DATASOURCE_INITIAL_SIZE = "spring.datasource.initialSize";
public static final String SPRING_DATASOURCE_MIN_IDLE = "spring.datasource.minIdle";
public static final String SPRING_DATASOURCE_MAX_ACTIVE = "spring.datasource.maxActive";
public static final String SPRING_DATASOURCE_MAX_WAIT = "spring.datasource.maxWait";
public static final String SPRING_DATASOURCE_TIME_BETWEEN_EVICTION_RUNS_MILLIS = "spring.datasource.timeBetweenEvictionRunsMillis";
public static final String SPRING_DATASOURCE_TIME_BETWEEN_CONNECT_ERROR_MILLIS = "spring.datasource.timeBetweenConnectErrorMillis";
public static final String SPRING_DATASOURCE_MIN_EVICTABLE_IDLE_TIME_MILLIS = "spring.datasource.minEvictableIdleTimeMillis";
public static final String SPRING_DATASOURCE_VALIDATION_QUERY = "spring.datasource.validationQuery";
public static final String SPRING_DATASOURCE_TEST_WHILE_IDLE = "spring.datasource.testWhileIdle";
public static final String SPRING_DATASOURCE_TEST_ON_BORROW = "spring.datasource.testOnBorrow";
public static final String SPRING_DATASOURCE_TEST_ON_RETURN = "spring.datasource.testOnReturn";
public static final String SPRING_DATASOURCE_POOL_PREPARED_STATEMENTS = "spring.datasource.poolPreparedStatements";
public static final String SPRING_DATASOURCE_DEFAULT_AUTO_COMMIT = "spring.datasource.defaultAutoCommit";
public static final String SPRING_DATASOURCE_KEEP_ALIVE = "spring.datasource.keepAlive";
public static final String SPRING_DATASOURCE_MAX_POOL_PREPARED_STATEMENT_PER_CONNECTION_SIZE = "spring.datasource.maxPoolPreparedStatementPerConnectionSize";
public static final String DEVELOPMENT = "development";
public static final String QUARTZ_PROPERTIES_PATH = "quartz.properties";
/**
* sleep time
*/
public static final int SLEEP_TIME_MILLIS = 1000;
/**
* heartbeat for zk info length
*/
public static final int HEARTBEAT_FOR_ZOOKEEPER_INFO_LENGTH = 6;
/**
* hadoop params constant
*/
/**
* jar
*/
public static final String JAR = "jar";
/**
* hadoop
*/
public static final String HADOOP = "hadoop";
/**
* -D parameter
*/
public static final String D = "-D";
/**
* -D mapreduce.job.queuename=queuename
*/
public static final String MR_QUEUE = "mapreduce.job.queuename";
/**
* jdbc class name
*/
/**
* mysql
*/
public static final String JDBC_MYSQL_CLASS_NAME = "com.mysql.jdbc.Driver";
/**
* postgresql
*/
public static final String JDBC_POSTGRESQL_CLASS_NAME = "org.postgresql.Driver";
/**
* hive
*/
public static final String JDBC_HIVE_CLASS_NAME = "org.apache.hive.jdbc.HiveDriver";
/**
* spark (hive jdbc driver)
*/
public static final String JDBC_SPARK_CLASS_NAME = "org.apache.hive.jdbc.HiveDriver";
/**
* spark params constant
*/
public static final String MASTER = "--master";
public static final String DEPLOY_MODE = "--deploy-mode";
/**
* --class CLASS_NAME
*/
public static final String CLASS = "--class";
/**
* --driver-cores NUM
*/
public static final String DRIVER_CORES = "--driver-cores";
/**
* --driver-memory MEM
*/
public static final String DRIVER_MEMORY = "--driver-memory";
/**
* --num-executors NUM
*/
public static final String NUM_EXECUTORS = "--num-executors";
/**
* --executor-cores NUM
*/
public static final String EXECUTOR_CORES = "--executor-cores";
/**
* --executor-memory MEM
*/
public static final String EXECUTOR_MEMORY = "--executor-memory";
/**
* --queue QUEUE
*/
public static final String SPARK_QUEUE = "--queue";
/**
* exit code success
*/
public static final int EXIT_CODE_SUCCESS = 0;
/**
* exit code kill
*/
public static final int EXIT_CODE_KILL = 137;
/**
* exit code failure
*/
public static final int EXIT_CODE_FAILURE = -1;
/**
* date format of yyyyMMdd
*/
public static final String PARAMETER_FORMAT_DATE = "yyyyMMdd";
/**
* date format of yyyyMMddHHmmss
*/
public static final String PARAMETER_FORMAT_TIME = "yyyyMMddHHmmss";
/**
* system date(yyyyMMddHHmmss)
*/
public static final String PARAMETER_DATETIME = "system.datetime";
/**
* system date(yyyymmdd) today
*/
public static final String PARAMETER_CURRENT_DATE = "system.biz.curdate";
/**
* system date(yyyymmdd) yesterday
*/
public static final String PARAMETER_BUSINESS_DATE = "system.biz.date";
/**
* ACCEPTED
*/
public static final String ACCEPTED = "ACCEPTED";
/**
* SUCCEEDED
*/
public static final String SUCCEEDED = "SUCCEEDED";
/**
* NEW
*/
public static final String NEW = "NEW";
/**
* NEW_SAVING
*/
public static final String NEW_SAVING = "NEW_SAVING";
/**
* SUBMITTED
*/
public static final String SUBMITTED = "SUBMITTED";
/**
* FAILED
*/
public static final String FAILED = "FAILED";
/**
* KILLED
*/
public static final String KILLED = "KILLED";
/**
* RUNNING
*/
public static final String RUNNING = "RUNNING";
/**
* underline "_"
*/
public static final String UNDERLINE = "_";
/**
* quartz job prefix
*/
public static final String QUARTZ_JOB_PRIFIX = "job";
/**
* quartz job group prefix
*/
public static final String QUARTZ_JOB_GROUP_PRIFIX = "jobgroup";
/**
* projectId
*/
public static final String PROJECT_ID = "projectId";
/**
* scheduleId
*/
public static final String SCHEDULE_ID = "scheduleId";
/**
* schedule
*/
public static final String SCHEDULE = "schedule";
/**
* application regex
*/
public static final String APPLICATION_REGEX = "application_\\d+_\\d+";
public static final String PID = "pid";
/**
* month_begin
*/
public static final String MONTH_BEGIN = "month_begin";
/**
* add_months
*/
public static final String ADD_MONTHS = "add_months";
/**
* month_end
*/
public static final String MONTH_END = "month_end";
/**
* week_begin
*/
public static final String WEEK_BEGIN = "week_begin";
/**
* week_end
*/
public static final String WEEK_END = "week_end";
/**
* timestamp
*/
public static final String TIMESTAMP = "timestamp";
public static final char SUBTRACT_CHAR = '-';
public static final char ADD_CHAR = '+';
public static final char MULTIPLY_CHAR = '*';
public static final char DIVISION_CHAR = '/';
public static final char LEFT_BRACE_CHAR = '(';
public static final char RIGHT_BRACE_CHAR = ')';
public static final String ADD_STRING = "+";
public static final String MULTIPLY_STRING = "*";
public static final String DIVISION_STRING = "/";
public static final String LEFT_BRACE_STRING = "(";
public static final char P = 'P';
public static final char N = 'N';
public static final String SUBTRACT_STRING = "-";
public static final String GLOBAL_PARAMS = "globalParams";
public static final String LOCAL_PARAMS = "localParams";
public static final String PROCESS_INSTANCE_STATE = "processInstanceState";
public static final String TASK_LIST = "taskList";
public static final String RWXR_XR_X = "rwxr-xr-x";
/**
* master/worker server use for zk
*/
public static final String MASTER_PREFIX = "master";
public static final String WORKER_PREFIX = "worker";
public static final String DELETE_ZK_OP = "delete";
public static final String ADD_ZK_OP = "add";
public static final String ALIAS = "alias";
public static final String CONTENT = "content";
public static final String DEPENDENT_SPLIT = ":||";
public static final String DEPENDENT_ALL = "ALL";
}

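A note on the permission constants above: READ_PERMISSION (2), WRITE_PERMISSION (4) and EXECUTE_PERMISSION (1) are bit flags, which is why ALL_PERMISSIONS is built with bitwise OR and works out to the same value as DEFAULT_ADMIN_PERMISSION (7). A minimal sketch of how such a mask can be checked; the PermissionExample class is illustrative only and is not part of this commit:

public class PermissionExample {
    // mirror of the flag values defined in Constants
    private static final int EXECUTE_PERMISSION = 1;
    private static final int READ_PERMISSION = 2;
    private static final int WRITE_PERMISSION = 4;

    // true when every bit of 'required' is present in 'granted'
    static boolean hasPermission(int granted, int required) {
        return (granted & required) == required;
    }

    public static void main(String[] args) {
        int granted = READ_PERMISSION | EXECUTE_PERMISSION;            // 3
        System.out.println(hasPermission(granted, READ_PERMISSION));   // true
        System.out.println(hasPermission(granted, WRITE_PERMISSION));  // false
    }
}
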
29
escheduler-common/src/main/java/cn/escheduler/common/IStoppable.java

@@ -0,0 +1,29 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common;
/**
* server stop interface.
*/
public interface IStoppable {
/**
* Stop this service.
* @param cause why stopping
*/
public void stop(String cause);
}

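IStoppable is the small hook a long-running server implements so it can be asked to shut down along with the reason for stopping. A minimal sketch of an implementation, assuming only the interface above; the DemoServer class is illustrative and not part of this commit:

public class DemoServer implements cn.escheduler.common.IStoppable {

    private volatile boolean running = true;

    @Override
    public void stop(String cause) {
        // record why the server is stopping, then let the main loop exit
        System.out.println("stopping, cause: " + cause);
        running = false;
    }

    public void serve() throws InterruptedException {
        while (running) {
            Thread.sleep(1000); // stand-in for real work
        }
    }
}
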
27
escheduler-common/src/main/java/cn/escheduler/common/enums/AlertStatus.java

@@ -0,0 +1,27 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.enums;
/**
* alert status
*/
public enum AlertStatus {
/**
* 0 waiting to be executed; 1 executed successfully; 2 execution failed
*/
WAIT_EXECUTION,EXECUTION_SUCCESS,EXECUTION_FAILURE
}

27
escheduler-common/src/main/java/cn/escheduler/common/enums/AlertType.java

@@ -0,0 +1,27 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.enums;
/**
* warning message notification method
*/
public enum AlertType {
/**
* 0 email; 1 SMS
*/
EMAIL,SMS
}

40
escheduler-common/src/main/java/cn/escheduler/common/enums/CommandType.java

@@ -0,0 +1,40 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.enums;
/**
* command types
*/
public enum CommandType {
/**
* command types
* 0 start a new process
* 1 start a new process from current nodes
* 2 recover tolerance fault work flow
* 3 start process from paused task nodes
* 4 start process from failure task nodes
* 5 complement data
* 6 start a new process from scheduler
* 7 repeat running a work flow
* 8 pause a process
* 9 stop a process
* 10 recover waiting thread
*/
START_PROCESS, START_CURRENT_TASK_PROCESS, RECOVER_TOLERANCE_FAULT_PROCESS, RECOVER_SUSPENDED_PROCESS,
START_FAILURE_TASK_PROCESS,COMPLEMENT_DATA,SCHEDULER, REPEAT_RUNNING,PAUSE,STOP,RECOVER_WAITTING_THREAD;
}

28
escheduler-common/src/main/java/cn/escheduler/common/enums/CycleEnum.java

@@ -0,0 +1,28 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.enums;
/**
* cycle enums
*/
public enum CycleEnum {
/**
* 0 minute; 1 hour; 2 day; 3 week; 4 month; 5 year;
*/
MINUTE, HOUR, DAY, WEEK, MONTH, YEAR
}

35
escheduler-common/src/main/java/cn/escheduler/common/enums/DataType.java

@@ -0,0 +1,35 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.enums;
/**
* data types in user define parameter
*/
public enum DataType {
/**
* 0 string
* 1 integer
* 2 long
* 3 float
* 4 double
* 5 date, "YYYY-MM-DD"
* 6 time, "HH:MM:SS"
* 7 time stamp
* 8 Boolean
*/
VARCHAR,INTEGER,LONG,FLOAT,DOUBLE,DATE,TIME,TIMESTAMP,BOOLEAN
}

30
escheduler-common/src/main/java/cn/escheduler/common/enums/DbType.java

@@ -0,0 +1,30 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.enums;
/**
* data base types
*/
public enum DbType {
/**
* 0 mysql
* 1 postgresql
* 2 hive
* 3 spark
*/
MYSQL, POSTGRESQL, HIVE, SPARK
}

31
escheduler-common/src/main/java/cn/escheduler/common/enums/DependResult.java

@@ -0,0 +1,31 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.enums;
/**
* depend result
*/
public enum DependResult {
/**
* 0 success
* 1 waiting
* 2 failed
*/
SUCCESS, WAITING, FAILED
}

29
escheduler-common/src/main/java/cn/escheduler/common/enums/DependStrategy.java

@@ -0,0 +1,29 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.enums;
/**
* depend strategy
*/
public enum DependStrategy {
/**
* 0 none; 1 all success; 2 all failed; 3 one success; 4 one failed
*/
NONE, ALL_SUCCESS, ALL_FAILED, ONE_SUCCESS, ONE_FAILED
}

25
escheduler-common/src/main/java/cn/escheduler/common/enums/DependentRelation.java

@@ -0,0 +1,25 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.enums;
/**
* dependent relation: and or
*/
public enum DependentRelation {
AND,OR;
}

27
escheduler-common/src/main/java/cn/escheduler/common/enums/Direct.java

@@ -0,0 +1,27 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.enums;
/**
* parameter of stored procedure
*/
public enum Direct {
/**
* 0 in; 1 out;
*/
IN,OUT
}

101
escheduler-common/src/main/java/cn/escheduler/common/enums/ExecutionStatus.java

@@ -0,0 +1,101 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.enums;
/**
* running status for workflow and task nodes
*
*/
public enum ExecutionStatus {
/**
* status
* 0 submit success
* 1 running
* 2 ready pause
* 3 pause
* 4 ready stop
* 5 stop
* 6 failure
* 7 success
* 8 need fault tolerance
* 9 kill
* 10 waiting thread
* 11 waiting depend node complete
*/
SUBMITTED_SUCCESS,RUNNING_EXEUTION,READY_PAUSE,PAUSE,READY_STOP,STOP,FAILURE,SUCCESS,
NEED_FAULT_TOLERANCE,KILL,WAITTING_THREAD,WAITTING_DEPEND;
/**
* status is success
* @return
*/
public boolean typeIsSuccess(){
return this == SUCCESS;
}
/**
* status is failure
* @return
*/
public boolean typeIsFailure(){
return this == FAILURE || this == NEED_FAULT_TOLERANCE;
}
/**
* status is finished
* @return
*/
public boolean typeIsFinished(){
return typeIsSuccess() || typeIsFailure() || typeIsCancel() || typeIsPause()
|| typeIsWaittingThread();
}
/**
* status is waiting thread
* @return
*/
public boolean typeIsWaittingThread(){
return this == WAITTING_THREAD;
}
/**
* status is pause
* @return
*/
public boolean typeIsPause(){
return this == PAUSE;
}
/**
* status is running
* @return
*/
public boolean typeIsRunning(){
return this == RUNNING_EXEUTION || this == WAITTING_DEPEND;
}
/**
* status is cancel
*/
public boolean typeIsCancel(){ return this == KILL || this == STOP ;}
}

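The typeIs* helpers above fold the twelve raw states into the coarser categories callers usually branch on (finished, failure, running, and so on). A short usage sketch, assuming ExecutionStatus is on the classpath; the ExecutionStatusExample class is illustrative only:

import cn.escheduler.common.enums.ExecutionStatus;

public class ExecutionStatusExample {
    public static void main(String[] args) {
        ExecutionStatus status = ExecutionStatus.NEED_FAULT_TOLERANCE;
        // NEED_FAULT_TOLERANCE counts as a failure, and every failure counts as finished
        System.out.println(status.typeIsFailure());   // true
        System.out.println(status.typeIsFinished());  // true
        System.out.println(status.typeIsRunning());   // false
    }
}
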
30
escheduler-common/src/main/java/cn/escheduler/common/enums/FailureStrategy.java

@@ -0,0 +1,30 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.enums;
/**
* failure policy when some task node failed.
*/
public enum FailureStrategy {
/**
* 0 ending process when some tasks failed.
* 1 continue running when some tasks failed.
**/
END, CONTINUE;
}

33
escheduler-common/src/main/java/cn/escheduler/common/enums/Flag.java

@@ -0,0 +1,33 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.enums;
/**
* have_script
* have_file
* can_retry
* have_arr_variables
* have_map_variables
* have_alert
*/
public enum Flag {
/**
* 0 no
* 1 yes
*/
NO,YES
}

31
escheduler-common/src/main/java/cn/escheduler/common/enums/Priority.java

@@ -0,0 +1,31 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.enums;
/**
* define process and task priority
*/
public enum Priority {
/**
* 0 highest priority
* 1 higher priority
* 2 medium priority
* 3 lower priority
* 4 lowest priority
*/
HIGHEST,HIGH,MEDIUM,LOW,LOWEST
}

30
escheduler-common/src/main/java/cn/escheduler/common/enums/ProgramType.java

@@ -0,0 +1,30 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.enums;
/**
* support program types
*/
public enum ProgramType {
/**
* 0 JAVA,1 SCALA,2 PYTHON
*/
JAVA,
SCALA,
PYTHON
}

40
escheduler-common/src/main/java/cn/escheduler/common/enums/ReleaseState.java

@@ -0,0 +1,40 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.enums;
/**
* process define release state
*/
public enum ReleaseState {
/**
* 0 offline
* 1 online
*/
OFFLINE, ONLINE;
public static ReleaseState getEnum(int value){
for (ReleaseState e:ReleaseState.values()) {
if(e.ordinal() == value) {
return e;
}
}
//For values out of enum scope
return null;
}
}
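For reference, a minimal usage sketch of getEnum; the integer typically comes from a persisted column, and the concrete value here is only illustrative:
int stateValue = 1;                                      // hypothetical value read from storage
ReleaseState state = ReleaseState.getEnum(stateValue);   // ONLINE
if (state == null) {
    // value outside the enum range: treat as invalid input
}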

27
escheduler-common/src/main/java/cn/escheduler/common/enums/ResourceType.java

@ -0,0 +1,27 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.enums;
/**
* resource type
*/
public enum ResourceType {
/**
* 0 file, 1 udf
*/
FILE, UDF
}

28
escheduler-common/src/main/java/cn/escheduler/common/enums/RunMode.java

@ -0,0 +1,28 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.enums;
/**
* complement data run mode
*/
public enum RunMode {
/**
* 0 serial run
* 1 parallel run
*/
RUN_MODE_SERIAL, RUN_MODE_PARALLEL
}

29
escheduler-common/src/main/java/cn/escheduler/common/enums/SelfDependStrategy.java

@ -0,0 +1,29 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.enums;
/**
* self-dependency strategy
*/
public enum SelfDependStrategy {
/**
* 0 do not depend on the last cycle;
* 1 depend on the last cycle
**/
NO_DEP_PRE, DEP_PRE
}

34
escheduler-common/src/main/java/cn/escheduler/common/enums/ShowType.java

@ -0,0 +1,34 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.enums;
/**
* show type for email
*/
public enum ShowType {
/**
* 0 table;
* 1 text;
* 2 attachment;
* 3 table + attachment;
*/
TABLE,
TEXT,
ATTACHMENT,
TABLEATTACHMENT
}

30
escheduler-common/src/main/java/cn/escheduler/common/enums/TaskDependType.java

@ -0,0 +1,30 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.enums;
/**
* task node depend type
*/
public enum TaskDependType {
/**
* 0 run current tasks only
* 1 run current tasks and the tasks they depend on (upstream)
* 2 run current tasks and the tasks that depend on them (downstream)
*/
TASK_ONLY, TASK_PRE, TASK_POST;
}

67
escheduler-common/src/main/java/cn/escheduler/common/enums/TaskStateType.java

@ -0,0 +1,67 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.enums;
/**
* type of task state
*/
public enum TaskStateType {
/**
* 0 waiting to run
* 1 running
* 2 finish
* 3 failed
* 4 success
*/
WAITTING, RUNNING, FINISH, FAILED, SUCCESS;
/**
* convert a task state to the corresponding array of execution status ordinals
* @param taskStateType
* @return
*/
public static int[] convert2ExecutStatusIntArray(TaskStateType taskStateType){
switch (taskStateType){
case SUCCESS:
return new int[]{ExecutionStatus.SUCCESS.ordinal()};
case FAILED:
return new int[]{
ExecutionStatus.FAILURE.ordinal(),
ExecutionStatus.NEED_FAULT_TOLERANCE.ordinal()};
case FINISH:
return new int[]{
ExecutionStatus.PAUSE.ordinal(),
ExecutionStatus.STOP.ordinal()
};
case RUNNING:
return new int[]{ExecutionStatus.SUBMITTED_SUCCESS.ordinal(),
ExecutionStatus.RUNNING_EXEUTION.ordinal(),
ExecutionStatus.READY_PAUSE.ordinal(),
ExecutionStatus.READY_STOP.ordinal()};
case WAITTING:
return new int[]{
ExecutionStatus.SUBMITTED_SUCCESS.ordinal()
};
default:
break;
}
return null;
}
}
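A short sketch of the conversion above; the resulting ordinals are typically used to filter task instances by execution status (the surrounding query is omitted):
// FAILED covers both FAILURE and NEED_FAULT_TOLERANCE execution statuses
int[] failedStates = TaskStateType.convert2ExecutStatusIntArray(TaskStateType.FAILED);
// failedStates = { ExecutionStatus.FAILURE.ordinal(), ExecutionStatus.NEED_FAULT_TOLERANCE.ordinal() }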

29
escheduler-common/src/main/java/cn/escheduler/common/enums/TaskTimeoutStrategy.java

@ -0,0 +1,29 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.enums;
/**
* task timeout strategy
*/
public enum TaskTimeoutStrategy {
/**
* 0 warn
* 1 failed
* 2 warn+failed
*/
WARN, FAILED, WARNFAILED
}

34
escheduler-common/src/main/java/cn/escheduler/common/enums/TaskType.java

@ -0,0 +1,34 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.enums;
/**
* task node type
*/
public enum TaskType {
/**
* 0 SHELL
* 1 SQL
* 2 SUB_PROCESS
* 3 PROCEDURE
* 4 MR
* 5 SPARK
* 6 PYTHON
* 7 DEPENDENT
*/
SHELL, SQL, SUB_PROCESS, PROCEDURE, MR, SPARK, PYTHON, DEPENDENT
}

27
escheduler-common/src/main/java/cn/escheduler/common/enums/UdfType.java

@ -0,0 +1,27 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.enums;
/**
* UDF type
*/
public enum UdfType {
/**
* 0 hive; 1 spark
*/
HIVE, SPARK
}

28
escheduler-common/src/main/java/cn/escheduler/common/enums/UserType.java

@ -0,0 +1,28 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.enums;
/**
* user type
*/
public enum UserType {
/**
* 0 admin user; 1 general user
*/
ADMIN_USER,
GENERAL_USER
}

31
escheduler-common/src/main/java/cn/escheduler/common/enums/WarningType.java

@ -0,0 +1,31 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.enums;
/**
* defines whether a warning is sent when the process ends;
*/
public enum WarningType {
/**
* 0 do not send a warning;
* 1 send a warning if the process succeeds;
* 2 send a warning if the process fails;
* 3 always send a warning when the process ends;
*/
NONE, SUCCESS, FAILURE, ALL;
}

519
escheduler-common/src/main/java/cn/escheduler/common/graph/DAG.java

@ -0,0 +1,519 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.graph;
import cn.escheduler.common.utils.CollectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
/**
* generic DAG (directed acyclic graph) structure and analysis
* Node: node
* NodeInfo: node description information
* EdgeInfo: edge description information
*/
public class DAG<Node, NodeInfo, EdgeInfo> {
private static final Logger logger = LoggerFactory.getLogger(DAG.class);
private final ReadWriteLock lock = new ReentrantReadWriteLock();
/**
* node map, key is node, value is node information
*/
private volatile Map<Node, NodeInfo> nodesMap;
/**
* edge map: key is the origin node; value is a map from destination node to edge information
*/
private volatile Map<Node, Map<Node, EdgeInfo>> edgesMap;
/**
* reversed edge map: key is the destination node; value is a map from origin node to edge information
*/
private volatile Map<Node, Map<Node, EdgeInfo>> reverseEdgesMap;
public DAG() {
nodesMap = new HashMap<>();
edgesMap = new HashMap<>();
reverseEdgesMap = new HashMap<>();
}
/**
* add node information
*
* @param node node
* @param nodeInfo node information
*/
public void addNode(Node node, NodeInfo nodeInfo) {
lock.writeLock().lock();
try{
nodesMap.put(node, nodeInfo);
}finally {
lock.writeLock().unlock();
}
}
/**
* add edge
* @param fromNode node of origin
* @param toNode node of destination
* @return true if the edge was added; false if adding it would create a cycle
*/
public boolean addEdge(Node fromNode, Node toNode) {
return addEdge(fromNode, toNode, false);
}
/**
* add edge
* @param fromNode node of origin
* @param toNode node of destination
* @param createNode whether the node needs to be created if it does not exist
* @return true if the edge was added; false if adding it would create a cycle
*/
private boolean addEdge(Node fromNode, Node toNode, boolean createNode) {
return addEdge(fromNode, toNode, null, createNode);
}
/**
* add edge
*
* @param fromNode node of origin
* @param toNode node of destination
* @param edge edge description
* @param createNode whether the node needs to be created if it does not exist
* @return true if the edge was added; false if adding it would create a cycle
*/
public boolean addEdge(Node fromNode, Node toNode, EdgeInfo edge, boolean createNode) {
lock.writeLock().lock();
try{
// Whether an edge can be successfully added(fromNode -> toNode)
if (!isLegalAddEdge(fromNode, toNode, createNode)) {
logger.error("serious error: add edge({} -> {}) is invalid, cause cycle!", fromNode, toNode);
return false;
}
addNodeIfAbsent(fromNode, null);
addNodeIfAbsent(toNode, null);
addEdge(fromNode, toNode, edge, edgesMap);
addEdge(toNode, fromNode, edge, reverseEdgesMap);
return true;
}finally {
lock.writeLock().unlock();
}
}
/**
* whether this node is contained
*
* @param node node
* @return
*/
public boolean containsNode(Node node) {
lock.readLock().lock();
try{
return nodesMap.containsKey(node);
}finally {
lock.readLock().unlock();
}
}
/**
* whether this edge is contained
*
* @param fromNode node of origin
* @param toNode node of destination
* @return
*/
public boolean containsEdge(Node fromNode, Node toNode) {
lock.readLock().lock();
try{
Map<Node, EdgeInfo> endEdges = edgesMap.get(fromNode);
if (endEdges == null) {
return false;
}
return endEdges.containsKey(toNode);
}finally {
lock.readLock().unlock();
}
}
/**
* get node description
*
* @param node node
* @return
*/
public NodeInfo getNode(Node node) {
lock.readLock().lock();
try{
return nodesMap.get(node);
}finally {
lock.readLock().unlock();
}
}
/**
* Get the number of nodes
*
* @return
*/
public int getNodesCount() {
lock.readLock().lock();
try{
return nodesMap.size();
}finally {
lock.readLock().unlock();
}
}
/**
* Get the number of edges
*
* @return
*/
public int getEdgesCount() {
lock.readLock().lock();
try{
int count = 0;
for (Map.Entry<Node, Map<Node, EdgeInfo>> entry : edgesMap.entrySet()) {
count += entry.getValue().size();
}
return count;
}finally {
lock.readLock().unlock();
}
}
/**
* get the start nodes of the DAG (nodes with no incoming edges)
*
* @return
*/
public Collection<Node> getBeginNode() {
lock.readLock().lock();
try{
return CollectionUtils.subtract(nodesMap.keySet(), reverseEdgesMap.keySet());
}finally {
lock.readLock().unlock();
}
}
/**
* get the end nodes of the DAG (nodes with no outgoing edges)
*
* @return
*/
public Collection<Node> getEndNode() {
lock.readLock().lock();
try{
return CollectionUtils.subtract(nodesMap.keySet(), edgesMap.keySet());
}finally {
lock.readLock().unlock();
}
}
/**
* Gets all previous nodes of the node
*
* @param node node id to be calculated
* @return
*/
public Set<Node> getPreviousNodes(Node node) {
lock.readLock().lock();
try{
return getNeighborNodes(node, reverseEdgesMap);
}finally {
lock.readLock().unlock();
}
}
/**
* Get all subsequent nodes of the node
*
* @param node node id to be calculated
* @return
*/
public Set<Node> getSubsequentNodes(Node node) {
lock.readLock().lock();
try{
return getNeighborNodes(node, edgesMap);
}finally {
lock.readLock().unlock();
}
}
/**
* Gets the in-degree of the node
*
* @param node node id
* @return
*/
public int getIndegree(Node node) {
lock.readLock().lock();
try{
return getPreviousNodes(node).size();
}finally {
lock.readLock().unlock();
}
}
/**
* whether the graph has a ring
*
* @return true if the graph has a cycle, otherwise false
*/
public boolean hasCycle() {
lock.readLock().lock();
try{
return !topologicalSortImpl().getKey();
}finally {
lock.readLock().unlock();
}
}
/**
* Only a DAG has a topological ordering
* @return the topologically sorted node list
* @throws Exception if the graph contains a cycle
*/
public List<Node> topologicalSort() throws Exception {
lock.readLock().lock();
try{
Map.Entry<Boolean, List<Node>> entry = topologicalSortImpl();
if (entry.getKey()) {
return entry.getValue();
}
throw new Exception("serious error: graph has cycle ! ");
}finally {
lock.readLock().unlock();
}
}
/**
* if the node does not exist, add it
*
* @param node node
* @param nodeInfo node information
*/
private void addNodeIfAbsent(Node node, NodeInfo nodeInfo) {
if (!containsNode(node)) {
addNode(node, nodeInfo);
}
}
/**
* add edge
*
* @param fromNode node of origin
* @param toNode node of destination
* @param edge edge description
* @param edges edge set
*/
private void addEdge(Node fromNode, Node toNode, EdgeInfo edge, Map<Node, Map<Node, EdgeInfo>> edges) {
edges.putIfAbsent(fromNode, new HashMap<>());
Map<Node, EdgeInfo> toNodeEdges = edges.get(fromNode);
toNodeEdges.put(toNode, edge);
}
/**
* Whether an edge (fromNode -> toNode) can legally be added,
* i.e. whether adding it would keep the graph acyclic
*
* @param fromNode node of origin
* @param toNode node of destination
* @param createNode whether to create a node
* @return
*/
private boolean isLegalAddEdge(Node fromNode, Node toNode, boolean createNode) {
if (fromNode.equals(toNode)) {
logger.error("edge fromNode({}) can't equals toNode({})", fromNode, toNode);
return false;
}
if (!createNode) {
if (!containsNode(fromNode) || !containsNode(toNode)){
logger.error("edge fromNode({}) or toNode({}) is not in vertices map", fromNode, toNode);
return false;
}
}
// to add the edge (fromNode -> toNode) safely, check that it would not create a cycle
int verticesCount = getNodesCount();
Queue<Node> queue = new LinkedList<>();
queue.add(toNode);
// BFS from toNode: if fromNode is not reachable, adding the edge cannot create a cycle
while (!queue.isEmpty() && (--verticesCount > 0)) {
Node key = queue.poll();
for (Node subsequentNode : getSubsequentNodes(key)) {
if (subsequentNode.equals(fromNode)) {
return false;
}
queue.add(subsequentNode);
}
}
return true;
}
/**
* Get all neighbor nodes of the node
*
* @param node Node id to be calculated
* @param edges neighbor edge information
* @return
*/
private Set<Node> getNeighborNodes(Node node, final Map<Node, Map<Node, EdgeInfo>> edges) {
final Map<Node, EdgeInfo> neighborEdges = edges.get(node);
if (neighborEdges == null) {
return Collections.emptySet();
}
return neighborEdges.keySet();
}
/**
* Determine whether the graph contains a cycle and compute a topological ordering
*
* A directed acyclic graph (DAG) always has a topological ordering.
* Breadth-first search:
* 1. Scan all vertices and enqueue every vertex with in-degree 0
* 2. Poll a vertex from the queue, decrement the in-degree of each adjacent vertex,
*    and enqueue an adjacent vertex once its in-degree reaches 0
* 3. Repeat step 2 until the queue is empty
* If not every vertex can be visited, the graph is not a directed acyclic graph
* and has no topological ordering.
*
* @return an entry whose key is true when the graph is acyclic (sort succeeded)
* and false when it contains a cycle, and whose value is the
* (possibly partial) topological sort result
*/
private Map.Entry<Boolean, List<Node>> topologicalSortImpl() {
// queue of nodes with in-degree 0
Queue<Node> zeroIndegreeNodeQueue = new LinkedList<>();
// save result
List<Node> topoResultList = new ArrayList<>();
// nodes whose in-degree is not 0, mapped to their current in-degree
Map<Node, Integer> notZeroIndegreeNodeMap = new HashMap<>();
// scan all vertices and push those with in-degree 0 onto the queue
for (Map.Entry<Node, NodeInfo> vertices : nodesMap.entrySet()) {
Node node = vertices.getKey();
int inDegree = getIndegree(node);
if (inDegree == 0) {
zeroIndegreeNodeQueue.add(node);
topoResultList.add(node);
} else {
notZeroIndegreeNodeMap.put(node, inDegree);
}
}
/**
* If, after scanning, there is no node with in-degree 0,
* the graph contains a cycle; return directly
*/
if(zeroIndegreeNodeQueue.isEmpty()){
return new AbstractMap.SimpleEntry<>(false, topoResultList);
}
// repeatedly remove nodes with in-degree 0 together with their outgoing edges
while (!zeroIndegreeNodeQueue.isEmpty()) {
Node v = zeroIndegreeNodeQueue.poll();
// Get the neighbor node
Set<Node> subsequentNodes = getSubsequentNodes(v);
for (Node subsequentNode : subsequentNodes) {
Integer degree = notZeroIndegreeNodeMap.get(subsequentNode);
if(--degree == 0){
topoResultList.add(subsequentNode);
zeroIndegreeNodeQueue.add(subsequentNode);
notZeroIndegreeNodeMap.remove(subsequentNode);
}else{
notZeroIndegreeNodeMap.put(subsequentNode, degree);
}
}
}
// if notZeroIndegreeNodeMap is empty, there is no cycle
AbstractMap.SimpleEntry<Boolean, List<Node>> resultMap =
new AbstractMap.SimpleEntry<>(notZeroIndegreeNodeMap.isEmpty(), topoResultList);
return resultMap;
}
}
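A self-contained usage sketch of the DAG class above, with arbitrary String node and info types; it shows cycle rejection and topological sorting:
import cn.escheduler.common.graph.DAG;
import java.util.List;

public class DagExample {
    public static void main(String[] args) throws Exception {
        DAG<String, String, String> dag = new DAG<>();
        dag.addNode("A", "task A");
        dag.addNode("B", "task B");
        dag.addNode("C", "task C");
        dag.addEdge("A", "B");                       // A -> B
        dag.addEdge("B", "C");                       // B -> C
        boolean added = dag.addEdge("C", "A");       // false: would close the cycle A -> B -> C -> A
        List<String> order = dag.topologicalSort();  // [A, B, C]
        System.out.println(added + " " + order);
    }
}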

100
escheduler-common/src/main/java/cn/escheduler/common/job/db/BaseDataSource.java

@ -0,0 +1,100 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.job.db;
/**
* data source base class
*/
public abstract class BaseDataSource {
/**
* user name
*/
private String user;
/**
* user password
*/
private String password;
/**
* data source address
*/
private String address;
/**
* database name
*/
private String database;
/**
* other connection parameters for the data source
*/
private String other;
/**
* test whether the data source can be connected successfully
* @throws Exception
*/
public abstract void isConnectable() throws Exception;
/**
* gets the JDBC url for the data source connection
* @return
*/
public abstract String getJdbcUrl();
public String getUser() {
return user;
}
public void setUser(String user) {
this.user = user;
}
public String getPassword() {
return password;
}
public void setPassword(String password) {
this.password = password;
}
public String getAddress() {
return address;
}
public void setAddress(String address) {
this.address = address;
}
public String getDatabase() {
return database;
}
public void setDatabase(String database) {
this.database = database;
}
public String getOther() {
return other;
}
public void setOther(String other) {
this.other = other;
}
}

50
escheduler-common/src/main/java/cn/escheduler/common/job/db/DataSourceFactory.java

@ -0,0 +1,50 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.job.db;
import cn.escheduler.common.enums.DbType;
import cn.escheduler.common.utils.JSONUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* produces data source objects from this factory based on the database type
*/
public class DataSourceFactory {
private static final Logger logger = LoggerFactory.getLogger(DataSourceFactory.class);
public static BaseDataSource getDatasource(DbType dbType, String parameter) {
try {
switch (dbType) {
case MYSQL:
return JSONUtils.parseObject(parameter, MySQLDataSource.class);
case POSTGRESQL:
return JSONUtils.parseObject(parameter, PostgreDataSource.class);
case HIVE:
return JSONUtils.parseObject(parameter, HiveDataSource.class);
case SPARK:
return JSONUtils.parseObject(parameter, SparkDataSource.class);
default:
return null;
}
} catch (Exception e) {
logger.error("Get datasource object error", e);
return null;
}
}
}
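A minimal sketch of how the factory might be called. The JSON field names mirror the BaseDataSource properties (user, password, address, database, other); the concrete values are made up:
String parameter = "{\"user\":\"root\",\"password\":\"secret\","
        + "\"address\":\"jdbc:mysql://127.0.0.1:3306\","
        + "\"database\":\"escheduler\",\"other\":\"characterEncoding=UTF-8\"}";
BaseDataSource dataSource = DataSourceFactory.getDatasource(DbType.MYSQL, parameter);
// dataSource is null if the type is unsupported or the JSON cannot be parsed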

77
escheduler-common/src/main/java/cn/escheduler/common/job/db/HiveDataSource.java

@ -0,0 +1,77 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.job.db;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
/**
* data source of hive
*/
public class HiveDataSource extends BaseDataSource {
private static final Logger logger = LoggerFactory.getLogger(HiveDataSource.class);
/**
* gets the JDBC url for the data source connection
* @return
*/
@Override
public String getJdbcUrl() {
String jdbcUrl = getAddress();
if (jdbcUrl.lastIndexOf("/") != (jdbcUrl.length() - 1)) {
jdbcUrl += "/";
}
jdbcUrl += getDatabase();
if (StringUtils.isNotEmpty(getOther())) {
jdbcUrl += ";" + getOther();
}
return jdbcUrl;
}
/**
* test whether the data source can be connected successfully
* @throws Exception
*/
@Override
public void isConnectable() throws Exception {
Connection con = null;
try {
Class.forName("org.apache.hive.jdbc.HiveDriver");
con = DriverManager.getConnection(getJdbcUrl(), getUser(), "");
} finally {
if (con != null) {
try {
con.close();
} catch (SQLException e) {
logger.error("Postgre datasource try conn close conn error", e);
throw e;
}
}
}
}
}

73
escheduler-common/src/main/java/cn/escheduler/common/job/db/MySQLDataSource.java

@ -0,0 +1,73 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.job.db;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
/**
* data source of MySQL
*/
public class MySQLDataSource extends BaseDataSource {
private static final Logger logger = LoggerFactory.getLogger(MySQLDataSource.class);
/**
* gets the JDBC url for the data source connection
* @return
*/
@Override
public String getJdbcUrl() {
String address = getAddress();
if (address.lastIndexOf("/") != (address.length() - 1)) {
address += "/";
}
String jdbcUrl = address + getDatabase();
if (StringUtils.isNotEmpty(getOther())) {
jdbcUrl += "?" + getOther();
}
return jdbcUrl;
}
/**
* test whether the data source can be connected successfully
* @throws Exception
*/
@Override
public void isConnectable() throws Exception {
Connection con = null;
try {
Class.forName("com.mysql.jdbc.Driver");
con = DriverManager.getConnection(getJdbcUrl(), getUser(), getPassword());
} finally {
if (con != null) {
try {
con.close();
} catch (SQLException e) {
logger.error("Mysql datasource try conn close conn error", e);
throw e;
}
}
}
}
}
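A quick sketch of the URL assembly above; note that the MySQL and PostgreSQL sources append extra parameters with "?", while the Hive and Spark sources use ";". The values are illustrative only:
MySQLDataSource ds = new MySQLDataSource();
ds.setAddress("jdbc:mysql://127.0.0.1:3306");    // no trailing slash, so one is appended
ds.setDatabase("escheduler");
ds.setOther("characterEncoding=UTF-8");
String url = ds.getJdbcUrl();
// url = "jdbc:mysql://127.0.0.1:3306/escheduler?characterEncoding=UTF-8"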

77
escheduler-common/src/main/java/cn/escheduler/common/job/db/PostgreDataSource.java

@ -0,0 +1,77 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.job.db;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
/**
* data source of PostgreSQL
*/
public class PostgreDataSource extends BaseDataSource {
private static final Logger logger = LoggerFactory.getLogger(PostgreDataSource.class);
/**
* gets the JDBC url for the data source connection
* @return
*/
@Override
public String getJdbcUrl() {
String jdbcUrl = getAddress();
if (jdbcUrl.lastIndexOf("/") != (jdbcUrl.length() - 1)) {
jdbcUrl += "/";
}
jdbcUrl += getDatabase();
if (StringUtils.isNotEmpty(getOther())) {
jdbcUrl += "?" + getOther();
}
return jdbcUrl;
}
/**
* test whether the data source can be connected successfully
* @throws Exception
*/
@Override
public void isConnectable() throws Exception {
Connection con = null;
try {
Class.forName("org.postgresql.Driver");
con = DriverManager.getConnection(getJdbcUrl(), getUser(), getPassword());
} finally {
if (con != null) {
try {
con.close();
} catch (SQLException e) {
logger.error("Postgre datasource try conn close conn error", e);
throw e;
}
}
}
}
}

77
escheduler-common/src/main/java/cn/escheduler/common/job/db/SparkDataSource.java

@ -0,0 +1,77 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.job.db;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
/**
* data source of spark
*/
public class SparkDataSource extends BaseDataSource {
private static final Logger logger = LoggerFactory.getLogger(SparkDataSource.class);
/**
* gets the JDBC url for the data source connection
* @return
*/
@Override
public String getJdbcUrl() {
String jdbcUrl = getAddress();
if (jdbcUrl.lastIndexOf("/") != (jdbcUrl.length() - 1)) {
jdbcUrl += "/";
}
jdbcUrl += getDatabase();
if (StringUtils.isNotEmpty(getOther())) {
jdbcUrl += ";" + getOther();
}
return jdbcUrl;
}
/**
* test whether the data source can be connected successfully
* @throws Exception
*/
@Override
public void isConnectable() throws Exception {
Connection con = null;
try {
Class.forName("org.apache.hive.jdbc.HiveDriver");
con = DriverManager.getConnection(getJdbcUrl(), getUser(), "");
} finally {
if (con != null) {
try {
con.close();
} catch (SQLException e) {
logger.error("Spark datasource try conn close conn error", e);
throw e;
}
}
}
}
}

62
escheduler-common/src/main/java/cn/escheduler/common/model/DateInterval.java

@ -0,0 +1,62 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.model;
import java.util.Date;
/**
* date interval class
*/
public class DateInterval {
private Date startTime;
private Date endTime;
public DateInterval(Date beginTime, Date endTime){
this.startTime = beginTime;
this.endTime = endTime;
}
@Override
public boolean equals(Object obj) {
// compare by start and end time; use an instanceof check instead of catching a ClassCastException
if (!(obj instanceof DateInterval)) {
return false;
}
DateInterval dateInterval = (DateInterval) obj;
return startTime.equals(dateInterval.getStartTime()) &&
endTime.equals(dateInterval.getEndTime());
}
public Date getStartTime() {
return startTime;
}
public void setStartTime(Date startTime) {
this.startTime = startTime;
}
public Date getEndTime() {
return endTime;
}
public void setEndTime(Date endTime) {
this.endTime = endTime;
}
}

80
escheduler-common/src/main/java/cn/escheduler/common/model/DependentItem.java

@ -0,0 +1,80 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.model;
import cn.escheduler.common.enums.DependResult;
/**
* dependent item
*/
public class DependentItem {
private int definitionId;
private String depTasks;
private String cycle;
private String dateValue;
private DependResult dependResult;
public String getKey(){
return String.format("%d-%s-%s-%s",
getDefinitionId(),
getDepTasks(),
getCycle(),
getDateValue());
}
public int getDefinitionId() {
return definitionId;
}
public void setDefinitionId(int definitionId) {
this.definitionId = definitionId;
}
public String getDepTasks() {
return depTasks;
}
public void setDepTasks(String depTasks) {
this.depTasks = depTasks;
}
public String getCycle() {
return cycle;
}
public void setCycle(String cycle) {
this.cycle = cycle;
}
public String getDateValue() {
return dateValue;
}
public void setDateValue(String dateValue) {
this.dateValue = dateValue;
}
public DependResult getDependResult() {
return dependResult;
}
public void setDependResult(DependResult dependResult) {
this.dependResult = dependResult;
}
}
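For reference, a sketch of the key produced by getKey(); the field values below are made up, but the "%d-%s-%s-%s" layout matches the format string above:
DependentItem item = new DependentItem();
item.setDefinitionId(7);
item.setDepTasks("task-A");
item.setCycle("day");
item.setDateValue("last1Days");
String key = item.getKey();   // "7-task-A-day-last1Days"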

44
escheduler-common/src/main/java/cn/escheduler/common/model/DependentTaskModel.java

@ -0,0 +1,44 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.model;
import cn.escheduler.common.enums.DependentRelation;
import java.util.List;
public class DependentTaskModel {
private List<DependentItem> dependItemList;
private DependentRelation relation;
public List<DependentItem> getDependItemList() {
return dependItemList;
}
public void setDependItemList(List<DependentItem> dependItemList) {
this.dependItemList = dependItemList;
}
public DependentRelation getRelation() {
return relation;
}
public void setRelation(DependentRelation relation) {
this.relation = relation;
}
}

308
escheduler-common/src/main/java/cn/escheduler/common/model/TaskNode.java

@ -0,0 +1,308 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.model;
import cn.escheduler.common.Constants;
import cn.escheduler.common.enums.Priority;
import cn.escheduler.common.enums.TaskTimeoutStrategy;
import cn.escheduler.common.task.TaskTimeoutParameter;
import cn.escheduler.common.utils.CollectionUtils;
import cn.escheduler.common.utils.JSONUtils;
import com.alibaba.fastjson.JSONObject;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import org.apache.commons.lang3.StringUtils;
import java.io.IOException;
import java.util.List;
import java.util.Objects;
public class TaskNode {
/**
* task node id
*/
private String id;
/**
* task node name
*/
private String name;
/**
* task node description
*/
private String desc;
/**
* task node type
*/
private String type;
/**
* the run flag has two states, NORMAL or FORBIDDEN
*/
private String runFlag;
/**
* front-end field (node location information used by the UI)
*/
private String loc;
/**
* maximum number of retries
*/
private int maxRetryTimes;
/**
* retry interval, in minutes
*/
private int retryInterval;
/**
* params information
*/
@JsonDeserialize(using = JSONUtils.JsonDataDeserializer.class)
@JsonSerialize(using = JSONUtils.JsonDataSerializer.class)
private String params;
/**
* inner dependency information
*/
@JsonDeserialize(using = JSONUtils.JsonDataDeserializer.class)
@JsonSerialize(using = JSONUtils.JsonDataSerializer.class)
private String preTasks;
/**
* users store additional information
*/
@JsonDeserialize(using = JSONUtils.JsonDataDeserializer.class)
@JsonSerialize(using = JSONUtils.JsonDataSerializer.class)
private String extras;
/**
* node dependency list
*/
private List<String> depList;
/**
* outer dependency information
*/
@JsonDeserialize(using = JSONUtils.JsonDataDeserializer.class)
@JsonSerialize(using = JSONUtils.JsonDataSerializer.class)
private String dependence;
/**
* task instance priority
*/
private Priority taskInstancePriority;
/**
* task time out
*/
@JsonDeserialize(using = JSONUtils.JsonDataDeserializer.class)
@JsonSerialize(using = JSONUtils.JsonDataSerializer.class)
private String timeout;
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getDesc() {
return desc;
}
public void setDesc(String desc) {
this.desc = desc;
}
public String getType() {
return type;
}
public void setType(String type) {
this.type = type;
}
public String getParams() {
return params;
}
public void setParams(String params) {
this.params = params;
}
public String getPreTasks() {
return preTasks;
}
public void setPreTasks(String preTasks) throws IOException {
this.preTasks = preTasks;
this.depList = JSONUtils.toList(preTasks, String.class);
}
public String getExtras() {
return extras;
}
public void setExtras(String extras) {
this.extras = extras;
}
public List<String> getDepList() {
return depList;
}
public void setDepList(List<String> depList) throws JsonProcessingException {
this.depList = depList;
this.preTasks = JSONUtils.toJson(depList);
}
public String getLoc() {
return loc;
}
public void setLoc(String loc) {
this.loc = loc;
}
public String getRunFlag(){
return runFlag;
}
public void setRunFlag(String runFlag) {
this.runFlag = runFlag;
}
public Boolean isForbidden(){
return (StringUtils.isNotEmpty(this.runFlag) &&
this.runFlag.equals(Constants.FLOWNODE_RUN_FLAG_FORBIDDEN));
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
TaskNode taskNode = (TaskNode) o;
return Objects.equals(name, taskNode.name) &&
Objects.equals(desc, taskNode.desc) &&
Objects.equals(type, taskNode.type) &&
Objects.equals(params, taskNode.params) &&
Objects.equals(preTasks, taskNode.preTasks) &&
Objects.equals(extras, taskNode.extras) &&
Objects.equals(runFlag, taskNode.runFlag) &&
Objects.equals(dependence, taskNode.dependence) &&
CollectionUtils.equalLists(depList, taskNode.depList);
}
@Override
public int hashCode() {
return Objects.hash(name, desc, type, params, preTasks, extras, depList, runFlag);
}
public String getDependence() {
return dependence;
}
public void setDependence(String dependence) {
this.dependence = dependence;
}
public int getMaxRetryTimes() {
return maxRetryTimes;
}
public void setMaxRetryTimes(int maxRetryTimes) {
this.maxRetryTimes = maxRetryTimes;
}
public int getRetryInterval() {
return retryInterval;
}
public void setRetryInterval(int retryInterval) {
this.retryInterval = retryInterval;
}
public Priority getTaskInstancePriority() {
return taskInstancePriority;
}
public void setTaskInstancePriority(Priority taskInstancePriority) {
this.taskInstancePriority = taskInstancePriority;
}
public String getTimeout() {
return timeout;
}
public void setTimeout(String timeout) {
this.timeout = timeout;
}
/**
* get the task timeout parameter
* @return
*/
public TaskTimeoutParameter getTaskTimeoutParameter() {
if(StringUtils.isNotEmpty(this.getTimeout())){
String formatStr = String.format("%s,%s", TaskTimeoutStrategy.WARN.name(), TaskTimeoutStrategy.FAILED.name());
String timeout = this.getTimeout().replace(formatStr,TaskTimeoutStrategy.WARNFAILED.name());
return JSONObject.parseObject(timeout,TaskTimeoutParameter.class);
}
return new TaskTimeoutParameter(false);
}
@Override
public String toString() {
return "TaskNode{" +
"id='" + id + '\'' +
", name='" + name + '\'' +
", desc='" + desc + '\'' +
", type='" + type + '\'' +
", runFlag='" + runFlag + '\'' +
", loc='" + loc + '\'' +
", maxRetryTimes=" + maxRetryTimes +
", retryInterval=" + retryInterval +
", params='" + params + '\'' +
", preTasks='" + preTasks + '\'' +
", extras='" + extras + '\'' +
", depList=" + depList +
", dependence='" + dependence + '\'' +
", taskInstancePriority=" + taskInstancePriority +
", timeout='" + timeout + '\'' +
'}';
}
}
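A small sketch of the JSON round trip between preTasks and depList; the task names are arbitrary, and setPreTasks declares IOException because the JSON list is parsed eagerly:
import cn.escheduler.common.model.TaskNode;
import java.io.IOException;
import java.util.List;

public class TaskNodeExample {
    public static void main(String[] args) throws IOException {
        TaskNode node = new TaskNode();
        node.setName("task-C");
        node.setPreTasks("[\"task-A\",\"task-B\"]");   // stored JSON form of the upstream task names
        List<String> upstream = node.getDepList();     // parsed form: [task-A, task-B]
        System.out.println(upstream);
    }
}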

67
escheduler-common/src/main/java/cn/escheduler/common/model/TaskNodeRelation.java

@ -0,0 +1,67 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.model;
import java.util.Objects;
public class TaskNodeRelation {
/**
* task start node name
*/
private String startNode;
/**
* task end node name
*/
private String endNode;
public TaskNodeRelation() {
}
public TaskNodeRelation(String startNode, String endNode) {
this.startNode = startNode;
this.endNode = endNode;
}
public String getStartNode() {
return startNode;
}
public void setStartNode(String startNode) {
this.startNode = startNode;
}
public String getEndNode() {
return endNode;
}
public void setEndNode(String endNode) {
this.endNode = endNode;
}
public boolean equals(TaskNodeRelation e){
// compare node names by value; reference equality (==) misses equal but distinct String instances
return (Objects.equals(e.getStartNode(), this.startNode) && Objects.equals(e.getEndNode(), this.endNode));
}
@Override
public String toString() {
return "TaskNodeRelation{" +
"startNode='" + startNode + '\'' +
", endNode='" + endNode + '\'' +
'}';
}
}

85
escheduler-common/src/main/java/cn/escheduler/common/process/ProcessDag.java

@ -0,0 +1,85 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.process;
import cn.escheduler.common.model.TaskNode;
import cn.escheduler.common.model.TaskNodeRelation;
import java.util.List;
public class ProcessDag {
/**
* DAG edge list
**/
private List<TaskNodeRelation> edges;
/**
* DAG node list
*/
private List<TaskNode> nodes;
/**
* getter method
*
* @return the edges
* @see ProcessDag#edges
*/
public List<TaskNodeRelation> getEdges() {
return edges;
}
/**
* setter method
*
* @param edges the edges to set
* @see ProcessDag#edges
*/
public void setEdges(List<TaskNodeRelation> edges) {
this.edges = edges;
}
/**
* getter method
*
* @return the nodes
* @see ProcessDag#nodes
*/
public List<TaskNode> getNodes() {
return nodes;
}
/**
* setter method
*
* @param nodes the nodes to set
* @see ProcessDag#nodes
*/
public void setNodes(List<TaskNode> nodes) {
this.nodes = nodes;
}
@Override
public String toString() {
return "ProcessDag{" +
"edges=" + edges +
", nodes=" + nodes +
'}';
}
}

143
escheduler-common/src/main/java/cn/escheduler/common/process/Property.java

@ -0,0 +1,143 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.process;
import cn.escheduler.common.enums.DataType;
import cn.escheduler.common.enums.Direct;
import java.util.Objects;
public class Property {
/**
* key
*/
private String prop;
/**
* input/output
*/
private Direct direct;
/**
* data type
*/
private DataType type;
/**
* value
*/
private String value;
public Property() {
}
public Property(String prop,Direct direct,DataType type,String value) {
this.prop = prop;
this.direct = direct;
this.type = type;
this.value = value;
}
/**
* getter method
*
* @return the prop
* @see Property#prop
*/
public String getProp() {
return prop;
}
/**
* setter method
*
* @param prop the prop to set
* @see Property#prop
*/
public void setProp(String prop) {
this.prop = prop;
}
/**
* getter method
*
* @return the value
* @see Property#value
*/
public String getValue() {
return value;
}
/**
* setter method
*
* @param value the value to set
* @see Property#value
*/
public void setValue(String value) {
this.value = value;
}
public Direct getDirect() {
return direct;
}
public void setDirect(Direct direct) {
this.direct = direct;
}
public DataType getType() {
return type;
}
public void setType(DataType type) {
this.type = type;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Property property = (Property) o;
return Objects.equals(prop, property.prop) &&
Objects.equals(value, property.value);
}
@Override
public int hashCode() {
return Objects.hash(prop, value);
}
@Override
public String toString() {
return "Property{" +
"prop='" + prop + '\'' +
", direct=" + direct +
", type=" + type +
", value='" + value + '\'' +
'}';
}
}

37
escheduler-common/src/main/java/cn/escheduler/common/process/ResourceInfo.java

@ -0,0 +1,37 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.process;
/**
* resource info
*/
public class ResourceInfo {
/**
* res: the name of the uploaded resource
*/
private String res;
public String getRes() {
return res;
}
public void setRes(String res) {
this.res = res;
}
}

91
escheduler-common/src/main/java/cn/escheduler/common/queue/ITaskQueue.java

@ -0,0 +1,91 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.queue;
import java.util.List;
import java.util.Set;
public interface ITaskQueue {
/**
* take out all the elements
*
* this method has been deprecated,
* use checkTaskExists instead
*
* @param key queue name
* @return all tasks in the queue
*/
@Deprecated
List<String> getAllTasks(String key);
/**
* check task exists in the task queue or not
*
* @param key queue name
* @param task ${priority}_${processInstanceId}_${taskId}
* @return true if exists in the queue
*/
boolean checkTaskExists(String key, String task);
/**
* add an element to the queue
*
* @param key queue name
* @param value
*/
void add(String key, String value);
/**
* an element pops out of the queue
*
* @param key queue name
* @return
*/
String poll(String key);
/**
* add an element to the set
*
* @param key
* @param value
*/
void sadd(String key, String value);
/**
* delete the value corresponding to the key in the set
*
* @param key
* @param value
*/
void srem(String key, String value);
/**
* gets all the elements of the set based on the key
*
* @param key
* @return
*/
Set<String> smembers(String key);
/**
* clear the task queue; intended for use by junit tests only
*/
void delete();
}

61
escheduler-common/src/main/java/cn/escheduler/common/queue/TaskQueueFactory.java

@ -0,0 +1,61 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.queue;
import cn.escheduler.common.utils.CommonUtils;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* task queue factory
*/
public class TaskQueueFactory {
private static final Logger logger = LoggerFactory.getLogger(TaskQueueFactory.class);
private TaskQueueFactory(){
}
/**
* get instance (singleton)
*
* @return instance
*/
public static ITaskQueue getTaskQueueInstance() {
String queueImplValue = CommonUtils.getQueueImplValue();
if (StringUtils.isNotBlank(queueImplValue)) {
// queueImplValue = StringUtils.trim(queueImplValue);
// if (SCHEDULER_QUEUE_REDIS_IMPL.equals(queueImplValue)) {
// logger.info("task queue impl use reids ");
// return TaskQueueRedisImpl.getInstance();
// } else {
logger.info("task queue impl use zookeeper ");
return TaskQueueZkImpl.getInstance();
// }
}else{
logger.error("property escheduler.queue.impl can't be blank ");
System.exit(-1);
}
return null;
}
}
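A hedged usage sketch of the factory together with the queue interface; it assumes escheduler.queue.impl is configured and zookeeper is reachable, and the task key below is a placeholder:

import cn.escheduler.common.Constants;
import cn.escheduler.common.queue.ITaskQueue;
import cn.escheduler.common.queue.TaskQueueFactory;

public class TaskQueueExample {
    public static void main(String[] args) {
        // getTaskQueueInstance() exits the JVM if escheduler.queue.impl is blank
        ITaskQueue taskQueue = TaskQueueFactory.getTaskQueueInstance();

        // format: ${processInstancePriority}_${processInstanceId}_${taskInstancePriority}_${taskId}
        String task = "1_100_1_200"; // hypothetical ids
        taskQueue.add(Constants.SCHEDULER_TASKS_QUEUE, task);

        // poll() returns the task id of the entry with the smallest formatted key
        String taskId = taskQueue.poll(Constants.SCHEDULER_TASKS_QUEUE);
        System.out.println(taskId); // "200" for the single entry above
    }
}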

365
escheduler-common/src/main/java/cn/escheduler/common/queue/TaskQueueZkImpl.java

@ -0,0 +1,365 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.queue;
import cn.escheduler.common.Constants;
import cn.escheduler.common.utils.Bytes;
import cn.escheduler.common.zk.AbstractZKClient;
import org.apache.curator.framework.CuratorFramework;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
/**
* A singleton task queue implementation backed by zookeeper
*/
public class TaskQueueZkImpl extends AbstractZKClient implements ITaskQueue {
private static final Logger logger = LoggerFactory.getLogger(TaskQueueZkImpl.class);
private static TaskQueueZkImpl instance;
private TaskQueueZkImpl(){
init();
}
public static TaskQueueZkImpl getInstance(){
if (null == instance) {
synchronized (TaskQueueZkImpl.class) {
if(null == instance) {
instance = new TaskQueueZkImpl();
}
}
}
return instance;
}
/**
* get all tasks from tasks queue
* @param key task queue name
* @return
*/
@Deprecated
@Override
public List<String> getAllTasks(String key) {
try {
List<String> list = getZkClient().getChildren().forPath(getTasksPath(key));
return list;
} catch (Exception e) {
logger.error("get all tasks from tasks queue exception",e);
}
return new ArrayList<String>();
}
/**
* check task exists in the task queue or not
*
* @param key queue name
* @param task ${priority}_${processInstanceId}_${taskId}
* @return true if exists in the queue
*/
@Override
public boolean checkTaskExists(String key, String task) {
String taskPath = getTasksPath(key) + Constants.SINGLE_SLASH + task;
try {
Stat stat = zkClient.checkExists().forPath(taskPath);
if(null == stat){
logger.info("check task:{} does not exist in task queue", task);
return false;
}else{
logger.info("check task {} exists in task queue ",task);
return true;
}
} catch (Exception e) {
logger.error(String.format("task %s check exists in task queue exception", task), e);
}
return false;
}
/**
* add task to tasks queue
*
* @param key task queue name
* @param value ${priority}_${processInstanceId}_${taskId}
*/
@Override
public void add(String key, String value) {
try {
String taskIdPath = getTasksPath(key) + Constants.SINGLE_SLASH + value;
String result = getZkClient().create().withMode(CreateMode.PERSISTENT).forPath(taskIdPath, Bytes.toBytes(value));
// String path = conf.getString(Constants.ZOOKEEPER_SCHEDULER_ROOT) + Constants.SINGLE_SLASH + Constants.SCHEDULER_TASKS_QUEUE + "_add" + Constants.SINGLE_SLASH + value;
// getZkClient().create().creatingParentContainersIfNeeded().withMode(CreateMode.PERSISTENT).forPath(path,
// Bytes.toBytes(value));
logger.info("add task : {} to tasks queue , result success",result);
} catch (Exception e) {
logger.error("add task to tasks queue exception",e);
}
}
/**
* An element pops out of the queue <p>
* note:
* ${processInstancePriority}_${processInstanceId}_${taskInstancePriority}_${taskId}
* The tasks with the highest priority are selected by comparing the priorities of the above four levels from high to low.
*
* i.e. process instance priority _ process instance id _ task instance priority _ task id, compared from high to low
* @param key task queue name
* @return the task id to be executed
*/
@Override
public String poll(String key) {
try{
CuratorFramework zk = getZkClient();
String tasksQueuePath = getTasksPath(key) + Constants.SINGLE_SLASH;
List<String> list = zk.getChildren().forPath(getTasksPath(key));
if(list != null && list.size() > 0){
int size = list.size();
String formatTargetTask = null;
String targetTaskKey = null;
for (int i = 0; i < size; i++) {
String taskDetail = list.get(i);
String[] taskDetailArrs = taskDetail.split(Constants.UNDERLINE);
if(taskDetailArrs.length == 4){
//format ${processInstancePriority}_${processInstanceId}_${taskInstancePriority}_${taskId}
String formatTask = String.format("%s_%010d_%s_%010d", taskDetailArrs[0], Long.parseLong(taskDetailArrs[1]), taskDetailArrs[2], Long.parseLong(taskDetailArrs[3]));
if(i > 0){
int result = formatTask.compareTo(formatTargetTask);
if(result < 0){
formatTargetTask = formatTask;
targetTaskKey = taskDetail;
}
}else{
formatTargetTask = formatTask;
targetTaskKey = taskDetail;
}
}else{
logger.error("task queue poll error, task detail :{} , please check!", taskDetail);
}
}
if(formatTargetTask != null){
String taskIdPath = tasksQueuePath + targetTaskKey;
logger.info("consume task {}", taskIdPath);
String[] vals = targetTaskKey.split(Constants.UNDERLINE);
try{
zk.delete().forPath(taskIdPath);
// String path = conf.getString(Constants.ZOOKEEPER_SCHEDULER_ROOT) + Constants.SINGLE_SLASH + Constants.SCHEDULER_TASKS_QUEUE + "_remove" + Constants.SINGLE_SLASH + targetTaskKey;
// getZkClient().create().creatingParentContainersIfNeeded().withMode(CreateMode.PERSISTENT).forPath(path,
// Bytes.toBytes(targetTaskKey));
}catch(Exception e){
logger.error(String.format("delete task:%s from zookeeper failed, task detail: %s", targetTaskKey, vals[vals.length - 1]), e);
}
logger.info("consume task: {}, there are still {} tasks to be executed", targetTaskKey, size - 1);
return vals[vals.length - 1];
}else{
logger.error("should not go here, task queue poll error, please check!");
}
}
} catch (Exception e) {
logger.error("poll task from tasks queue exception", e);
}
return null;
}
/**
* To be compatible with the redis implementation, add an element to the set
* @param key The key is the kill/cancel queue path name
* @param value host-taskId The name of the zookeeper node
*/
@Override
public void sadd(String key,String value) {
try {
if(value != null && value.trim().length() > 0){
String path = getTasksPath(key) + Constants.SINGLE_SLASH;
CuratorFramework zk = getZkClient();
Stat stat = zk.checkExists().forPath(path + value);
if(null == stat){
String result = zk.create().withMode(CreateMode.PERSISTENT).forPath(path + value,Bytes.toBytes(value));
logger.info("add task:{} to tasks set result:{} ",value,result);
}else{
logger.info("task {} exists in tasks set ",value);
}
}else{
logger.warn("add host-taskId to tasks set skipped, the value is empty ");
}
} catch (Exception e) {
logger.error("add task to tasks set exception",e);
}
}
/**
* delete the value corresponding to the key in the set
* @param key The key is the kill/cancel queue path name
* @param value host-taskId-taskType The name of the zookeeper node
*/
@Override
public void srem(String key, String value) {
try{
String path = getTasksPath(key) + Constants.SINGLE_SLASH;
CuratorFramework zk = getZkClient();
Stat stat = zk.checkExists().forPath(path + value);
if(null != stat){
zk.delete().forPath(path + value);
logger.info("delete task:{} from tasks set ",value);
}else{
logger.info("delete task:{} from tasks set failed, the task does not exist", value);
}
}catch(Exception e){
logger.error("delete task:" + value + " exception", e);
}
}
/**
* Gets all the elements of the set based on the key
* @param key The key is the kill/cancel queue path name
* @return
*/
@Override
public Set<String> smembers(String key) {
Set<String> tasksSet = new HashSet<>();
try {
List<String> list = getZkClient().getChildren().forPath(getTasksPath(key));
for (String task : list) {
tasksSet.add(task);
}
return tasksSet;
} catch (Exception e) {
logger.error("get all tasks from tasks queue exception",e);
}
return tasksSet;
}
/**
* Init the task queue of zookeeper node
*/
private void init(){
try {
String tasksQueuePath = getTasksPath(Constants.SCHEDULER_TASKS_QUEUE);
String tasksCancelPath = getTasksPath(Constants.SCHEDULER_TASKS_KILL);
for(String taskQueuePath : new String[]{tasksQueuePath,tasksCancelPath}){
if(zkClient.checkExists().forPath(taskQueuePath) == null){
// create a persistent parent node
zkClient.create().creatingParentContainersIfNeeded()
.withMode(CreateMode.PERSISTENT).forPath(taskQueuePath);
logger.info("create tasks queue parent node success : {} ",taskQueuePath);
}
}
} catch (Exception e) {
logger.error("create zk node failure",e);
}
}
/**
* Clear the task queue of zookeeper node
*/
@Override
public void delete(){
try {
String tasksQueuePath = getTasksPath(Constants.SCHEDULER_TASKS_QUEUE);
String tasksCancelPath = getTasksPath(Constants.SCHEDULER_TASKS_KILL);
for(String taskQueuePath : new String[]{tasksQueuePath,tasksCancelPath}){
if(zkClient.checkExists().forPath(taskQueuePath) != null){
List<String> list = zkClient.getChildren().forPath(taskQueuePath);
for (String task : list) {
zkClient.delete().forPath(taskQueuePath + Constants.SINGLE_SLASH + task);
logger.info("delete task from tasks queue : {}/{} ",taskQueuePath,task);
}
}
}
} catch (Exception e) {
logger.error("delete all tasks in tasks queue failure",e);
}
}
/**
* get zookeeper client of CuratorFramework
* @return
*/
public CuratorFramework getZkClient() {
return zkClient;
}
/**
* Get the task queue path
* @param key task queue name
* @return
*/
public String getTasksPath(String key){
return conf.getString(Constants.ZOOKEEPER_SCHEDULER_ROOT) + Constants.SINGLE_SLASH + key;
}
}
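The zero-padded key built in poll() is what makes a plain string comparison follow the numeric ordering of the ids; a small self-contained illustration (values are hypothetical):

public class TaskKeyOrderingExample {
    public static void main(String[] args) {
        // without padding, "1_9_..." would sort after "1_10_..." lexicographically
        String a = String.format("%s_%010d_%s_%010d", "1", 9L, "1", 9L);
        String b = String.format("%s_%010d_%s_%010d", "1", 10L, "1", 10L);

        System.out.println(a); // 1_0000000009_1_0000000009
        System.out.println(b); // 1_0000000010_1_0000000010

        // poll() keeps the smallest formatted key, so the first task is consumed before the second here
        System.out.println(a.compareTo(b) < 0); // true
    }
}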

341
escheduler-common/src/main/java/cn/escheduler/common/shell/AbstractShell.java

@ -0,0 +1,341 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.shell;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.Map;
import java.util.Set;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* A base class for running a Unix command.
*
* <code>AbstractShell</code> can be used to run unix commands like <code>du</code> or
* <code>df</code>. It also offers facilities to gate commands by
* time-intervals.
*/
public abstract class AbstractShell {
private static final Logger logger = LoggerFactory.getLogger(AbstractShell.class);
/**
* Time after which the executing script is considered to have timed out
*/
protected long timeOutInterval = 0L;
/**
* Whether or not the script timed out
*/
private AtomicBoolean timedOut;
/**
* refresh interval in msec
*/
private long interval;
/**
* last time the command was performed
*/
private long lastTime;
/**
* env for the command execution
*/
private Map<String, String> environment;
private File dir;
/**
* sub process used to execute the command
*/
private Process process;
private int exitCode;
/**
* Whether or not the script has finished executing
*/
private volatile AtomicBoolean completed;
public AbstractShell() {
this(0L);
}
/**
* @param interval the minimum duration to wait before re-executing the
* command.
*/
public AbstractShell(long interval ) {
this.interval = interval;
this.lastTime = (interval<0) ? 0 : -interval;
}
/**
* set the environment for the command
* @param env Mapping of environment variables
*/
protected void setEnvironment(Map<String, String> env) {
this.environment = env;
}
/**
* set the working directory
* @param dir The directory where the command would be executed
*/
protected void setWorkingDirectory(File dir) {
this.dir = dir;
}
/**
* check to see if a command needs to be executed and execute if needed
*/
protected void run() throws IOException {
if (lastTime + interval > System.currentTimeMillis()) {
return;
}
// reset for next run
exitCode = 0;
runCommand();
}
/**
* Run the command; this does the actual work
*/
private void runCommand() throws IOException {
ProcessBuilder builder = new ProcessBuilder(getExecString());
Timer timeOutTimer = null;
ShellTimeoutTimerTask timeoutTimerTask = null;
timedOut = new AtomicBoolean(false);
completed = new AtomicBoolean(false);
if (environment != null) {
builder.environment().putAll(this.environment);
}
if (dir != null) {
builder.directory(this.dir);
}
process = builder.start();
ProcessContainer.putProcess(process);
if (timeOutInterval > 0) {
timeOutTimer = new Timer();
timeoutTimerTask = new ShellTimeoutTimerTask(
this);
//One time scheduling.
timeOutTimer.schedule(timeoutTimerTask, timeOutInterval);
}
final BufferedReader errReader =
new BufferedReader(new InputStreamReader(process
.getErrorStream()));
BufferedReader inReader =
new BufferedReader(new InputStreamReader(process
.getInputStream()));
final StringBuffer errMsg = new StringBuffer();
// read error and input streams as this would free up the buffers
// free the error stream buffer
Thread errThread = new Thread() {
@Override
public void run() {
try {
String line = errReader.readLine();
while((line != null) && !isInterrupted()) {
errMsg.append(line);
errMsg.append(System.getProperty("line.separator"));
line = errReader.readLine();
}
} catch(IOException ioe) {
logger.warn("Error reading the error stream", ioe);
}
}
};
try {
errThread.start();
} catch (IllegalStateException ise) { }
try {
// parse the output
parseExecResult(inReader);
exitCode = process.waitFor();
try {
// make sure that the error thread exits
errThread.join();
} catch (InterruptedException ie) {
logger.warn("Interrupted while reading the error stream", ie);
}
completed.set(true);
//the timeout thread handling
//taken care in finally block
if (exitCode != 0) {
throw new ExitCodeException(exitCode, errMsg.toString());
}
} catch (InterruptedException ie) {
throw new IOException(ie.toString());
} finally {
if ((timeOutTimer!=null) && !timedOut.get()) {
timeOutTimer.cancel();
}
// close the input stream
try {
inReader.close();
} catch (IOException ioe) {
logger.warn("Error while closing the input stream", ioe);
}
if (!completed.get()) {
errThread.interrupt();
}
try {
errReader.close();
} catch (IOException ioe) {
logger.warn("Error while closing the error stream", ioe);
}
ProcessContainer.removeProcess(process);
process.destroy();
lastTime = System.currentTimeMillis();
}
}
/**
* return an array containing the command name & its parameters
* */
protected abstract String[] getExecString();
/**
* Parse the execution result
* */
protected abstract void parseExecResult(BufferedReader lines)
throws IOException;
/**
* get the current sub-process executing the given command
* @return process executing the command
*/
public Process getProcess() {
return process;
}
/** get the exit code
* @return the exit code of the process
*/
public int getExitCode() {
return exitCode;
}
/**
* Set if the command has timed out.
*
*/
private void setTimedOut() {
this.timedOut.set(true);
}
/**
* Timer which is used to timeout scripts spawned off by shell.
*/
private static class ShellTimeoutTimerTask extends TimerTask {
private AbstractShell shell;
public ShellTimeoutTimerTask(AbstractShell shell) {
this.shell = shell;
}
@Override
public void run() {
Process p = shell.getProcess();
try {
p.exitValue();
} catch (Exception e) {
//Process has not terminated.
//So check if it has completed
//if not just destroy it.
if (p != null && !shell.completed.get()) {
shell.setTimedOut();
p.destroy();
}
}
}
}
/**
* This is an IOException with exit code added.
*/
public static class ExitCodeException extends IOException {
int exitCode;
public ExitCodeException(int exitCode, String message) {
super(message);
this.exitCode = exitCode;
}
public int getExitCode() {
return exitCode;
}
}
/**
* process manage container
*
*/
public static class ProcessContainer extends ConcurrentHashMap<Integer, Process>{
private static final ProcessContainer container = new ProcessContainer();
private ProcessContainer(){
super();
}
public static final ProcessContainer getInstance(){
return container;
}
public static void putProcess(Process process){
getInstance().put(process.hashCode(), process);
}
public static int processSize(){
return getInstance().size();
}
public static void removeProcess(Process process){
getInstance().remove(process.hashCode());
}
public static void destroyAllProcess(){
Set<Entry<Integer, Process>> set = getInstance().entrySet();
for (Entry<Integer, Process> entry : set) {
try{
entry.getValue().destroy();
} catch (Exception e) {
e.printStackTrace();
}
}
logger.info("close " + set.size() + " executing process tasks");
}
}
}

175
escheduler-common/src/main/java/cn/escheduler/common/shell/ShellExecutor.java

@ -0,0 +1,175 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.shell;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.util.Map;
/**
* shell command executor.
*
* <code>ShellExecutor</code> should be used in cases where the output
* of the command needs no explicit parsing and where the command, working
* directory and the environment remains unchanged. The output of the command
* is stored as-is and is expected to be small.
*/
public class ShellExecutor extends AbstractShell {
private String[] command;
private StringBuffer output;
public ShellExecutor(String... execString) {
this(execString, null);
}
public ShellExecutor(String[] execString, File dir) {
this(execString, dir, null);
}
public ShellExecutor(String[] execString, File dir,
Map<String, String> env) {
this(execString, dir, env , 0L);
}
/**
* Create a new instance of the ShellExecutor to execute a command.
*
* @param execString The command to execute with arguments
* @param dir If not-null, specifies the directory which should be set
* as the current working directory for the command.
* If null, the current working directory is not modified.
* @param env If not-null, environment of the command will include the
* key-value pairs specified in the map. If null, the current
* environment is not modified.
* @param timeout Specifies the time in milliseconds, after which the
* command will be killed and the status marked as timedout.
* If 0, the command will not be timed out.
*/
public ShellExecutor(String[] execString, File dir,
Map<String, String> env, long timeout) {
command = execString.clone();
if (dir != null) {
setWorkingDirectory(dir);
}
if (env != null) {
setEnvironment(env);
}
timeOutInterval = timeout;
}
/**
* Static method to execute a shell command.
* Covers most of the simple cases without requiring the user to implement
* the <code>AbstractShell</code> interface.
* @param cmd shell command to execute.
* @return the output of the executed command.
*/
public static String execCommand(String... cmd) throws IOException {
return execCommand(null, cmd, 0L);
}
/**
* Static method to execute a shell command.
* Covers most of the simple cases without requiring the user to implement
* the <code>AbstractShell</code> interface.
* @param env the map of environment key=value
* @param cmd shell command to execute.
* @param timeout time in milliseconds after which script should be marked timeout
* @return the output of the executed command.
*/
public static String execCommand(Map<String, String> env, String[] cmd,
long timeout) throws IOException {
ShellExecutor exec = new ShellExecutor(cmd, null, env,
timeout);
exec.execute();
return exec.getOutput();
}
/**
* Static method to execute a shell command.
* Covers most of the simple cases without requiring the user to implement
* the <code>AbstractShell</code> interface.
* @param env the map of environment key=value
* @param cmd shell command to execute.
* @return the output of the executed command.
*/
public static String execCommand(Map<String,String> env, String ... cmd)
throws IOException {
return execCommand(env, cmd, 0L);
}
/**
* Execute the shell command
*
*/
public void execute() throws IOException {
this.run();
}
@Override
protected String[] getExecString() {
return command;
}
@Override
protected void parseExecResult(BufferedReader lines) throws IOException {
output = new StringBuffer();
char[] buf = new char[1024];
int nRead;
// append every chunk read from the stream, not just the last one, so long output is not truncated
while ((nRead = lines.read(buf, 0, buf.length)) > 0) {
output.append(buf, 0, nRead);
}
}
/**
*
* Get the output of the shell command
*/
public String getOutput() {
return (output == null) ? "" : output.toString();
}
/**
* Returns the command of this instance.
* Arguments containing spaces are wrapped in quotes; other
* arguments are presented raw
*
* @return a string representation of the object
*/
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
String[] args = getExecString();
for (String s : args) {
if (s.indexOf(' ') >= 0) {
builder.append('"').append(s).append('"');
} else {
builder.append(s);
}
builder.append(' ');
}
return builder.toString();
}
}
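A minimal usage sketch of the static helpers; the commands and the environment variable are placeholders:

import cn.escheduler.common.shell.ShellExecutor;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

public class ShellExecutorExample {
    public static void main(String[] args) throws IOException {
        // simplest form: run a command and capture its stdout
        String output = ShellExecutor.execCommand("echo", "hello");
        System.out.println(output);

        // with an environment and a 5 second timeout
        Map<String, String> env = new HashMap<>();
        env.put("GREETING", "hi"); // hypothetical variable
        String output2 = ShellExecutor.execCommand(env, new String[]{"printenv", "GREETING"}, 5000L);
        System.out.println(output2);
    }
}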

69
escheduler-common/src/main/java/cn/escheduler/common/task/AbstractParameters.java

@ -0,0 +1,69 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.task;
import cn.escheduler.common.process.Property;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
/**
* job params related class
*/
public abstract class AbstractParameters implements IParameters {
@Override
public abstract boolean checkParameters();
@Override
public abstract List<String> getResourceFilesList();
/**
* local parameters
*/
public List<Property> localParams;
/**
* get local parameters list
* @return
*/
public List<Property> getLocalParams() {
return localParams;
}
public void setLocalParams(List<Property> localParams) {
this.localParams = localParams;
}
/**
* get local parameters map
* @return
*/
public Map<String,Property> getLocalParametersMap() {
if (localParams != null) {
Map<String,Property> localParametersMaps = new LinkedHashMap<>();
for (Property property : localParams) {
localParametersMaps.put(property.getProp(),property);
}
return localParametersMaps;
}
return null;
}
}
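A short sketch of how a concrete subclass exposes its local parameters as a map; it uses ShellParameters (added later in this commit) and assumes the Direct.IN and DataType.VARCHAR enum constants:

import cn.escheduler.common.enums.DataType;
import cn.escheduler.common.enums.Direct;
import cn.escheduler.common.process.Property;
import cn.escheduler.common.task.shell.ShellParameters;

import java.util.Arrays;
import java.util.Map;

public class LocalParamsExample {
    public static void main(String[] args) {
        ShellParameters params = new ShellParameters();
        params.setRawScript("echo ${bizDate}"); // placeholder script
        params.setLocalParams(Arrays.asList(
                new Property("bizDate", Direct.IN, DataType.VARCHAR, "2019-01-01")));

        // keyed by property name, in insertion order
        Map<String, Property> map = params.getLocalParametersMap();
        System.out.println(map.get("bizDate").getValue()); // 2019-01-01
    }
}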

38
escheduler-common/src/main/java/cn/escheduler/common/task/IParameters.java

@ -0,0 +1,38 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.task;
import java.util.List;
/**
* job params interface
*/
public interface IParameters {
/**
* check parameters is valid
*
* @return
*/
boolean checkParameters();
/**
* get project resource files list
*
* @return resource files list
*/
List<String> getResourceFilesList();
}

81
escheduler-common/src/main/java/cn/escheduler/common/task/TaskTimeoutParameter.java

@ -0,0 +1,81 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.task;
import cn.escheduler.common.enums.TaskTimeoutStrategy;
/**
* task timeout parameter
*/
public class TaskTimeoutParameter {
private boolean enable;
/**
* task timeout strategy
*/
private TaskTimeoutStrategy strategy;
/**
* task timeout interval
*/
private int interval;
public boolean getEnable() {
return enable;
}
public void setEnable(boolean enable) {
this.enable = enable;
}
public TaskTimeoutStrategy getStrategy() {
return strategy;
}
public void setStrategy(TaskTimeoutStrategy strategy) {
this.strategy = strategy;
}
public int getInterval() {
return interval;
}
public void setInterval(int interval) {
this.interval = interval;
}
public TaskTimeoutParameter() {
}
public TaskTimeoutParameter(boolean enable) {
this.enable = enable;
}
public TaskTimeoutParameter(boolean enable, TaskTimeoutStrategy strategy, int interval) {
this.enable = enable;
this.strategy = strategy;
this.interval = interval;
}
@Override
public String toString() {
return "TaskTimeoutParameter{" +
"enable=" + enable +
", strategy=" + strategy +
", interval=" + interval +
'}';
}
}

58
escheduler-common/src/main/java/cn/escheduler/common/task/dependent/DependentParameters.java

@ -0,0 +1,58 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.task.dependent;
import cn.escheduler.common.enums.DependentRelation;
import cn.escheduler.common.model.DependentTaskModel;
import cn.escheduler.common.task.AbstractParameters;
import java.util.ArrayList;
import java.util.List;
public class DependentParameters extends AbstractParameters {
private List<DependentTaskModel> dependTaskList;
private DependentRelation relation;
@Override
public boolean checkParameters() {
return true;
}
@Override
public List<String> getResourceFilesList() {
return new ArrayList<>();
}
public List<DependentTaskModel> getDependTaskList() {
return dependTaskList;
}
public void setDependTaskList(List<DependentTaskModel> dependTaskList) {
this.dependTaskList = dependTaskList;
}
public DependentRelation getRelation() {
return relation;
}
public void setRelation(DependentRelation relation) {
this.relation = relation;
}
}

145
escheduler-common/src/main/java/cn/escheduler/common/task/mr/MapreduceParameters.java

@ -0,0 +1,145 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.task.mr;
import cn.escheduler.common.enums.ProgramType;
import cn.escheduler.common.process.ResourceInfo;
import cn.escheduler.common.task.AbstractParameters;
import java.util.List;
import java.util.stream.Collectors;
public class MapreduceParameters extends AbstractParameters {
/**
* major jar
*/
private ResourceInfo mainJar;
/**
* major class
*/
private String mainClass;
/**
* arguments
*/
private String mainArgs;
/**
* other arguments
*/
private String others;
/**
* queue
*/
private String queue;
/**
* resource list
*/
private List<ResourceInfo> resourceList;
/**
* program type
* 0 JAVA,1 SCALA,2 PYTHON
*/
private ProgramType programType;
public String getMainClass() {
return mainClass;
}
public void setMainClass(String mainClass) {
this.mainClass = mainClass;
}
public String getMainArgs() {
return mainArgs;
}
public void setMainArgs(String mainArgs) {
this.mainArgs = mainArgs;
}
public String getOthers() {
return others;
}
public void setOthers(String others) {
this.others = others;
}
public String getQueue() {
return queue;
}
public void setQueue(String queue) {
this.queue = queue;
}
public List<ResourceInfo> getResourceList() {
return this.resourceList;
}
public void setResourceList(List<ResourceInfo> resourceList) {
this.resourceList = resourceList;
}
public void setMainJar(ResourceInfo mainJar) {
this.mainJar = mainJar;
}
public ResourceInfo getMainJar() {
return mainJar;
}
public ProgramType getProgramType() {
return programType;
}
public void setProgramType(ProgramType programType) {
this.programType = programType;
}
@Override
public boolean checkParameters() {
return this.mainJar != null && this.programType != null;
}
@Override
public List<String> getResourceFilesList() {
if (resourceList != null) {
this.resourceList.add(mainJar);
return resourceList.stream()
.map(p -> p.getRes()).collect(Collectors.toList());
}
return null;
}
@Override
public String toString() {
return "mainJar=" + mainJar
+ ", mainClass=" + mainClass
+ ", mainArgs=" + mainArgs
+ ", queue=" + queue
+ ", others=" + others
;
}
}

89
escheduler-common/src/main/java/cn/escheduler/common/task/procedure/ProcedureParameters.java

@ -0,0 +1,89 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.task.procedure;
import cn.escheduler.common.task.AbstractParameters;
import org.apache.commons.lang.StringUtils;
import java.util.ArrayList;
import java.util.List;
/**
* procedure parameter
*/
public class ProcedureParameters extends AbstractParameters {
/**
* data source type, e.g. MYSQL, POSTGRES, HIVE ...
*/
private String type;
/**
* data source id
*/
private int datasource;
/**
* procedure name
*/
private String method;
public String getType() {
return type;
}
public void setType(String type) {
this.type = type;
}
public int getDatasource() {
return datasource;
}
public void setDatasource(int datasource) {
this.datasource = datasource;
}
public String getMethod() {
return method;
}
public void setMethod(String method) {
this.method = method;
}
@Override
public boolean checkParameters() {
return datasource != 0 && StringUtils.isNotEmpty(type) && StringUtils.isNotEmpty(method);
}
@Override
public List<String> getResourceFilesList() {
return new ArrayList<>();
}
@Override
public String toString() {
return "ProcedureParameters{" +
"type='" + type + '\'' +
", datasource=" + datasource +
", method='" + method + '\'' +
'}';
}
}

67
escheduler-common/src/main/java/cn/escheduler/common/task/python/PythonParameters.java

@ -0,0 +1,67 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.task.python;
import cn.escheduler.common.process.ResourceInfo;
import cn.escheduler.common.task.AbstractParameters;
import java.util.List;
import java.util.stream.Collectors;
public class PythonParameters extends AbstractParameters {
/**
* origin python script
*/
private String rawScript;
/**
* resource list
*/
private List<ResourceInfo> resourceList;
public String getRawScript() {
return rawScript;
}
public void setRawScript(String rawScript) {
this.rawScript = rawScript;
}
public List<ResourceInfo> getResourceList() {
return resourceList;
}
public void setResourceList(List<ResourceInfo> resourceList) {
this.resourceList = resourceList;
}
@Override
public boolean checkParameters() {
return rawScript != null && !rawScript.isEmpty();
}
@Override
public List<String> getResourceFilesList() {
if (resourceList != null) {
return resourceList.stream()
.map(p -> p.getRes()).collect(Collectors.toList());
}
return null;
}
}

70
escheduler-common/src/main/java/cn/escheduler/common/task/shell/ShellParameters.java

@ -0,0 +1,70 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.task.shell;
import cn.escheduler.common.process.ResourceInfo;
import cn.escheduler.common.task.AbstractParameters;
import java.util.List;
import java.util.stream.Collectors;
/**
* shell parameters
*/
public class ShellParameters extends AbstractParameters {
/**
* shell script
*/
private String rawScript;
/**
* resource list
*/
private List<ResourceInfo> resourceList;
public String getRawScript() {
return rawScript;
}
public void setRawScript(String rawScript) {
this.rawScript = rawScript;
}
public List<ResourceInfo> getResourceList() {
return resourceList;
}
public void setResourceList(List<ResourceInfo> resourceList) {
this.resourceList = resourceList;
}
@Override
public boolean checkParameters() {
return rawScript != null && !rawScript.isEmpty();
}
@Override
public List<String> getResourceFilesList() {
if (resourceList != null) {
return resourceList.stream()
.map(p -> p.getRes()).collect(Collectors.toList());
}
return null;
}
}
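A hedged sketch of how the resource list feeds getResourceFilesList; the script and the resource name are placeholders:

import cn.escheduler.common.process.ResourceInfo;
import cn.escheduler.common.task.shell.ShellParameters;

import java.util.ArrayList;
import java.util.List;

public class ShellResourceExample {
    public static void main(String[] args) {
        ShellParameters params = new ShellParameters();
        params.setRawScript("sh helper.sh"); // placeholder script

        ResourceInfo res = new ResourceInfo();
        res.setRes("scripts/helper.sh"); // hypothetical uploaded resource name
        List<ResourceInfo> resources = new ArrayList<>();
        resources.add(res);
        params.setResourceList(resources);

        System.out.println(params.checkParameters());      // true
        System.out.println(params.getResourceFilesList()); // [scripts/helper.sh]
    }
}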

220
escheduler-common/src/main/java/cn/escheduler/common/task/spark/SparkParameters.java

@ -0,0 +1,220 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.task.spark;
import cn.escheduler.common.enums.ProgramType;
import cn.escheduler.common.process.ResourceInfo;
import cn.escheduler.common.task.AbstractParameters;
import java.util.List;
import java.util.stream.Collectors;
/**
* spark parameters
*/
public class SparkParameters extends AbstractParameters {
/**
* major jar
*/
private ResourceInfo mainJar;
/**
* major class
*/
private String mainClass;
/**
* deploy mode
*/
private String deployMode;
/**
* arguments
*/
private String mainArgs;
/**
* driver-cores Number of cores used by the driver, only in cluster mode
*/
private int driverCores;
/**
* driver-memory Memory for driver
*/
private String driverMemory;
/**
* num-executors Number of executors to launch
*/
private int numExecutors;
/**
* executor-cores Number of cores per executor
*/
private int executorCores;
/**
* Memory per executor
*/
private String executorMemory;
/**
* resource list
*/
private List<ResourceInfo> resourceList;
/**
* The YARN queue to submit to
*/
private String queue;
/**
* other arguments
*/
private String others;
/**
* program type
* 0 JAVA,1 SCALA,2 PYTHON
*/
private ProgramType programType;
public ResourceInfo getMainJar() {
return mainJar;
}
public void setMainJar(ResourceInfo mainJar) {
this.mainJar = mainJar;
}
public String getMainClass() {
return mainClass;
}
public void setMainClass(String mainClass) {
this.mainClass = mainClass;
}
public String getDeployMode() {
return deployMode;
}
public void setDeployMode(String deployMode) {
this.deployMode = deployMode;
}
public String getMainArgs() {
return mainArgs;
}
public void setMainArgs(String mainArgs) {
this.mainArgs = mainArgs;
}
public int getDriverCores() {
return driverCores;
}
public void setDriverCores(int driverCores) {
this.driverCores = driverCores;
}
public String getDriverMemory() {
return driverMemory;
}
public void setDriverMemory(String driverMemory) {
this.driverMemory = driverMemory;
}
public int getNumExecutors() {
return numExecutors;
}
public void setNumExecutors(int numExecutors) {
this.numExecutors = numExecutors;
}
public int getExecutorCores() {
return executorCores;
}
public void setExecutorCores(int executorCores) {
this.executorCores = executorCores;
}
public String getExecutorMemory() {
return executorMemory;
}
public void setExecutorMemory(String executorMemory) {
this.executorMemory = executorMemory;
}
public String getQueue() {
return queue;
}
public void setQueue(String queue) {
this.queue = queue;
}
public List<ResourceInfo> getResourceList() {
return resourceList;
}
public void setResourceList(List<ResourceInfo> resourceList) {
this.resourceList = resourceList;
}
public String getOthers() {
return others;
}
public void setOthers(String others) {
this.others = others;
}
public ProgramType getProgramType() {
return programType;
}
public void setProgramType(ProgramType programType) {
this.programType = programType;
}
@Override
public boolean checkParameters() {
return mainJar != null && programType != null;
}
@Override
public List<String> getResourceFilesList() {
if(resourceList !=null ) {
this.resourceList.add(mainJar);
return resourceList.stream()
.map(p -> p.getRes()).collect(Collectors.toList());
}
return null;
}
}

147
escheduler-common/src/main/java/cn/escheduler/common/task/sql/SqlParameters.java

@ -0,0 +1,147 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.task.sql;
import cn.escheduler.common.task.AbstractParameters;
import org.apache.commons.lang.StringUtils;
import java.util.ArrayList;
import java.util.List;
/**
* Sql/Hql parameter
*/
public class SqlParameters extends AbstractParameters {
/**
* data source type, e.g. MYSQL, POSTGRES, HIVE ...
*/
private String type;
/**
* datasource id
*/
private int datasource;
/**
* sql
*/
private String sql;
/**
* sql type
* 0 query
* 1 NON_QUERY
*/
private int sqlType;
/**
* udf list
*/
private String udfs;
/**
* show type
* 0 TABLE
* 1 TEXT
* 2 attachment
* 3 TABLE+attachment
*/
private String showType;
/**
* SQL connection parameters
*/
private String connParams;
public String getType() {
return type;
}
public void setType(String type) {
this.type = type;
}
public int getDatasource() {
return datasource;
}
public void setDatasource(int datasource) {
this.datasource = datasource;
}
public String getSql() {
return sql;
}
public void setSql(String sql) {
this.sql = sql;
}
public String getUdfs() {
return udfs;
}
public void setUdfs(String udfs) {
this.udfs = udfs;
}
public int getSqlType() {
return sqlType;
}
public void setSqlType(int sqlType) {
this.sqlType = sqlType;
}
public String getShowType() {
return showType;
}
public void setShowType(String showType) {
this.showType = showType;
}
public String getConnParams() {
return connParams;
}
public void setConnParams(String connParams) {
this.connParams = connParams;
}
@Override
public boolean checkParameters() {
return datasource != 0 && StringUtils.isNotEmpty(type) && StringUtils.isNotEmpty(sql);
}
@Override
public List<String> getResourceFilesList() {
return new ArrayList<>();
}
@Override
public String toString() {
return "SqlParameters{" +
"type='" + type + '\'' +
", datasource=" + datasource +
", sql='" + sql + '\'' +
", sqlType=" + sqlType +
", udfs='" + udfs + '\'' +
", showType='" + showType + '\'' +
", connParams='" + connParams + '\'' +
'}';
}
}

27
escheduler-common/src/main/java/cn/escheduler/common/task/sql/SqlType.java

@ -0,0 +1,27 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.task.sql;
public enum SqlType {
/**
* sql type
* 0 query
* 1 NON_QUERY
*/
QUERY, NON_QUERY
}

48
escheduler-common/src/main/java/cn/escheduler/common/task/subprocess/SubProcessParameters.java

@ -0,0 +1,48 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.task.subprocess;
import cn.escheduler.common.task.AbstractParameters;
import java.util.ArrayList;
import java.util.List;
public class SubProcessParameters extends AbstractParameters {
/**
* process definition id
*/
private Integer processDefinitionId;
public void setProcessDefinitionId(Integer processDefinitionId){
this.processDefinitionId = processDefinitionId;
}
public Integer getProcessDefinitionId(){
return this.processDefinitionId;
}
@Override
public boolean checkParameters() {
return this.processDefinitionId != null && this.processDefinitionId != 0;
}
@Override
public List<String> getResourceFilesList() {
return new ArrayList<>();
}
}

39
escheduler-common/src/main/java/cn/escheduler/common/thread/Stopper.java

@ -0,0 +1,39 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.thread;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* when the process is shutting down, the signal is set to true, and all worker threads check this flag to stop working
*/
public class Stopper {
private static volatile AtomicBoolean signal = new AtomicBoolean(false);
public static final boolean isStoped(){
return signal.get();
}
public static final boolean isRunning(){
return !signal.get();
}
public static final void stop(){
signal.getAndSet(true);
}
}
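A minimal sketch of the intended pattern: worker loops keep checking the flag, and a shutdown path flips it once:

import cn.escheduler.common.thread.Stopper;

public class StopperExample {
    public static void main(String[] args) throws InterruptedException {
        Thread worker = new Thread(() -> {
            while (Stopper.isRunning()) {
                // do one unit of work, then re-check the flag
            }
            System.out.println("worker stopped");
        });
        worker.start();

        Thread.sleep(100);
        Stopper.stop(); // flips the shared signal; every loop that checks it will exit
        worker.join();
    }
}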

310
escheduler-common/src/main/java/cn/escheduler/common/thread/ThreadPoolExecutors.java

@ -0,0 +1,310 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.thread;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.PrintWriter;
import java.lang.management.ThreadInfo;
import java.util.List;
import java.util.Map;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicLong;
/**
*
* singleton thread pool wrapper
*
*/
public class ThreadPoolExecutors {
private static final Logger logger = LoggerFactory.getLogger(ThreadPoolExecutors.class);
private static Executor executor;
private static ThreadPoolExecutors threadPoolExecutors;
private ThreadPoolExecutors(){}
public static ThreadPoolExecutors getInstance(){
return getInstance("thread_pool",0);
}
public static ThreadPoolExecutors getInstance(String name, int maxThreads){
if (null == threadPoolExecutors) {
synchronized (ThreadPoolExecutors.class) {
if(null == threadPoolExecutors) {
threadPoolExecutors = new ThreadPoolExecutors();
}
if(null == executor) {
executor = new Executor(null == name? "thread_pool" : name, maxThreads == 0? Runtime.getRuntime().availableProcessors() * 3 : maxThreads);
}
}
}
return threadPoolExecutors;
}
/**
* Executes the given task sometime in the future. The task may execute in a new thread or in an existing pooled thread.
* If the task cannot be submitted for execution, either because this executor has been shutdown or because its capacity has been reached,
* the task is handled by the current RejectedExecutionHandler.
* @param event
*/
public void execute(final Runnable event) {
Executor executor = getExecutor();
if (executor == null) {
logger.error("Cannot execute [" + event + "] because the executor is missing.");
} else {
executor.execute(event);
}
}
public Future<?> submit(Runnable event) {
Executor executor = getExecutor();
if (executor == null) {
logger.error("Cannot submit [" + event + "] because the executor is missing.");
} else {
return executor.submit(event);
}
return null;
}
public Future<?> submit(Callable<?> task) {
Executor executor = getExecutor();
if (executor == null) {
logger.error("Cannot submit [" + task + "] because the executor is missing.");
} else {
return executor.submit(task);
}
return null;
}
public void printStatus() {
Executor executor = getExecutor();
executor.getStatus().dumpInfo();
}
private Executor getExecutor() {
return executor;
}
public void shutdown() {
if (executor != null) {
List<Runnable> wasRunning = executor.threadPoolExecutor
.shutdownNow();
if (!wasRunning.isEmpty()) {
logger.info(executor + " had " + wasRunning + " on shutdown");
}
}
}
/**
* Executor instance.
*/
private static class Executor {
/**
* how long to retain excess threads
*/
final long keepAliveTimeInMillis = 1000;
/**
* the thread pool executor that services the requests
*/
final TrackingThreadPoolExecutor threadPoolExecutor;
/**
* work queue to use - unbounded queue
*/
final BlockingQueue<Runnable> q = new LinkedBlockingQueue<Runnable>();
private final String name;
private static final AtomicLong seqids = new AtomicLong(0);
private final long id;
protected Executor(String name, int maxThreads) {
this.id = seqids.incrementAndGet();
this.name = name;
//create the thread pool executor
this.threadPoolExecutor = new TrackingThreadPoolExecutor(
maxThreads, maxThreads, keepAliveTimeInMillis,
TimeUnit.MILLISECONDS, q);
// name the threads for this threadpool
ThreadFactoryBuilder tfb = new ThreadFactoryBuilder();
tfb.setNameFormat(this.name + "-%d");
this.threadPoolExecutor.setThreadFactory(tfb.build());
}
/**
* Submit the event to the queue for handling.
*
* @param event
*/
void execute(final Runnable event) {
this.threadPoolExecutor.execute(event);
}
Future<?> submit(Runnable event) {
return this.threadPoolExecutor.submit(event);
}
Future<?> submit(Callable<?> event) {
return this.threadPoolExecutor.submit(event);
}
@Override
public String toString() {
return getClass().getSimpleName() + "-" + id + "-" + name;
}
public ExecutorStatus getStatus() {
List<Runnable> queuedEvents = Lists.newArrayList();
for (Runnable r : q) {
queuedEvents.add(r);
}
List<RunningEventStatus> running = Lists.newArrayList();
for (Map.Entry<Thread, Runnable> e : threadPoolExecutor
.getRunningTasks().entrySet()) {
Runnable r = e.getValue();
running.add(new RunningEventStatus(e.getKey(), r));
}
return new ExecutorStatus(this, queuedEvents, running);
}
}
/**
* A subclass of ThreadPoolExecutor that keeps track of the Runnables that
* are executing at any given point in time.
*/
static class TrackingThreadPoolExecutor extends ThreadPoolExecutor {
private ConcurrentMap<Thread, Runnable> running = Maps
.newConcurrentMap();
public TrackingThreadPoolExecutor(int corePoolSize,
int maximumPoolSize, long keepAliveTime, TimeUnit unit,
BlockingQueue<Runnable> workQueue) {
super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue);
}
@Override
protected void afterExecute(Runnable r, Throwable t) {
super.afterExecute(r, t);
running.remove(Thread.currentThread());
}
@Override
protected void beforeExecute(Thread t, Runnable r) {
Runnable oldPut = running.put(t, r);
assert oldPut == null : "inconsistency for thread " + t;
super.beforeExecute(t, r);
}
/**
* @return a map of the threads currently running tasks inside this
* executor. Each key is an active thread, and the value is the
* task that is currently running. Note that this is not a
* stable snapshot of the map.
*/
public ConcurrentMap<Thread, Runnable> getRunningTasks() {
return running;
}
}
/**
* A snapshot of the status of a particular executor. This includes the
* contents of the executor's pending queue, as well as the threads and
* events currently being processed.
*
* This is a consistent snapshot that is immutable once constructed.
*/
public static class ExecutorStatus {
final Executor executor;
final List<Runnable> queuedEvents;
final List<RunningEventStatus> running;
ExecutorStatus(Executor executor, List<Runnable> queuedEvents,
List<RunningEventStatus> running) {
this.executor = executor;
this.queuedEvents = queuedEvents;
this.running = running;
}
public void dumpInfo() {
PrintWriter out = new PrintWriter(System.out);
out.write("Status for executor: " + executor + "\n");
out.write("=======================================\n");
out.write(queuedEvents.size() + " events queued, "
+ running.size() + " running\n");
if (!queuedEvents.isEmpty()) {
out.write("Queued:\n");
for (Runnable e : queuedEvents) {
out.write(" " + e + "\n");
}
out.write("\n");
}
if (!running.isEmpty()) {
out.write("Running:\n");
for (RunningEventStatus stat : running) {
out.write(" Running on thread '"
+ stat.threadInfo.getThreadName() + "': "
+ stat.event + "\n");
out.write(ThreadUtils.formatThreadInfo(
stat.threadInfo, " "));
out.write("\n");
}
}
out.flush();
}
}
/**
* The status of a particular event that is in the middle of being handled
* by an executor.
*/
public static class RunningEventStatus {
final ThreadInfo threadInfo;
final Runnable event;
public RunningEventStatus(Thread t, Runnable event) {
this.threadInfo = ThreadUtils.getThreadInfo(t);
this.event = event;
}
}
}
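A minimal usage sketch for the singleton above (not part of the commit; the example class, pool name, size and task body are illustrative, and the import assumes ThreadPoolExecutors lives in cn.escheduler.common.thread alongside ThreadUtils):

import cn.escheduler.common.thread.ThreadPoolExecutors;

public class ThreadPoolExecutorsExample {
    public static void main(String[] args) {
        // the first call wins: later calls ignore the name/size arguments
        ThreadPoolExecutors pool = ThreadPoolExecutors.getInstance("demo-pool", 4);
        Runnable hello = () -> System.out.println("hello from " + Thread.currentThread().getName());
        pool.execute(hello);   // fire-and-forget
        pool.submit(hello);    // returns a Future<?>
        pool.printStatus();    // dumps queued and running tasks to stdout
        pool.shutdown();
    }
}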

202
escheduler-common/src/main/java/cn/escheduler/common/thread/ThreadUtils.java

@ -0,0 +1,202 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.thread;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;
import java.util.concurrent.*;
/**
* thread utils
*/
public class ThreadUtils {
private static final ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
private static final int STACK_DEPTH = 20;
/**
* Wrapper over newCachedThreadPool. Thread names are formatted as prefix-ID, where ID is a
* unique, sequentially assigned integer.
* @param prefix
* @return
*/
public static ThreadPoolExecutor newDaemonCachedThreadPool(String prefix){
ThreadFactory threadFactory = namedThreadFactory(prefix);
return ((ThreadPoolExecutor) Executors.newCachedThreadPool(threadFactory));
}
/**
* Create a thread factory that names threads with a prefix and also sets the threads to daemon.
* @param prefix
* @return
*/
private static ThreadFactory namedThreadFactory(String prefix) {
return new ThreadFactoryBuilder().setDaemon(true).setNameFormat(prefix + "-%d").build();
}
/**
* Create a cached thread pool whose max number of threads is `maxThreadNumber`. Thread names
* are formatted as prefix-ID, where ID is a unique, sequentially assigned integer.
* @param prefix
* @param maxThreadNumber
* @param keepAliveSeconds
* @return
*/
public static ThreadPoolExecutor newDaemonCachedThreadPool(String prefix ,
int maxThreadNumber,
int keepAliveSeconds){
ThreadFactory threadFactory = namedThreadFactory(prefix);
ThreadPoolExecutor threadPool = new ThreadPoolExecutor(
// corePoolSize: the max number of threads to create before queuing the tasks
maxThreadNumber,
// maximumPoolSize: because we use an unbounded LinkedBlockingQueue, this one is never reached
maxThreadNumber,
keepAliveSeconds,
TimeUnit.SECONDS,
new LinkedBlockingQueue<Runnable>(),
threadFactory);
threadPool.allowCoreThreadTimeOut(true);
return threadPool;
}
/**
* Wrapper over newFixedThreadPool. Thread names are formatted as prefix-ID, where ID is a
* unique, sequentially assigned integer.
* @param nThreads
* @param prefix
* @return
*/
public static ThreadPoolExecutor newDaemonFixedThreadPool(int nThreads , String prefix){
ThreadFactory threadFactory = namedThreadFactory(prefix);
return ((ThreadPoolExecutor) Executors.newFixedThreadPool(nThreads, threadFactory));
}
/**
* Wrapper over newSingleThreadExecutor.
* @param threadName
* @return
*/
public static ExecutorService newDaemonSingleThreadExecutor(String threadName){
ThreadFactory threadFactory = new ThreadFactoryBuilder()
.setDaemon(true)
.setNameFormat(threadName)
.build();
return Executors.newSingleThreadExecutor(threadFactory);
}
/**
* Wrapper over newFixedThreadPool with daemon threads.
* @param threadName name used for the pool's threads
* @param threadsNum fixed number of threads
* @return
*/
public static ExecutorService newDaemonFixedThreadExecutor(String threadName,int threadsNum){
ThreadFactory threadFactory = new ThreadFactoryBuilder()
.setDaemon(true)
.setNameFormat(threadName)
.build();
return Executors.newFixedThreadPool(threadsNum,threadFactory);
}
/**
* Wrapper over ScheduledThreadPoolExecutor with daemon threads.
* @param threadName name used for the pool's threads
* @param corePoolSize
* @return
*/
public static ScheduledExecutorService newDaemonThreadScheduledExecutor(String threadName,int corePoolSize) {
ThreadFactory threadFactory = new ThreadFactoryBuilder()
.setDaemon(true)
.setNameFormat(threadName)
.build();
ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(corePoolSize, threadFactory);
// By default, a cancelled task is not automatically removed from the work queue until its delay
// elapses. We have to enable it manually.
executor.setRemoveOnCancelPolicy(true);
return executor;
}
public static ThreadInfo getThreadInfo(Thread t) {
long tid = t.getId();
return threadBean.getThreadInfo(tid, STACK_DEPTH);
}
/**
* Format the given ThreadInfo object as a String.
* @param indent a prefix for each line, used for nested indentation
*/
public static String formatThreadInfo(ThreadInfo threadInfo, String indent) {
StringBuilder sb = new StringBuilder();
appendThreadInfo(sb, threadInfo, indent);
return sb.toString();
}
/**
* Print all of the thread's information and stack traces.
*
* @param sb
* @param info
* @param indent
*/
public static void appendThreadInfo(StringBuilder sb,
ThreadInfo info,
String indent) {
boolean contention = threadBean.isThreadContentionMonitoringEnabled();
if (info == null) {
sb.append(indent).append("Inactive (perhaps exited while monitoring was done)\n");
return;
}
String taskName = getTaskName(info.getThreadId(), info.getThreadName());
sb.append(indent).append("Thread ").append(taskName).append(":\n");
Thread.State state = info.getThreadState();
sb.append(indent).append(" State: ").append(state).append("\n");
sb.append(indent).append(" Blocked count: ").append(info.getBlockedCount()).append("\n");
sb.append(indent).append(" Waited count: ").append(info.getWaitedCount()).append("\n");
if (contention) {
sb.append(indent).append(" Blocked time: " + info.getBlockedTime()).append("\n");
sb.append(indent).append(" Waited time: " + info.getWaitedTime()).append("\n");
}
if (state == Thread.State.WAITING) {
sb.append(indent).append(" Waiting on ").append(info.getLockName()).append("\n");
} else if (state == Thread.State.BLOCKED) {
sb.append(indent).append(" Blocked on ").append(info.getLockName()).append("\n");
sb.append(indent).append(" Blocked by ").append(
getTaskName(info.getLockOwnerId(), info.getLockOwnerName())).append("\n");
}
sb.append(indent).append(" Stack:").append("\n");
for (StackTraceElement frame: info.getStackTrace()) {
sb.append(indent).append(" ").append(frame.toString()).append("\n");
}
}
private static String getTaskName(long id, String name) {
if (name == null) {
return Long.toString(id);
}
return id + " (" + name + ")";
}
}
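A short sketch of how these factory methods might be used (illustrative only; the example class, pool names and sizes are arbitrary):

import cn.escheduler.common.thread.ThreadUtils;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ThreadUtilsExample {
    public static void main(String[] args) {
        // cached pool capped at 8 daemon threads named "worker-0", "worker-1", ...
        ThreadPoolExecutor workers = ThreadUtils.newDaemonCachedThreadPool("worker", 8, 60);
        workers.execute(() -> System.out.println(Thread.currentThread().getName()));

        // single-threaded scheduled executor for periodic housekeeping
        ScheduledExecutorService scheduler = ThreadUtils.newDaemonThreadScheduledExecutor("heartbeat", 1);
        scheduler.scheduleAtFixedRate(() -> System.out.println("tick"), 0, 5, TimeUnit.SECONDS);

        ExecutorService single = ThreadUtils.newDaemonSingleThreadExecutor("log-flusher");
        single.shutdown();
        scheduler.shutdown();
        workers.shutdown();
    }
}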

697
escheduler-common/src/main/java/cn/escheduler/common/utils/Bytes.java

@ -0,0 +1,697 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.utils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
/**
* Utility class that handles Bytes
*/
public class Bytes {
private static final Logger logger = LoggerFactory.getLogger(Bytes.class);
public static final String UTF8_ENCODING = "UTF-8";
//An empty instance.
public static final byte [] EMPTY_BYTE_ARRAY = new byte [0];
/**
* Size of int in bytes
*/
public static final int SIZEOF_INT = Integer.SIZE / Byte.SIZE;
/**
* Size of long in bytes
*/
public static final int SIZEOF_LONG = Long.SIZE / Byte.SIZE;
/**
* Size of short in bytes
*/
public static final int SIZEOF_SHORT = Short.SIZE / Byte.SIZE;
/**
* Put bytes at the specified byte array position.
* @param tgtBytes the byte array
* @param tgtOffset position in the array
* @param srcBytes array to write out
* @param srcOffset source offset
* @param srcLength source length
* @return incremented offset
*/
public static int putBytes(byte[] tgtBytes, int tgtOffset, byte[] srcBytes,
int srcOffset, int srcLength) {
System.arraycopy(srcBytes, srcOffset, tgtBytes, tgtOffset, srcLength);
return tgtOffset + srcLength;
}
/**
* Write a single byte out to the specified byte array position.
* @param bytes the byte array
* @param offset position in the array
* @param b byte to write out
* @return incremented offset
*/
public static int putByte(byte[] bytes, int offset, byte b) {
bytes[offset] = b;
return offset + 1;
}
/**
* Returns a new byte array, copied from the passed ByteBuffer.
* @param bb A ByteBuffer
* @return the byte array
*/
public static byte[] toBytes(ByteBuffer bb) {
int length = bb.limit();
byte [] result = new byte[length];
System.arraycopy(bb.array(), bb.arrayOffset(), result, 0, length);
return result;
}
/**
* @param b Presumed UTF-8 encoded byte array.
* @return String made from <code>b</code>
*/
public static String toString(final byte [] b) {
if (b == null) {
return null;
}
return toString(b, 0, b.length);
}
/**
* Joins two byte arrays together using a separator.
* @param b1 The first byte array.
* @param sep The separator to use.
* @param b2 The second byte array.
*/
public static String toString(final byte [] b1,
String sep,
final byte [] b2) {
return toString(b1, 0, b1.length) + sep + toString(b2, 0, b2.length);
}
/**
* Converts UTF-8 encoded bytes into a String. Returns null if the
* input array is null, and an empty string if the length is zero.
*
* @param b Presumed UTF-8 encoded byte array.
* @param off offset into array
* @param len length of utf-8 sequence
* @return String made from <code>b</code> or null
*/
public static String toString(final byte [] b, int off, int len) {
if (b == null) {
return null;
}
if (len == 0) {
return "";
}
return new String(b, off, len, StandardCharsets.UTF_8);
}
/**
* Converts a string to a UTF-8 byte array.
* @param s string
* @return the byte array
*/
public static byte[] toBytes(String s) {
return s.getBytes(StandardCharsets.UTF_8);
}
/**
* Convert a boolean to a byte array. True becomes -1
* and false becomes 0.
*
* @param b value
* @return <code>b</code> encoded in a byte array.
*/
public static byte [] toBytes(final boolean b) {
return new byte[] { b ? (byte) -1 : (byte) 0 };
}
/**
* Reverses {@link #toBytes(boolean)}
* @param b array
* @return True or false.
*/
public static boolean toBoolean(final byte [] b) {
if (b.length != 1) {
throw new IllegalArgumentException("Array has wrong size: " + b.length);
}
return b[0] != (byte) 0;
}
/**
* Convert a long value to a byte array using big-endian.
*
* @param val value to convert
* @return the byte array
*/
public static byte[] toBytes(long val) {
byte [] b = new byte[8];
for (int i = 7; i > 0; i--) {
b[i] = (byte) val;
val >>>= 8;
}
b[0] = (byte) val;
return b;
}
/**
* Converts a byte array to a long value. Reverses
* {@link #toBytes(long)}
* @param bytes array
* @return the long value
*/
public static long toLong(byte[] bytes) {
return toLong(bytes, 0, SIZEOF_LONG);
}
/**
* Converts a byte array to a long value. Assumes there will be
* {@link #SIZEOF_LONG} bytes available.
*
* @param bytes bytes
* @param offset offset
* @return the long value
*/
public static long toLong(byte[] bytes, int offset) {
return toLong(bytes, offset, SIZEOF_LONG);
}
/**
* Converts a byte array to a long value.
*
* @param bytes array of bytes
* @param offset offset into array
* @param length length of data (must be {@link #SIZEOF_LONG})
* @return the long value
* @throws IllegalArgumentException if length is not {@link #SIZEOF_LONG} or
* if there's not enough room in the array at the offset indicated.
*/
public static long toLong(byte[] bytes, int offset, final int length) {
if (length != SIZEOF_LONG || offset + length > bytes.length) {
throw explainWrongLengthOrOffset(bytes, offset, length, SIZEOF_LONG);
}
long l = 0;
for(int i = offset; i < offset + length; i++) {
l <<= 8;
l ^= bytes[i] & 0xFF;
}
return l;
}
private static IllegalArgumentException
explainWrongLengthOrOffset(final byte[] bytes,
final int offset,
final int length,
final int expectedLength) {
String reason;
if (length != expectedLength) {
reason = "Wrong length: " + length + ", expected " + expectedLength;
} else {
reason = "offset (" + offset + ") + length (" + length + ") exceed the"
+ " capacity of the array: " + bytes.length;
}
return new IllegalArgumentException(reason);
}
/**
* Put a long value out to the specified byte array position.
* @param bytes the byte array
* @param offset position in the array
* @param val long to write out
* @return incremented offset
* @throws IllegalArgumentException if the byte array given doesn't have
* enough room at the offset specified.
*/
public static int putLong(byte[] bytes, int offset, long val) {
if (bytes.length - offset < SIZEOF_LONG) {
throw new IllegalArgumentException("Not enough room to put a long at"
+ " offset " + offset + " in a " + bytes.length + " byte array");
}
for(int i = offset + 7; i > offset; i--) {
bytes[i] = (byte) val;
val >>>= 8;
}
bytes[offset] = (byte) val;
return offset + SIZEOF_LONG;
}
/**
* Presumes float encoded as IEEE 754 floating-point "single format"
* @param bytes byte array
* @return Float made from passed byte array.
*/
public static float toFloat(byte [] bytes) {
return toFloat(bytes, 0);
}
/**
* Presumes float encoded as IEEE 754 floating-point "single format"
* @param bytes array to convert
* @param offset offset into array
* @return Float made from passed byte array.
*/
public static float toFloat(byte [] bytes, int offset) {
return Float.intBitsToFloat(toInt(bytes, offset, SIZEOF_INT));
}
/**
* @param bytes byte array
* @param offset offset to write to
* @param f float value
* @return New offset in <code>bytes</code>
*/
public static int putFloat(byte [] bytes, int offset, float f) {
return putInt(bytes, offset, Float.floatToRawIntBits(f));
}
/**
* @param f float value
* @return the float represented as byte []
*/
public static byte [] toBytes(final float f) {
// Encode it as int
return Bytes.toBytes(Float.floatToRawIntBits(f));
}
/**
* @param bytes byte array
* @return Return double made from passed bytes.
*/
public static double toDouble(final byte [] bytes) {
return toDouble(bytes, 0);
}
/**
* @param bytes byte array
* @param offset offset where double is
* @return Return double made from passed bytes.
*/
public static double toDouble(final byte [] bytes, final int offset) {
return Double.longBitsToDouble(toLong(bytes, offset, SIZEOF_LONG));
}
/**
* @param bytes byte array
* @param offset offset to write to
* @param d value
* @return New offset into array <code>bytes</code>
*/
public static int putDouble(byte [] bytes, int offset, double d) {
return putLong(bytes, offset, Double.doubleToLongBits(d));
}
/**
* Serialize a double as the IEEE 754 double format output. The resultant
* array will be 8 bytes long.
*
* @param d value
* @return the double represented as byte []
*/
public static byte [] toBytes(final double d) {
// Encode it as a long
return Bytes.toBytes(Double.doubleToRawLongBits(d));
}
/**
* Convert an int value to a byte array
* @param val value
* @return the byte array
*/
public static byte[] toBytes(int val) {
byte [] b = new byte[4];
for(int i = 3; i > 0; i--) {
b[i] = (byte) val;
val >>>= 8;
}
b[0] = (byte) val;
return b;
}
/**
* Converts a byte array to an int value
* @param bytes byte array
* @return the int value
*/
public static int toInt(byte[] bytes) {
return toInt(bytes, 0, SIZEOF_INT);
}
/**
* Converts a byte array to an int value
* @param bytes byte array
* @param offset offset into array
* @return the int value
*/
public static int toInt(byte[] bytes, int offset) {
return toInt(bytes, offset, SIZEOF_INT);
}
/**
* Converts a byte array to an int value
* @param bytes byte array
* @param offset offset into array
* @param length length of int (has to be {@link #SIZEOF_INT})
* @return the int value
* @throws IllegalArgumentException if length is not {@link #SIZEOF_INT} or
* if there's not enough room in the array at the offset indicated.
*/
public static int toInt(byte[] bytes, int offset, final int length) {
if (length != SIZEOF_INT || offset + length > bytes.length) {
throw explainWrongLengthOrOffset(bytes, offset, length, SIZEOF_INT);
}
int n = 0;
for(int i = offset; i < (offset + length); i++) {
n <<= 8;
n ^= bytes[i] & 0xFF;
}
return n;
}
/**
* Put an int value out to the specified byte array position.
* @param bytes the byte array
* @param offset position in the array
* @param val int to write out
* @return incremented offset
* @throws IllegalArgumentException if the byte array given doesn't have
* enough room at the offset specified.
*/
public static int putInt(byte[] bytes, int offset, int val) {
if (bytes.length - offset < SIZEOF_INT) {
throw new IllegalArgumentException("Not enough room to put an int at"
+ " offset " + offset + " in a " + bytes.length + " byte array");
}
for(int i= offset + 3; i > offset; i--) {
bytes[i] = (byte) val;
val >>>= 8;
}
bytes[offset] = (byte) val;
return offset + SIZEOF_INT;
}
/**
* Convert a short value to a byte array of {@link #SIZEOF_SHORT} bytes long.
* @param val value
* @return the byte array
*/
public static byte[] toBytes(short val) {
byte[] b = new byte[SIZEOF_SHORT];
b[1] = (byte) val;
val >>= 8;
b[0] = (byte) val;
return b;
}
/**
* Converts a byte array to a short value
* @param bytes byte array
* @return the short value
*/
public static short toShort(byte[] bytes) {
return toShort(bytes, 0, SIZEOF_SHORT);
}
/**
* Converts a byte array to a short value
* @param bytes byte array
* @param offset offset into array
* @return the short value
*/
public static short toShort(byte[] bytes, int offset) {
return toShort(bytes, offset, SIZEOF_SHORT);
}
/**
* Converts a byte array to a short value
* @param bytes byte array
* @param offset offset into array
* @param length length, has to be {@link #SIZEOF_SHORT}
* @return the short value
* @throws IllegalArgumentException if length is not {@link #SIZEOF_SHORT}
* or if there's not enough room in the array at the offset indicated.
*/
public static short toShort(byte[] bytes, int offset, final int length) {
if (length != SIZEOF_SHORT || offset + length > bytes.length) {
throw explainWrongLengthOrOffset(bytes, offset, length, SIZEOF_SHORT);
}
short n = 0;
n ^= bytes[offset] & 0xFF;
n <<= 8;
n ^= bytes[offset+1] & 0xFF;
return n;
}
/**
* This method will get a sequence of bytes from pos -> limit,
* but will restore pos after.
* @param buf
* @return byte array
*/
public static byte[] getBytes(ByteBuffer buf) {
int savedPos = buf.position();
byte [] newBytes = new byte[buf.remaining()];
buf.get(newBytes);
buf.position(savedPos);
return newBytes;
}
/**
* Put a short value out to the specified byte array position.
* @param bytes the byte array
* @param offset position in the array
* @param val short to write out
* @return incremented offset
* @throws IllegalArgumentException if the byte array given doesn't have
* enough room at the offset specified.
*/
public static int putShort(byte[] bytes, int offset, short val) {
if (bytes.length - offset < SIZEOF_SHORT) {
throw new IllegalArgumentException("Not enough room to put a short at"
+ " offset " + offset + " in a " + bytes.length + " byte array");
}
bytes[offset+1] = (byte) val;
val >>= 8;
bytes[offset] = (byte) val;
return offset + SIZEOF_SHORT;
}
/**
* Convert a BigDecimal value to a byte array
*
* @param val
* @return the byte array
*/
public static byte[] toBytes(BigDecimal val) {
byte[] valueBytes = val.unscaledValue().toByteArray();
byte[] result = new byte[valueBytes.length + SIZEOF_INT];
int offset = putInt(result, 0, val.scale());
putBytes(result, offset, valueBytes, 0, valueBytes.length);
return result;
}
/**
* Converts a byte array to a BigDecimal
*
* @param bytes
* @return the BigDecimal value
*/
public static BigDecimal toBigDecimal(byte[] bytes) {
return toBigDecimal(bytes, 0, bytes.length);
}
/**
* Converts a byte array to a BigDecimal value
*
* @param bytes
* @param offset
* @param length
* @return the BigDecimal value
*/
public static BigDecimal toBigDecimal(byte[] bytes, int offset, final int length) {
if (bytes == null || length < SIZEOF_INT + 1 ||
(offset + length > bytes.length)) {
return null;
}
int scale = toInt(bytes, offset);
byte[] tcBytes = new byte[length - SIZEOF_INT];
System.arraycopy(bytes, offset + SIZEOF_INT, tcBytes, 0, length - SIZEOF_INT);
return new BigDecimal(new BigInteger(tcBytes), scale);
}
/**
* Put a BigDecimal value out to the specified byte array position.
*
* @param bytes the byte array
* @param offset position in the array
* @param val BigDecimal to write out
* @return incremented offset
*/
public static int putBigDecimal(byte[] bytes, int offset, BigDecimal val) {
if (bytes == null) {
return offset;
}
byte[] valueBytes = val.unscaledValue().toByteArray();
// write the scale followed by the unscaled value into the target array itself
offset = putInt(bytes, offset, val.scale());
return putBytes(bytes, offset, valueBytes, 0, valueBytes.length);
}
/**
* @param a lower half
* @param b upper half
* @return New array that has a in lower half and b in upper half.
*/
public static byte [] add(final byte [] a, final byte [] b) {
return add(a, b, EMPTY_BYTE_ARRAY);
}
/**
* @param a first third
* @param b second third
* @param c third third
* @return New array made from a, b and c
*/
public static byte [] add(final byte [] a, final byte [] b, final byte [] c) {
byte [] result = new byte[a.length + b.length + c.length];
System.arraycopy(a, 0, result, 0, a.length);
System.arraycopy(b, 0, result, a.length, b.length);
System.arraycopy(c, 0, result, a.length + b.length, c.length);
return result;
}
/**
* @param a array
* @param length amount of bytes to grab
* @return First <code>length</code> bytes from <code>a</code>
*/
public static byte [] head(final byte [] a, final int length) {
if (a.length < length) {
return null;
}
byte [] result = new byte[length];
System.arraycopy(a, 0, result, 0, length);
return result;
}
/**
* @param a array
* @param length amount of bytes to snarf
* @return Last <code>length</code> bytes from <code>a</code>
*/
public static byte [] tail(final byte [] a, final int length) {
if (a.length < length) {
return null;
}
byte [] result = new byte[length];
System.arraycopy(a, a.length - length, result, 0, length);
return result;
}
/**
* @param a array
* @param length new array size
* @return Value in <code>a</code> plus <code>length</code> prepended 0 bytes
*/
public static byte [] padHead(final byte [] a, final int length) {
byte[] padding = getPadding(length);
return add(padding,a);
}
private static byte[] getPadding(int length) {
byte[] padding = new byte[length];
for (int i = 0; i < length; i++) {
padding[i] = 0;
}
return padding;
}
/**
* @param a array
* @param length new array size
* @return Value in <code>a</code> plus <code>length</code> appended 0 bytes
*/
public static byte [] padTail(final byte [] a, final int length) {
byte[] padding = getPadding(length);
return add(a,padding);
}
/**
* @param bytes array to hash
* @param offset offset to start from
* @param length length to hash
* @return the computed hash code
* */
public static int hashCode(byte[] bytes, int offset, int length) {
int hash = 1;
for (int i = offset; i < offset + length; i++) {
hash = (31 * hash) + (int) bytes[i];
}
return hash;
}
/**
* @param t operands
* @return Array of byte arrays made from the passed array of Strings
*/
public static byte [][] toByteArrays(final String [] t) {
byte [][] result = new byte[t.length][];
for (int i = 0; i < t.length; i++) {
result[i] = Bytes.toBytes(t[i]);
}
return result;
}
/**
* @param column operand
* @return A byte array of a byte array where first and only entry is
* <code>column</code>
*/
public static byte [][] toByteArrays(final String column) {
return toByteArrays(toBytes(column));
}
/**
* @param column operand
* @return A byte array of a byte array where first and only entry is
* <code>column</code>
*/
public static byte [][] toByteArrays(final byte [] column) {
byte [][] result = new byte[1][];
result[0] = column;
return result;
}
}
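A quick round-trip sketch for the conversions above (the example class and values are arbitrary):

import cn.escheduler.common.utils.Bytes;

import java.math.BigDecimal;

public class BytesExample {
    public static void main(String[] args) {
        byte[] encodedLong = Bytes.toBytes(42L);            // 8 bytes, big-endian
        long decodedLong = Bytes.toLong(encodedLong);       // 42

        byte[] encodedText = Bytes.toBytes("hello");        // UTF-8 bytes
        String decodedText = Bytes.toString(encodedText);   // "hello"

        byte[] encodedDecimal = Bytes.toBytes(new BigDecimal("3.14"));
        BigDecimal decodedDecimal = Bytes.toBigDecimal(encodedDecimal);

        System.out.println(decodedLong + " " + decodedText + " " + decodedDecimal);
    }
}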

292
escheduler-common/src/main/java/cn/escheduler/common/utils/CollectionUtils.java

@ -0,0 +1,292 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.utils;
import org.apache.commons.collections.BeanMap;
import org.apache.commons.lang.StringUtils;
import java.util.*;
/**
* Provides utility methods and decorators for {@link Collection} instances.
* <p>
* Various utility methods might put the input objects into a Set/Map/Bag. In case
* the input objects override {@link Object#equals(Object)}, it is mandatory that
* the general contract of the {@link Object#hashCode()} method is maintained.
* <p>
* NOTE: From 4.0, method parameters will take {@link Iterable} objects when possible.
*
* @version $Id: CollectionUtils.java 1686855 2015-06-22 13:00:27Z tn $
* @since 1.0
*/
public class CollectionUtils {
/**
* Returns a new {@link Collection} containing <i>a</i> minus <i>b</i>.
* <p>
* The cardinality of each element <i>e</i> in the returned {@link Collection}
* will be the cardinality of <i>e</i> in <i>a</i> minus the cardinality of
* <i>e</i> in <i>b</i>, or zero, whichever is greater.
*
* @param a the collection to subtract from, must not be null
* @param b the collection to subtract, must not be null
* @return a new collection with the results
* @see Collection#removeAll
*/
public static <T> Collection<T> subtract(Set<T> a, Set<T> b) {
return org.apache.commons.collections4.CollectionUtils.subtract(a, b);
}
public static boolean isNotEmpty(Collection coll) {
return !isEmpty(coll);
}
public static boolean isEmpty(Collection coll) {
return coll == null || coll.isEmpty();
}
/**
* String to map
*
* @param str string
* @param separator separator
* @return
*/
public static Map<String, String> stringToMap(String str, String separator) {
return stringToMap(str, separator, "");
}
/**
* String to map
*
* @param str string
* @param separator separator
* @param keyPrefix prefix
* @return
*/
public static Map<String, String> stringToMap(String str, String separator, String keyPrefix) {
if (null == str || "".equals(str)) {
return null;
}
if (null == separator || "".equals(separator)) {
return null;
}
String[] strings = str.split(separator);
int mapLength = strings.length;
if ((strings.length % 2) != 0) {
mapLength = mapLength + 1;
}
Map<String, String> map = new HashMap<>(mapLength);
for (int i = 0; i < strings.length; i++) {
String[] strArray = strings[i].split("=");
// skip malformed entries that are not of the form key=value
if (strArray.length < 2) {
continue;
}
//strArray[0] KEY strArray[1] VALUE
if (StringUtils.isEmpty(keyPrefix)) {
map.put(strArray[0], strArray[1]);
} else {
map.put(keyPrefix + strArray[0], strArray[1]);
}
}
return map;
}
/**
* Helper class to easily access cardinality properties of two collections.
*
* @param <O> the element type
*/
private static class CardinalityHelper<O> {
/**
* Contains the cardinality for each object in collection A.
*/
final Map<O, Integer> cardinalityA;
/**
* Contains the cardinality for each object in collection B.
*/
final Map<O, Integer> cardinalityB;
/**
* Create a new CardinalityHelper for two collections.
*
* @param a the first collection
* @param b the second collection
*/
public CardinalityHelper(final Iterable<? extends O> a, final Iterable<? extends O> b) {
cardinalityA = CollectionUtils.<O>getCardinalityMap(a);
cardinalityB = CollectionUtils.<O>getCardinalityMap(b);
}
/**
* Returns the maximum frequency of an object.
*
* @param obj the object
* @return the maximum frequency of the object
*/
public final int max(final Object obj) {
return Math.max(freqA(obj), freqB(obj));
}
/**
* Returns the minimum frequency of an object.
*
* @param obj the object
* @return the minimum frequency of the object
*/
public final int min(final Object obj) {
return Math.min(freqA(obj), freqB(obj));
}
/**
* Returns the frequency of this object in collection A.
*
* @param obj the object
* @return the frequency of the object in collection A
*/
public int freqA(final Object obj) {
return getFreq(obj, cardinalityA);
}
/**
* Returns the frequency of this object in collection B.
*
* @param obj the object
* @return the frequency of the object in collection B
*/
public int freqB(final Object obj) {
return getFreq(obj, cardinalityB);
}
private final int getFreq(final Object obj, final Map<?, Integer> freqMap) {
final Integer count = freqMap.get(obj);
if (count != null) {
return count.intValue();
}
return 0;
}
}
/**
* returns {@code true} iff the given {@link Collection}s contain
* exactly the same elements with exactly the same cardinalities.
*
* @param a the first collection
* @param b the second collection
* @return Returns true iff the given Collections contain exactly the same elements with exactly the same cardinalities.
* That is, iff the cardinality of e in a is equal to the cardinality of e in b, for each element e in a or b.
*/
public static boolean equalLists(Collection<?> a, Collection<?> b) {
if (a == null && b == null) {
return true;
}
if ((a == null && b != null) || a != null && b == null) {
return false;
}
return isEqualCollection(a, b);
}
/**
* Returns {@code true} iff the given {@link Collection}s contain
* exactly the same elements with exactly the same cardinalities.
* <p>
* That is, iff the cardinality of <i>e</i> in <i>a</i> is
* equal to the cardinality of <i>e</i> in <i>b</i>,
* for each element <i>e</i> in <i>a</i> or <i>b</i>.
*
* @param a the first collection, must not be null
* @param b the second collection, must not be null
* @return <code>true</code> iff the collections contain the same elements with the same cardinalities.
*/
public static boolean isEqualCollection(final Collection<?> a, final Collection<?> b) {
if (a.size() != b.size()) {
return false;
}
final CardinalityHelper<Object> helper = new CardinalityHelper<Object>(a, b);
if (helper.cardinalityA.size() != helper.cardinalityB.size()) {
return false;
}
for (final Object obj : helper.cardinalityA.keySet()) {
if (helper.freqA(obj) != helper.freqB(obj)) {
return false;
}
}
return true;
}
/**
* Returns a {@link Map} mapping each unique element in the given
* {@link Collection} to an {@link Integer} representing the number
* of occurrences of that element in the {@link Collection}.
* <p>
* Only those elements present in the collection will appear as
* keys in the map.
*
* @param <O> the type of object in the returned {@link Map}
* @param coll the collection to get the cardinality map for, must not be null
* @return the populated cardinality map
*/
public static <O> Map<O, Integer> getCardinalityMap(final Iterable<? extends O> coll) {
final Map<O, Integer> count = new HashMap<O, Integer>();
for (final O obj : coll) {
final Integer c = count.get(obj);
if (c == null) {
count.put(obj, Integer.valueOf(1));
} else {
count.put(obj, Integer.valueOf(c.intValue() + 1));
}
}
return count;
}
/**
* Converts each object in the list to a map of its bean properties,
* dropping the properties named in the exclusion set.
* @param originList list of source objects
* @param exclusionSet property names to leave out
* @param <T>
* @return a list of property maps, one per source object
*/
public static <T extends Object> List<Map<String, Object>> getListByExclusion(List<T> originList, Set<String> exclusionSet) {
List<Map<String, Object>> instanceList = new ArrayList<>();
Map<String, Object> instanceMap;
for (T instance : originList) {
Map<String, Object> dataMap = new BeanMap(instance);
instanceMap = new LinkedHashMap<>(16,0.75f,true);
for (Object key : dataMap.keySet()) {
if (exclusionSet.contains(key.toString())) {
continue;
}
instanceMap.put(key.toString(), dataMap.get(key));
}
instanceList.add(instanceMap);
}
return instanceList;
}
}
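A brief usage sketch (the example class, sample strings and lists are illustrative):

import cn.escheduler.common.utils.CollectionUtils;

import java.util.Arrays;
import java.util.List;
import java.util.Map;

public class CollectionUtilsExample {
    public static void main(String[] args) {
        // "k1=v1,k2=v2" -> {k1=v1, k2=v2}, optionally prefixing every key
        Map<String, String> map = CollectionUtils.stringToMap("k1=v1,k2=v2", ",");
        Map<String, String> prefixed = CollectionUtils.stringToMap("k1=v1,k2=v2", ",", "env.");

        List<Integer> a = Arrays.asList(1, 2, 2, 3);
        List<Integer> b = Arrays.asList(3, 2, 1, 2);
        boolean same = CollectionUtils.equalLists(a, b);   // true: same elements, same cardinalities

        System.out.println(map + " " + prefixed + " " + same);
    }
}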

73
escheduler-common/src/main/java/cn/escheduler/common/utils/CommonUtils.java

@ -0,0 +1,73 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.utils;
import cn.escheduler.common.Constants;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import static cn.escheduler.common.Constants.*;
import static cn.escheduler.common.utils.PropertyUtils.getBoolean;
import static cn.escheduler.common.utils.PropertyUtils.getString;
/**
* common utils
*/
public class CommonUtils {
private static final Logger logger = LoggerFactory.getLogger(CommonUtils.class);
/**
* @return the path of the shell environment file used when executing tasks;
* defaults to ~/.bash_profile when not configured
*/
public static String getSystemEnvPath() {
String envPath = getString(ESCHEDULER_ENV_PATH);
if (StringUtils.isEmpty(envPath)) {
envPath = System.getProperty("user.home") + File.separator + ".bash_profile";
}
return envPath;
}
/**
* @return the path of the Python environment file
*/
public static String getPythonSystemEnvPath() {
return getString(ESCHEDULER_ENV_PY);
}
/**
* @return get queue implementation name
*/
public static String getQueueImplValue(){
return getString(Constants.SCHEDULER_QUEUE_IMPL);
}
/**
*
* @return is develop mode
*/
public static boolean isDevelopMode() {
return getBoolean(DEVELOPMENT_STATE);
}
}
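These helpers simply read values through PropertyUtils; a sketch of calling them (the example class is illustrative, and the printed values depend entirely on the common configuration that is loaded):

import cn.escheduler.common.utils.CommonUtils;

public class CommonUtilsExample {
    public static void main(String[] args) {
        // falls back to ~/.bash_profile when no environment path is configured
        System.out.println("env file       : " + CommonUtils.getSystemEnvPath());
        System.out.println("python env file: " + CommonUtils.getPythonSystemEnvPath());
        System.out.println("queue impl     : " + CommonUtils.getQueueImplValue());
        System.out.println("develop mode   : " + CommonUtils.isDevelopMode());
    }
}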

313
escheduler-common/src/main/java/cn/escheduler/common/utils/DateUtils.java

@ -0,0 +1,313 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.utils;
import cn.escheduler.common.Constants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;
/**
* date utils
*/
public class DateUtils {
private static final Logger logger = LoggerFactory.getLogger(DateUtils.class);
/**
* @return get the formatted date string for the current time
*/
public static String getCurrentTime() {
return getCurrentTime(Constants.YYYY_MM_DD_HH_MM_SS);
}
/**
* @param format
* @return get the date string in the specified format of the current time
*/
public static String getCurrentTime(String format) {
return new SimpleDateFormat(format).format(new Date());
}
/**
* @param date
* @param format e.g. yyyy-MM-dd HH:mm:ss
* @return get the formatted date string
*/
public static String format(Date date, String format) {
return new SimpleDateFormat(format).format(date);
}
/**
* @param date
* @return convert time to yyyy-MM-dd HH:mm:ss format
*/
public static String dateToString(Date date){
return format(date,Constants.YYYY_MM_DD_HH_MM_SS);
}
/**
* @param date
* @return convert string to date and time
*/
public static Date parse(String date,String format){
try {
return new SimpleDateFormat(format).parse(date);
} catch (ParseException e) {
logger.error("error while parse date:" + date, e);
}
return null;
}
/**
* parse a date string in yyyy-MM-dd HH:mm:ss format into a Date
* @param str
* @return
*/
public static Date stringToDate(String str){
return parse(str,Constants.YYYY_MM_DD_HH_MM_SS);
}
/**
* get seconds between two dates
*
* @param d1
* @param d2
* @return
*/
public static long differSec(Date d1, Date d2) {
return (long) Math.ceil(differMs(d1, d2) / 1000.0);
}
/**
* get ms between two dates
*
* @param d1
* @param d2
* @return
*/
public static long differMs(Date d1, Date d2) {
return Math.abs(d1.getTime() - d2.getTime());
}
/**
* get hours between two dates
*
* @param d1
* @param d2
* @return
*/
public static long diffHours(Date d1, Date d2) {
return (long) Math.ceil(diffMin(d1, d2) / 60.0);
}
/**
* get minutes between two dates
*
* @param d1
* @param d2
* @return
*/
public static long diffMin(Date d1, Date d2) {
return (long) Math.ceil(differSec(d1, d2) / 60.0);
}
/**
* get the date offset from the given date by the given number of days
* (positive values move forward, negative values move back)
* @param date
* @param day
* @return
*/
public static Date getSomeDay(Date date, int day) {
Calendar calendar = Calendar.getInstance();
calendar.setTime(date);
calendar.add(Calendar.DATE, day);
return calendar.getTime();
}
/**
* compare two dates
*
* @param future
* @param old
* @return true if <code>future</code> is strictly after <code>old</code>
*/
public static boolean compare(Date future, Date old) {
return future.getTime() > old.getTime();
}
/**
* convert schedule string to date
* @param schedule
* @return
*/
public static Date getScheduleDate(String schedule){
return stringToDate(schedule);
}
/**
* format time to readable
*
* @param ms
* @return
*/
public static String format2Readable(long ms) {
long days = ms / (1000 * 60 * 60 * 24);
long hours = (ms % (1000 * 60 * 60 * 24)) / (1000 * 60 * 60);
long minutes = (ms % (1000 * 60 * 60)) / (1000 * 60);
long seconds = (ms % (1000 * 60)) / 1000;
return String.format("%02d %02d:%02d:%02d", days, hours, minutes, seconds);
}
/**
* get the Monday of the week containing the given date
*
* note: the first day of the week is set to Monday (the Calendar default is Sunday)
*/
public static Date getMonday(Date date) {
Calendar cal = Calendar.getInstance();
cal.setTime(date);
cal.setFirstDayOfWeek(Calendar.MONDAY);
cal.set(Calendar.DAY_OF_WEEK, Calendar.MONDAY);
return cal.getTime();
}
/**
* get the Sunday of the week containing the given date
*
* note: the first day of the week is set to Monday (the Calendar default is Sunday)
*/
public static Date getSunday(Date date) {
Calendar cal = Calendar.getInstance();
cal.setTime(date);
cal.setFirstDayOfWeek(Calendar.MONDAY);
cal.set(Calendar.DAY_OF_WEEK, Calendar.SUNDAY);
return cal.getTime();
}
/**
* get first day of month
*/
public static Date getFirstDayOfMonth(Date date) {
Calendar cal = Calendar.getInstance();
cal.setTime(date);
cal.set(Calendar.DAY_OF_MONTH, 1);
return cal.getTime();
}
/**
* get the start of the hour that is the given number of hours before the given date
*/
public static Date getSomeHourOfDay(Date date, int hours) {
Calendar cal = Calendar.getInstance();
cal.setTime(date);
cal.set(Calendar.HOUR_OF_DAY, cal.get(Calendar.HOUR_OF_DAY) - hours);
cal.set(Calendar.MINUTE, 0);
cal.set(Calendar.SECOND, 0);
return cal.getTime();
}
/**
* get last day of month
*/
public static Date getLastDayOfMonth(Date date) {
Calendar cal = Calendar.getInstance();
cal.setTime(date);
cal.add(Calendar.MONTH, 1);
cal.set(Calendar.DAY_OF_MONTH, 1);
cal.add(Calendar.DAY_OF_MONTH, -1);
return cal.getTime();
}
/**
* return YYYY-MM-DD 00:00:00
* @param inputDay
* @return
*/
public static Date getStartOfDay(Date inputDay){
Calendar cal = Calendar.getInstance();
cal.setTime(inputDay);
cal.set(Calendar.HOUR_OF_DAY, 0);
cal.set(Calendar.MINUTE, 0);
cal.set(Calendar.SECOND, 0);
return cal.getTime();
}
/**
* return YYYY-MM-DD 23:59:59
* @param inputDay
* @return
*/
public static Date getEndOfDay(Date inputDay){
Calendar cal = Calendar.getInstance();
cal.setTime(inputDay);
cal.set(Calendar.HOUR_OF_DAY, 23);
cal.set(Calendar.MINUTE, 59);
cal.set(Calendar.SECOND, 59);
return cal.getTime();
}
/**
* return the start of the hour (minutes and seconds set to 0)
* @param inputDay
* @return
*/
public static Date getStartOfHour(Date inputDay){
Calendar cal = Calendar.getInstance();
cal.setTime(inputDay);
cal.set(Calendar.MINUTE, 0);
cal.set(Calendar.SECOND, 0);
return cal.getTime();
}
/**
* return the end of the hour (minutes and seconds set to 59)
* @param inputDay
* @return
*/
public static Date getEndOfHour(Date inputDay){
Calendar cal = Calendar.getInstance();
cal.setTime(inputDay);
cal.set(Calendar.MINUTE, 59);
cal.set(Calendar.SECOND, 59);
return cal.getTime();
}
}
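A small sketch exercising a few of the helpers above (the example class and dates are arbitrary, and it assumes Constants.YYYY_MM_DD_HH_MM_SS is the usual yyyy-MM-dd HH:mm:ss pattern):

import cn.escheduler.common.utils.DateUtils;

import java.util.Date;

public class DateUtilsExample {
    public static void main(String[] args) {
        Date start = DateUtils.stringToDate("2019-04-01 08:00:00");
        Date end = DateUtils.stringToDate("2019-04-01 09:30:30");

        System.out.println(DateUtils.differSec(start, end));           // 5430 seconds
        System.out.println(DateUtils.format2Readable(
                DateUtils.differMs(start, end)));                      // "00 01:30:30"
        System.out.println(DateUtils.dateToString(
                DateUtils.getStartOfDay(end)));                        // 2019-04-01 00:00:00
    }
}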

135
escheduler-common/src/main/java/cn/escheduler/common/utils/DependentUtils.java

@ -0,0 +1,135 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.utils;
import cn.escheduler.common.enums.DependResult;
import cn.escheduler.common.enums.DependentRelation;
import cn.escheduler.common.model.DateInterval;
import cn.escheduler.common.utils.dependent.DependentDateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
public class DependentUtils {
private static final Logger logger = LoggerFactory.getLogger(DependentUtils.class);
public static DependResult getDependResultForRelation(DependentRelation relation,
List<DependResult> dependResultList){
DependResult dependResult = DependResult.SUCCESS;
switch (relation){
case AND:
if(dependResultList.contains(DependResult.FAILED)){
dependResult = DependResult.FAILED;
}else if(dependResultList.contains(DependResult.WAITING)){
dependResult = DependResult.WAITING;
}else{
dependResult = DependResult.SUCCESS;
}
break;
case OR:
if(dependResultList.contains(DependResult.SUCCESS)){
dependResult = DependResult.SUCCESS;
}else if(dependResultList.contains(DependResult.WAITING)){
dependResult = DependResult.WAITING;
}else{
dependResult = DependResult.FAILED;
}
break;
default:
break;
}
return dependResult;
}
/**
* get date interval list by business date and date value.
* @param businessDate
* @param dateValue
* @return
*/
public static List<DateInterval> getDateIntervalList(Date businessDate, String dateValue){
List<DateInterval> result = new ArrayList<>();
switch (dateValue){
case "last1Hour":
result = DependentDateUtils.getLastHoursInterval(businessDate, 1);
break;
case "last2Hours":
result = DependentDateUtils.getLastHoursInterval(businessDate, 2);
break;
case "last3Hours":
result = DependentDateUtils.getLastHoursInterval(businessDate, 3);
break;
case "last1Days":
result = DependentDateUtils.getLastDayInterval(businessDate, 1);
break;
case "last2Days":
result = DependentDateUtils.getLastDayInterval(businessDate, 2);
break;
case "last3Days":
result = DependentDateUtils.getLastDayInterval(businessDate, 3);
break;
case "last7Days":
result = DependentDateUtils.getLastDayInterval(businessDate, 7);
break;
case "lastWeek":
result = DependentDateUtils.getLastWeekInterval(businessDate);
break;
case "lastMonday":
result = DependentDateUtils.getLastWeekOneDayInterval(businessDate, 1);
break;
case "lastTuesday":
result = DependentDateUtils.getLastWeekOneDayInterval(businessDate, 2);
break;
case "lastWednesday":
result = DependentDateUtils.getLastWeekOneDayInterval(businessDate, 3);
break;
case "lastThursday":
result = DependentDateUtils.getLastWeekOneDayInterval(businessDate, 4);
break;
case "lastFriday":
result = DependentDateUtils.getLastWeekOneDayInterval(businessDate, 5);
break;
case "lastSaturday":
result = DependentDateUtils.getLastWeekOneDayInterval(businessDate, 6);
break;
case "lastSunday":
result = DependentDateUtils.getLastWeekOneDayInterval(businessDate, 7);
break;
case "lastMonth":
result = DependentDateUtils.getLastMonthInterval(businessDate);
break;
case "lastMonthBegin":
result = DependentDateUtils.getLastMonthBeginInterval(businessDate, true);
break;
case "lastMonthEnd":
result = DependentDateUtils.getLastMonthBeginInterval(businessDate, false);
break;
default:
break;
}
return result;
}
}
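A sketch of how the relation evaluation composes individual dependency results (the example class and result list are made up for illustration):

import cn.escheduler.common.enums.DependResult;
import cn.escheduler.common.enums.DependentRelation;
import cn.escheduler.common.utils.DependentUtils;

import java.util.Arrays;
import java.util.List;

public class DependentUtilsExample {
    public static void main(String[] args) {
        List<DependResult> results = Arrays.asList(DependResult.SUCCESS, DependResult.WAITING);

        // AND: any FAILED -> FAILED, else any WAITING -> WAITING, else SUCCESS
        System.out.println(DependentUtils.getDependResultForRelation(DependentRelation.AND, results));
        // OR: any SUCCESS -> SUCCESS, else any WAITING -> WAITING, else FAILED
        System.out.println(DependentUtils.getDependResultForRelation(DependentRelation.OR, results));
    }
}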

37
escheduler-common/src/main/java/cn/escheduler/common/utils/EncryptionUtils.java

@ -0,0 +1,37 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.utils;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.commons.lang3.StringUtils;
/**
* encryption utils
*/
public class EncryptionUtils {
/**
*
* @param rawStr raw string to hash; null is treated as the empty string
* @return the 32-character hex MD5 digest of rawStr
*/
public static String getMd5(String rawStr) {
return DigestUtils.md5Hex(null == rawStr ? StringUtils.EMPTY : rawStr);
}
}
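Usage is a one-liner; a sketch (the example class is illustrative):

import cn.escheduler.common.utils.EncryptionUtils;

public class EncryptionUtilsExample {
    public static void main(String[] args) {
        // 32-character lowercase hex digest; null input is hashed as the empty string
        System.out.println(EncryptionUtils.getMd5("admin"));
        System.out.println(EncryptionUtils.getMd5(null));
    }
}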

36
escheduler-common/src/main/java/cn/escheduler/common/utils/EnumFieldUtil.java

@ -0,0 +1,36 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.utils;
import org.apache.ibatis.type.EnumOrdinalTypeHandler;
/**
* enum field util
*/
public class EnumFieldUtil {
/**
* Generate a MyBatis parameter expression for an enum field, using the
* ordinal type handler so the enum is stored by its ordinal value.
*
* @param field field name
* @param enumClass enum class
* @return
*/
public static String genFieldStr(String field, Class<?> enumClass) {
return "#{" + field + ",javaType=" + enumClass.getName() + ",typeHandler=" + EnumOrdinalTypeHandler.class.getName() + "}";
}
}
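The generated fragment is meant to be embedded in a MyBatis SQL mapping; a sketch (the example class is illustrative, and ReleaseState is one of the enums added in this commit):

import cn.escheduler.common.enums.ReleaseState;
import cn.escheduler.common.utils.EnumFieldUtil;

public class EnumFieldUtilExample {
    public static void main(String[] args) {
        // yields:
        // #{releaseState,javaType=cn.escheduler.common.enums.ReleaseState,typeHandler=org.apache.ibatis.type.EnumOrdinalTypeHandler}
        String fragment = EnumFieldUtil.genFieldStr("releaseState", ReleaseState.class);
        System.out.println(fragment);
    }
}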

372
escheduler-common/src/main/java/cn/escheduler/common/utils/FileUtils.java

@ -0,0 +1,372 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.utils;
import org.apache.commons.io.Charsets;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.*;
import java.nio.charset.Charset;
import java.nio.charset.UnsupportedCharsetException;
import static cn.escheduler.common.Constants.*;
import static cn.escheduler.common.utils.PropertyUtils.getString;
/**
* file utils
*/
public class FileUtils {
public static final Logger logger = LoggerFactory.getLogger(FileUtils.class);
/**
* get file suffix
*
* @param filename
* @return file suffix
*/
public static String suffix(String filename) {
String fileSuffix = "";
if (StringUtils.isNotEmpty(filename)) {
int lastIndex = filename.lastIndexOf(".");
if (lastIndex > 0) {
fileSuffix = filename.substring(lastIndex + 1);
}
}
return fileSuffix;
}
/**
* get download file absolute path and name
*
* @param filename
* @return download file name
*/
public static String getDownloadFilename(String filename) {
return String.format("%s/%s/%s", getString(DATA_DOWNLOAD_BASEDIR_PATH), DateUtils.getCurrentTime(YYYYMMDDHHMMSS), filename);
}
/**
* get upload file absolute path and name
*
* @param tenantCode tenant code
* @param filename file name
* @return local file path
*/
public static String getUploadFilename(String tenantCode, String filename) {
return String.format("%s/%s/resources/%s",getString(DATA_BASEDIR_PATH), tenantCode, filename);
}
/**
* directory of process execution
* @param projectId
* @param processDefineId
* @param processInstanceId
* @param taskInstanceId
* @return directory of process execution
*/
public static String getProcessExecDir(int projectId, int processDefineId, int processInstanceId, int taskInstanceId) {
return String.format("%s/process/%s/%s/%s/%s", getString(PROCESS_EXEC_BASEPATH), Integer.toString(projectId),
Integer.toString(processDefineId), Integer.toString(processInstanceId),Integer.toString(taskInstanceId));
}
/**
* directory of process instances
* @param projectId
* @param processDefineId
* @param processInstanceId
* @return directory of process instances
*/
public static String getProcessExecDir(int projectId, int processDefineId, int processInstanceId) {
return String.format("%s/process/%s/%s/%s", getString(PROCESS_EXEC_BASEPATH), Integer.toString(projectId),
Integer.toString(processDefineId), Integer.toString(processInstanceId));
}
/**
* @return get suffixes for resource files that support online viewing
*/
public static String getResourceViewSuffixs() {
return getString(RESOURCE_VIEW_SUFFIXS);
}
/**
* create directory and user
* @param execLocalPath
* @param userName
* @param logger
* @throws IOException
*/
public static void createWorkDirAndUserIfAbsent(String execLocalPath, String userName, Logger logger) throws IOException{
// if the work dir already exists, delete it first
File execLocalPathFile = new File(execLocalPath);
if (execLocalPathFile.exists()){
org.apache.commons.io.FileUtils.forceDelete(execLocalPathFile);
}
//create work dir
org.apache.commons.io.FileUtils.forceMkdir(execLocalPathFile);
// if the OS user does not exist, create it
if (!OSUtils.getUserList().contains(userName)){
String userGroup = OSUtils.getGroup();
if (org.apache.commons.lang3.StringUtils.isNotEmpty(userGroup)){
logger.info("create os user : {}",userName);
String cmd = String.format("sudo useradd -g %s %s",userGroup,userName);
logger.info("execute cmd : {}",cmd);
OSUtils.exeCmd(cmd);
}
}
}
/**
* write content to a file; if the parent directory does not exist, it is created first
*
* @param content content
* @param filePath target file path
* @return
*/
public static boolean writeContent2File(String content, String filePath) {
boolean flag = true;
BufferedReader bufferedReader = null;
BufferedWriter bufferedWriter = null;
try {
File distFile = new File(filePath);
if (!distFile.getParentFile().exists()) {
distFile.getParentFile().mkdirs();
}
bufferedReader = new BufferedReader(new StringReader(content));
bufferedWriter = new BufferedWriter(new FileWriter(distFile));
char buf[] = new char[1024];
int len;
while ((len = bufferedReader.read(buf)) != -1) {
bufferedWriter.write(buf, 0, len);
}
bufferedWriter.flush();
bufferedReader.close();
bufferedWriter.close();
} catch (IOException e) {
FileUtils.logger.error(e.getMessage(), e);
flag = false;
return flag;
} finally {
IOUtils.closeQuietly(bufferedWriter);
IOUtils.closeQuietly(bufferedReader);
}
return flag;
}
/**
* Writes a String to a file creating the file if it does not exist.
*
* NOTE: As from v1.3, the parent directories of the file will be created
* if they do not exist.
*
* @param file the file to write
* @param data the content to write to the file
* @param encoding the encoding to use, {@code null} means platform default
* @throws IOException in case of an I/O error
* @throws java.io.UnsupportedEncodingException if the encoding is not supported by the VM
* @since 2.4
*/
public static void writeStringToFile(File file, String data, Charset encoding) throws IOException {
writeStringToFile(file, data, encoding, false);
}
/**
* Writes a String to a file creating the file if it does not exist.
*
* NOTE: As from v1.3, the parent directories of the file will be created
* if they do not exist.
*
* @param file the file to write
* @param data the content to write to the file
* @param encoding the encoding to use, {@code null} means platform default
* @throws IOException in case of an I/O error
* @throws java.io.UnsupportedEncodingException if the encoding is not supported by the VM
*/
public static void writeStringToFile(File file, String data, String encoding) throws IOException {
writeStringToFile(file, data, encoding, false);
}
/**
* Writes a String to a file creating the file if it does not exist.
*
* @param file the file to write
* @param data the content to write to the file
* @param encoding the encoding to use, {@code null} means platform default
* @param append if {@code true}, then the String will be added to the
* end of the file rather than overwriting
* @throws IOException in case of an I/O error
* @since 2.3
*/
public static void writeStringToFile(File file, String data, Charset encoding, boolean append) throws IOException {
OutputStream out = null;
try {
out = openOutputStream(file, append);
IOUtils.write(data, out, encoding);
out.close(); // don't swallow close Exception if copy completes normally
} finally {
IOUtils.closeQuietly(out);
}
}
/**
* Writes a String to a file creating the file if it does not exist.
*
* @param file the file to write
* @param data the content to write to the file
* @param encoding the encoding to use, {@code null} means platform default
* @param append if {@code true}, then the String will be added to the
* end of the file rather than overwriting
* @throws IOException in case of an I/O error
* @throws UnsupportedCharsetException
* thrown instead of {@link UnsupportedEncodingException} in version 2.2 if the encoding is not
* supported by the VM
* @since 2.1
*/
public static void writeStringToFile(File file, String data, String encoding, boolean append) throws IOException {
writeStringToFile(file, data, Charsets.toCharset(encoding), append);
}
/**
* Writes a String to a file creating the file if it does not exist using the default encoding for the VM.
*
* @param file the file to write
* @param data the content to write to the file
* @throws IOException in case of an I/O error
*/
public static void writeStringToFile(File file, String data) throws IOException {
writeStringToFile(file, data, Charset.defaultCharset(), false);
}
/**
* Writes a String to a file creating the file if it does not exist using the default encoding for the VM.
*
* @param file the file to write
* @param data the content to write to the file
* @param append if {@code true}, then the String will be added to the
* end of the file rather than overwriting
* @throws IOException in case of an I/O error
* @since 2.1
*/
public static void writeStringToFile(File file, String data, boolean append) throws IOException {
writeStringToFile(file, data, Charset.defaultCharset(), append);
}
/**
* Opens a {@link FileOutputStream} for the specified file, checking and
* creating the parent directory if it does not exist.
* <p>
* At the end of the method either the stream will be successfully opened,
* or an exception will have been thrown.
* <p>
* The parent directory will be created if it does not exist.
* The file will be created if it does not exist.
* An exception is thrown if the file object exists but is a directory.
* An exception is thrown if the file exists but cannot be written to.
* An exception is thrown if the parent directory cannot be created.
*
* @param file the file to open for output, must not be {@code null}
* @return a new {@link FileOutputStream} for the specified file
* @throws IOException if the file object is a directory
* @throws IOException if the file cannot be written to
* @throws IOException if a parent directory needs creating but that fails
* @since 1.3
*/
public static FileOutputStream openOutputStream(File file) throws IOException {
return openOutputStream(file, false);
}
/**
* Opens a {@link FileOutputStream} for the specified file, checking and
* creating the parent directory if it does not exist.
* <p>
* At the end of the method either the stream will be successfully opened,
* or an exception will have been thrown.
* <p>
* The parent directory will be created if it does not exist.
* The file will be created if it does not exist.
* An exception is thrown if the file object exists but is a directory.
* An exception is thrown if the file exists but cannot be written to.
* An exception is thrown if the parent directory cannot be created.
*
* @param file the file to open for output, must not be {@code null}
* @param append if {@code true}, then bytes will be added to the
* end of the file rather than overwriting
* @return a new {@link FileOutputStream} for the specified file
* @throws IOException if the file object is a directory
* @throws IOException if the file cannot be written to
* @throws IOException if a parent directory needs creating but that fails
* @since 2.1
*/
public static FileOutputStream openOutputStream(File file, boolean append) throws IOException {
if (file.exists()) {
if (file.isDirectory()) {
throw new IOException("File '" + file + "' exists but is a directory");
}
if (!file.canWrite()) {
throw new IOException("File '" + file + "' cannot be written to");
}
} else {
File parent = file.getParentFile();
if (parent != null) {
if (!parent.mkdirs() && !parent.isDirectory()) {
throw new IOException("Directory '" + parent + "' could not be created");
}
}
}
return new FileOutputStream(file, append);
}
/**
* deletes a directory recursively
* @param dir
*/
public static void deleteDir(String dir) throws IOException {
org.apache.commons.io.FileUtils.deleteDirectory(new File(dir));
}
/**
* Deletes a file. If file is a directory, delete it and all sub-directories.
* <p>
* The difference between File.delete() and this method are:
* <ul>
* <li>A directory to be deleted does not have to be empty.</li>
* <li>You get exceptions when a file or directory cannot be deleted.
* (java.io.File methods returns a boolean)</li>
* </ul>
*
* @param filename
* @throws IOException in case deletion is unsuccessful
*/
public static void deleteFile(String filename) throws IOException {
org.apache.commons.io.FileUtils.forceDelete(new File(filename));
}
}
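A minimal usage sketch for the FileUtils helpers above (illustrative only: the project/process/instance/task ids and the script content are hypothetical, and the base-path properties read via getString(...) must be present in the loaded properties files):

import cn.escheduler.common.utils.FileUtils;

public class FileUtilsUsageExample {
    public static void main(String[] args) {
        // hypothetical ids, purely for illustration
        String execDir = FileUtils.getProcessExecDir(1, 10, 100, 1000);
        // parent directories are created on demand by writeContent2File
        boolean written = FileUtils.writeContent2File("echo hello", execDir + "/run.sh");
        System.out.println("wrote " + execDir + "/run.sh : " + written);
    }
}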

486
escheduler-common/src/main/java/cn/escheduler/common/utils/HadoopUtils.java

@ -0,0 +1,486 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.utils;
import cn.escheduler.common.Constants;
import cn.escheduler.common.enums.ExecutionStatus;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONException;
import com.alibaba.fastjson.JSONObject;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.client.cli.RMAdminCLI;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.*;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static cn.escheduler.common.Constants.*;
import static cn.escheduler.common.utils.PropertyUtils.getInt;
import static cn.escheduler.common.utils.PropertyUtils.getString;
/**
* hadoop utils
* single instance
*/
public class HadoopUtils implements Closeable {
private static final Logger logger = LoggerFactory.getLogger(HadoopUtils.class);
private static volatile HadoopUtils instance = new HadoopUtils();
private static volatile Configuration configuration;
private static FileSystem fs;
private HadoopUtils(){
init();
}
public static HadoopUtils getInstance(){
return instance;
}
/**
* init hadoop configuration
*/
private void init() {
if (configuration == null) {
synchronized (HadoopUtils.class) {
if (configuration == null) {
try {
configuration = new Configuration();
String defaultFS = configuration.get(FS_DEFAULTFS);
//first get the value from core-site.xml / hdfs-site.xml; if it points to the local file system (the default),
//then fall back to the fs.defaultFS property from the properties file
if(defaultFS.startsWith("file")){
String defaultFSProp = getString(FS_DEFAULTFS);
if(StringUtils.isNotBlank(defaultFSProp)){
configuration.set(FS_DEFAULTFS,defaultFSProp);
}else{
logger.error("property:{} can not to be empty, please set!");
throw new RuntimeException("property:{} can not to be empty, please set!");
}
}else{
logger.info("get property:{} -> {}, from core-site.xml hdfs-site.xml ", FS_DEFAULTFS, defaultFS);
}
if (fs == null) {
fs = FileSystem.get(configuration);
}
String rmHaIds = getString(YARN_RESOURCEMANAGER_HA_RM_IDS);
String appAddress = getString(Constants.YARN_APPLICATION_STATUS_ADDRESS);
if (!StringUtils.isEmpty(rmHaIds)) {
appAddress = getAppAddress(appAddress, rmHaIds);
logger.info("appAddress : {}", appAddress);
}
configuration.set(Constants.YARN_APPLICATION_STATUS_ADDRESS, appAddress);
} catch (Exception e) {
logger.error(e.getMessage(), e);
}
}
}
}
}
/**
* @return Configuration
*/
public Configuration getConfiguration() {
return configuration;
}
/**
* get application url
*
* @param applicationId
* @return
*/
public String getApplicationUrl(String applicationId) {
return String.format(configuration.get(YARN_APPLICATION_STATUS_ADDRESS), applicationId);
}
/**
* cat file on hdfs
*
* @param hdfsFilePath hdfs file path
* @return byte[]
*/
public byte[] catFile(String hdfsFilePath) throws IOException {
if(StringUtils.isBlank(hdfsFilePath)){
logger.error("hdfs file path:{} is blank",hdfsFilePath);
return null;
}
try (FSDataInputStream fsDataInputStream = fs.open(new Path(hdfsFilePath))) {
return IOUtils.toByteArray(fsDataInputStream);
}
}
/**
* cat file on hdfs
*
* @param hdfsFilePath hdfs file path
* @param skipLineNums skip line numbers
* @param limit read how many lines
* @return
*/
public List<String> catFile(String hdfsFilePath, int skipLineNums, int limit) throws IOException {
if(StringUtils.isBlank(hdfsFilePath)){
logger.error("hdfs file path:{} is blank",hdfsFilePath);
return null;
}
try (FSDataInputStream in = fs.open(new Path(hdfsFilePath));
BufferedReader br = new BufferedReader(new InputStreamReader(in))) {
Stream<String> stream = br.lines().skip(skipLineNums).limit(limit);
return stream.collect(Collectors.toList());
}
}
/**
* make the given file and all non-existent parents into
* directories. Has the semantics of Unix 'mkdir -p'.
* Existence of the directory hierarchy is not an error.
*
* @param hdfsPath path to create
*/
public boolean mkdir(String hdfsPath) throws IOException {
return fs.mkdirs(new Path(hdfsPath));
}
/**
* copy files between FileSystems
*
* @param srcPath source hdfs path
* @param dstPath destination hdfs path
* @param deleteSource whether to delete the src
* @param overwrite whether to overwrite an existing file
* @return whether the copy succeeded
*/
public boolean copy(String srcPath, String dstPath, boolean deleteSource, boolean overwrite) throws IOException {
return FileUtil.copy(fs, new Path(srcPath), fs, new Path(dstPath), deleteSource, overwrite, fs.getConf());
}
/**
* the src file is on the local disk. Add it to FS at
* the given dst name.
* @param srcFile local file
* @param dstHdfsPath destination hdfs path
* @param deleteSource whether to delete the src
* @param overwrite whether to overwrite an existing file
*/
public boolean copyLocalToHdfs(String srcFile, String dstHdfsPath, boolean deleteSource, boolean overwrite) throws IOException {
Path srcPath = new Path(srcFile);
Path dstPath= new Path(dstHdfsPath);
fs.copyFromLocalFile(deleteSource, overwrite, srcPath, dstPath);
return true;
}
/**
* copy hdfs file to local
*
* @param srcHdfsFilePath source hdfs file path
* @param dstFile destination file
* @param deleteSource delete source
* @param overwrite overwrite
* @return
* @throws IOException
*/
public boolean copyHdfsToLocal(String srcHdfsFilePath, String dstFile, boolean deleteSource, boolean overwrite) throws IOException {
Path srcPath = new Path(srcHdfsFilePath);
File dstPath = new File(dstFile);
if (dstPath.exists()) {
if (dstPath.isFile()) {
if (overwrite) {
dstPath.delete();
}
} else {
logger.error("destination file must be a file");
}
}
if(!dstPath.getParentFile().exists()){
dstPath.getParentFile().mkdirs();
}
return FileUtil.copy(fs, srcPath, dstPath, deleteSource, fs.getConf());
}
/**
*
* delete a file
*
* @param hdfsFilePath the path to delete.
* @param recursive if path is a directory and set to
* true, the directory is deleted else throws an exception. In
* case of a file the recursive can be set to either true or false.
* @return true if delete is successful else false.
* @throws IOException
*/
public boolean delete(String hdfsFilePath, boolean recursive) throws IOException {
return fs.delete(new Path(hdfsFilePath), recursive);
}
/**
* check if exists
*
* @param hdfsFilePath source file path
* @return
*/
public boolean exists(String hdfsFilePath) throws IOException {
return fs.exists(new Path(hdfsFilePath));
}
/**
* Renames Path src to Path dst. Can take place on local fs
* or remote DFS.
* @param src path to be renamed
* @param dst new path after rename
* @throws IOException on failure
* @return true if rename is successful
*/
public boolean rename(String src, String dst) throws IOException {
return fs.rename(new Path(src), new Path(dst));
}
/**
* get the state of an application
*
* @param applicationId
* @return the application status, or null if the applicationId is empty; parse exceptions may be thrown for malformed responses
* @throws JSONException
* @throws IOException
*/
public ExecutionStatus getApplicationStatus(String applicationId) throws JSONException {
if (StringUtils.isEmpty(applicationId)) {
return null;
}
String applicationUrl = getApplicationUrl(applicationId);
String responseContent = HttpUtils.get(applicationUrl);
JSONObject jsonObject = JSONObject.parseObject(responseContent);
String result = jsonObject.getJSONObject("app").getString("finalStatus");
switch (result) {
case ACCEPTED:
return ExecutionStatus.SUBMITTED_SUCCESS;
case SUCCEEDED:
return ExecutionStatus.SUCCESS;
case NEW:
case NEW_SAVING:
case SUBMITTED:
case FAILED:
return ExecutionStatus.FAILURE;
case KILLED:
return ExecutionStatus.KILL;
case RUNNING:
default:
return ExecutionStatus.RUNNING_EXEUTION;
}
}
/**
*
* @return data hdfs path
*/
public static String getHdfsDataBasePath() {
return getString(DATA_STORE_2_HDFS_BASEPATH);
}
/**
* hdfs resource dir
*
* @param tenantCode tenant code
* @return hdfs resource dir
*/
public static String getHdfsDir(String tenantCode) {
return String.format("%s/resources", getHdfsTenantDir(tenantCode));
}
/**
* get udf dir on hdfs
*
* @param tenantCode tenant code
* @return get udf dir on hdfs
*/
public static String getHdfsUdfDir(String tenantCode) {
return String.format("%s/udfs", getHdfsTenantDir(tenantCode));
}
/**
* get absolute path and name for file on hdfs
*
* @param tenantCode tenant code
* @param filename file name
* @return get absolute path and name for file on hdfs
*/
public static String getHdfsFilename(String tenantCode, String filename) {
return String.format("%s/%s", getHdfsDir(tenantCode), filename);
}
/**
* get absolute path and name for udf file on hdfs
*
* @param tenantCode tenant code
* @param filename file name
* @return get absolute path and name for udf file on hdfs
*/
public static String getHdfsUdfFilename(String tenantCode, String filename) {
return String.format("%s/%s", getHdfsUdfDir(tenantCode), filename);
}
/**
* @return file directory of tenants on hdfs
*/
private static String getHdfsTenantDir(String tenantCode) {
return String.format("%s/%s", getString(DATA_STORE_2_HDFS_BASEPATH), tenantCode);
}
/**
* getAppAddress
*
* @param appAddress
* @param rmHa
* @return
*/
public static String getAppAddress(String appAddress, String rmHa) {
//get active ResourceManager
String activeRM = YarnHAAdminUtils.getAcitveRMName(rmHa);
String[] split1 = appAddress.split(DOUBLE_SLASH);
if (split1.length != 2) {
return null;
}
String start = split1[0] + DOUBLE_SLASH;
String[] split2 = split1[1].split(COLON);
if (split2.length != 2) {
return null;
}
String end = COLON + split2[1];
return start + activeRM + end;
}
@Override
public void close() throws IOException {
if (fs != null) {
try {
fs.close();
} catch (IOException e) {
logger.error("Close HadoopUtils instance failed", e);
throw new IOException("Close HadoopUtils instance failed", e);
}
}
}
/**
* yarn ha admin utils
*/
private static final class YarnHAAdminUtils extends RMAdminCLI {
private static final Logger logger = LoggerFactory.getLogger(YarnHAAdminUtils.class);
/**
* get active resourcemanager
*
* @param rmIds
* @return
*/
public static String getAcitveRMName(String rmIds) {
String[] rmIdArr = rmIds.split(COMMA);
int activeResourceManagerPort = getInt(HADOOP_RESOURCE_MANAGER_HTTPADDRESS_PORT, 8088);
String yarnUrl = "http://%s:" + activeResourceManagerPort + "/ws/v1/cluster/info";
String state = null;
try {
/**
* send http get request to rm1
*/
state = getRMState(String.format(yarnUrl, rmIdArr[0]));
if (HADOOP_RM_STATE_ACTIVE.equals(state)) {
return rmIdArr[0];
} else if (HADOOP_RM_STATE_STANDBY.equals(state)) {
state = getRMState(String.format(yarnUrl, rmIdArr[1]));
if (HADOOP_RM_STATE_ACTIVE.equals(state)) {
return rmIdArr[1];
}
} else {
return null;
}
} catch (Exception e) {
state = getRMState(String.format(yarnUrl, rmIdArr[1]));
if (HADOOP_RM_STATE_ACTIVE.equals(state)) {
return rmIdArr[1];
}
}
return null;
}
/**
* get ResourceManager state
*
* @param url
* @return
*/
public static String getRMState(String url) {
String retStr = HttpUtils.get(url);
if (StringUtils.isEmpty(retStr)) {
return null;
}
//to json
JSONObject jsonObject = JSON.parseObject(retStr);
//get ResourceManager state
String state = jsonObject.getJSONObject("clusterInfo").getString("haState");
return state;
}
}
}
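A minimal usage sketch for HadoopUtils (a sketch only: it assumes fs.defaultFS and the related HDFS/YARN properties are configured, and the tenant code and local file path below are hypothetical):

import cn.escheduler.common.utils.HadoopUtils;

public class HadoopUtilsUsageExample {
    public static void main(String[] args) throws Exception {
        HadoopUtils hdfs = HadoopUtils.getInstance();
        // resource directory of a hypothetical tenant under the configured HDFS data base path
        String resourceDir = HadoopUtils.getHdfsDir("tenantA");
        hdfs.mkdir(resourceDir);
        // upload a local file, keeping the source and overwriting any existing target
        String target = HadoopUtils.getHdfsFilename("tenantA", "demo.sh");
        hdfs.copyLocalToHdfs("/tmp/demo.sh", target, false, true);
        System.out.println("exists on hdfs: " + hdfs.exists(target));
    }
}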

100
escheduler-common/src/main/java/cn/escheduler/common/utils/HttpUtils.java

@ -0,0 +1,100 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.utils;
import cn.escheduler.common.Constants;
import org.apache.http.HttpEntity;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
/**
* http utils
*/
public class HttpUtils {
public static final Logger logger = LoggerFactory.getLogger(HttpUtils.class);
/**
* get http request content
* @param url
* @return http response
*/
public static String get(String url){
CloseableHttpClient httpclient = HttpClients.createDefault();
HttpGet httpget = new HttpGet(url);
/** set connect timeout, connection request timeout and socket timeout */
RequestConfig requestConfig = RequestConfig.custom().setConnectTimeout(Constants.HTTP_CONNECT_TIMEOUT)
.setConnectionRequestTimeout(Constants.HTTP_CONNECTION_REQUEST_TIMEOUT)
.setSocketTimeout(Constants.SOCKET_TIMEOUT)
.setRedirectsEnabled(true)
.build();
httpget.setConfig(requestConfig);
String responseContent = null;
CloseableHttpResponse response = null;
try {
response = httpclient.execute(httpget);
//check response status is 200
if (response.getStatusLine().getStatusCode() == 200) {
HttpEntity entity = response.getEntity();
if (entity != null) {
responseContent = EntityUtils.toString(entity, Constants.UTF_8);
}else{
logger.warn("http entity is null");
}
}else{
logger.error("htt get:{} response status code is not 200!");
}
}catch (Exception e){
logger.error(e.getMessage(),e);
}finally {
try {
if (response != null) {
EntityUtils.consume(response.getEntity());
response.close();
}
} catch (IOException e) {
logger.error(e.getMessage(),e);
}
if (httpget != null && !httpget.isAborted()) {
httpget.releaseConnection();
httpget.abort();
}
if (httpclient != null) {
try {
httpclient.close();
} catch (IOException e) {
logger.error(e.getMessage(),e);
}
}
}
return responseContent;
}
}
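A minimal usage sketch for HttpUtils.get (the URL is purely illustrative; a null return means the response was not 200 or the request failed):

import cn.escheduler.common.utils.HttpUtils;

public class HttpUtilsUsageExample {
    public static void main(String[] args) {
        // e.g. query a YARN ResourceManager cluster info endpoint (hypothetical host)
        String body = HttpUtils.get("http://rm-host:8088/ws/v1/cluster/info");
        System.out.println(body == null ? "request failed or non-200 response" : body);
    }
}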

258
escheduler-common/src/main/java/cn/escheduler/common/utils/JSONUtils.java

@ -0,0 +1,258 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.utils;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.fastjson.TypeReference;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.databind.*;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TimeZone;
/**
* json utils
*/
public class JSONUtils {
private static final Logger logger = LoggerFactory.getLogger(JSONUtils.class);
/**
* can use static singleton, inject: just make sure to reuse!
*/
private static final ObjectMapper objectMapper = new ObjectMapper();
/**
* init
*/
private static final JSONUtils instance = new JSONUtils();
private JSONUtils() {
//determines whether encountering unknown properties should fail; false means unknown properties are ignored during deserialization
objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false).setTimeZone(TimeZone.getDefault());
}
/**
* json representation of object
* @param object
* @return object to json string
*/
public static String toJson(Object object) {
try{
return JSONObject.toJSONString(object,false);
} catch (Exception e) {
logger.error("object to json exception!",e);
}
return null;
}
/**
*
* This method deserializes the specified Json into an object of the specified class. It is not
* suitable to use if the specified class is a generic type since it will not have the generic
* type information because of the Type Erasure feature of Java. Therefore, this method should not
be used if the desired type is a generic type. Note that this method works fine if any of
* the fields of the specified object are generics, just the object itself should not be a
* generic type.
*
* @param json the string from which the object is to be deserialized
* @param clazz the class of T
* @return an object of type T from the string
* classOfT
*/
public static <T> T parseObject(String json, Class<T> clazz) {
if (StringUtils.isEmpty(json)) {
return null;
}
try {
return JSONObject.parseObject(json, clazz);
} catch (Exception e) {
logger.error("parse object exception!",e);
}
return null;
}
/**
* json to list
*
* @param json
* @param clazz c
* @param <T>
* @return
*/
public static <T> List<T> toList(String json, Class<T> clazz) {
if (StringUtils.isEmpty(json)) {
return null;
}
try {
return JSONArray.parseArray(json, clazz);
} catch (Exception e) {
logger.error("JSONArray.parseArray exception!",e);
}
return null;
}
/**
* check json object valid
*
* @param json
* @return
*/
public static boolean checkJsonVaild(String json) {
if (StringUtils.isEmpty(json)) {
return false;
}
try {
objectMapper.readTree(json);
return true;
} catch (IOException e) {
logger.error("check json object valid exception!",e);
}
return false;
}
/**
* Method for finding a JSON Object field with specified name in this
* node or its child nodes, and returning value it has.
* If no matching field is found in this node or its descendants, returns null.
*
* @param fieldName Name of field to look for
*
* @return Value of first matching node found, if any; null if none
*/
public static String findValue(JsonNode jsonNode, String fieldName) {
JsonNode node = jsonNode.findValue(fieldName);
if (node == null) {
return null;
}
return node.toString();
}
/**
* json to map
*
* {@link #toMap(String, Class, Class)}
*
* @param json
* @return
*/
public static Map<String, String> toMap(String json) {
if (StringUtils.isEmpty(json)) {
return null;
}
try {
return JSONObject.parseObject(json, new TypeReference<HashMap<String, String>>(){});
} catch (Exception e) {
logger.error("json to map exception!",e);
}
return null;
}
/**
*
* json to map
*
* <pre>
* String jsonStr = "{\"id\":\"1001\",\"name\":\"Jobs\"}";
* Map<String,String> models = JSONUtils.toMap(jsonStr, String.class,String.class);
* </pre>
* @param json
* @param classK
* @param classV
* @param <K>
* @param <V>
* @return
*/
public static <K, V> Map<K, V> toMap(String json, Class<K> classK, Class<V> classV) {
if (StringUtils.isEmpty(json)) {
return null;
}
try {
return JSONObject.parseObject(json, new TypeReference<HashMap<K, V>>() {});
} catch (Exception e) {
logger.error("json to map exception!",e);
}
return null;
}
/**
* convert object to json string <p>
*
* @return json string
*/
public static String toJsonString(Object object) {
try{
return JSONObject.toJSONString(object,false);
} catch (Exception e) {
throw new RuntimeException("Json deserialization exception.", e);
}
}
/**
* json serializer
*/
public static class JsonDataSerializer extends JsonSerializer<String> {
@Override
public void serialize(String value, JsonGenerator gen, SerializerProvider provider) throws IOException {
gen.writeRawValue(value);
}
}
/**
* json data deserializer
*/
public static class JsonDataDeserializer extends JsonDeserializer<String> {
@Override
public String deserialize(JsonParser p, DeserializationContext ctxt) throws IOException {
JsonNode node = p.getCodec().readTree(p);
return node.toString();
}
}
}
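A minimal usage sketch for JSONUtils (the JSON string below is illustrative; toMap and toJson return null on blank input or conversion errors):

import cn.escheduler.common.utils.JSONUtils;
import java.util.Map;

public class JSONUtilsUsageExample {
    public static void main(String[] args) {
        String json = "{\"id\":\"1001\",\"name\":\"Jobs\"}";
        // parse into a Map<String, String>
        Map<String, String> map = JSONUtils.toMap(json);
        System.out.println(map.get("name"));
        // and back to a JSON string
        System.out.println(JSONUtils.toJson(map));
    }
}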

297
escheduler-common/src/main/java/cn/escheduler/common/utils/OSUtils.java

@ -0,0 +1,297 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.utils;
import cn.escheduler.common.Constants;
import cn.escheduler.common.shell.ShellExecutor;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import oshi.SystemInfo;
import oshi.hardware.CentralProcessor;
import oshi.hardware.GlobalMemory;
import oshi.hardware.HardwareAbstractionLayer;
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.lang.management.ManagementFactory;
import java.lang.management.RuntimeMXBean;
import java.math.RoundingMode;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.List;
/**
* os utils
*
*/
public class OSUtils {
private static final Logger logger = LoggerFactory.getLogger(OSUtils.class);
private static final SystemInfo SI = new SystemInfo();
public static final String TWO_DECIMAL = "0.00";
private static HardwareAbstractionLayer hal = SI.getHardware();
private OSUtils() {}
/**
* get memory usage
* Keep 2 decimal
* @return percent %
*/
public static double memoryUsage() {
GlobalMemory memory = hal.getMemory();
double memoryUsage = (memory.getTotal() - memory.getAvailable() - memory.getSwapUsed()) * 0.1 / memory.getTotal() * 10;
DecimalFormat df = new DecimalFormat(TWO_DECIMAL);
df.setRoundingMode(RoundingMode.HALF_UP);
return Double.parseDouble(df.format(memoryUsage));
}
/**
* get available physical memory size
*
* Keep 2 decimal
* @return available Physical Memory Size, unit: G
*/
public static double availablePhysicalMemorySize() {
GlobalMemory memory = hal.getMemory();
double availablePhysicalMemorySize = (memory.getAvailable() + memory.getSwapUsed()) /1024.0/1024/1024;
DecimalFormat df = new DecimalFormat(TWO_DECIMAL);
df.setRoundingMode(RoundingMode.HALF_UP);
return Double.parseDouble(df.format(availablePhysicalMemorySize));
}
/**
* get total physical memory size
*
* Keep 2 decimal
* @return available Physical Memory Size, unit: G
*/
public static double totalMemorySize() {
GlobalMemory memory = hal.getMemory();
double availablePhysicalMemorySize = memory.getTotal() /1024.0/1024/1024;
DecimalFormat df = new DecimalFormat(TWO_DECIMAL);
df.setRoundingMode(RoundingMode.HALF_UP);
return Double.parseDouble(df.format(availablePhysicalMemorySize));
}
/**
* load average
*
* @return
*/
public static double loadAverage() {
double loadAverage = hal.getProcessor().getSystemLoadAverage();
DecimalFormat df = new DecimalFormat(TWO_DECIMAL);
df.setRoundingMode(RoundingMode.HALF_UP);
return Double.parseDouble(df.format(loadAverage));
}
/**
* get cpu usage
*
* @return
*/
public static double cpuUsage() {
CentralProcessor processor = hal.getProcessor();
double cpuUsage = processor.getSystemCpuLoad();
DecimalFormat df = new DecimalFormat(TWO_DECIMAL);
df.setRoundingMode(RoundingMode.HALF_UP);
return Double.parseDouble(df.format(cpuUsage));
}
/**
* get user list
*
* @return
*/
public static List<String> getUserList() {
List<String> userList = new ArrayList<>();
BufferedReader bufferedReader = null;
try {
bufferedReader = new BufferedReader(new InputStreamReader(new FileInputStream("/etc/passwd")));
String line;
while ((line = bufferedReader.readLine()) != null) {
if (line.contains(":")) {
String[] userInfo = line.split(":");
userList.add(userInfo[0]);
}
}
} catch (Exception e) {
logger.error(e.getMessage(), e);
} finally {
if (bufferedReader != null) {
try {
bufferedReader.close();
} catch (IOException e) {
logger.error(e.getMessage(), e);
}
}
}
return userList;
}
/**
* get system group information
* @return
* @throws IOException
*/
public static String getGroup() throws IOException {
String result = exeCmd("groups");
if (StringUtils.isNotEmpty(result)) {
String[] groupInfo = StringUtils.split(result);
return groupInfo[0];
}
return null;
}
/**
* Execute the corresponding command of Linux or Windows
*
* @param command
* @return
* @throws IOException
*/
public static String exeCmd(String command) throws IOException {
BufferedReader br = null;
try {
Process p = Runtime.getRuntime().exec(command);
br = new BufferedReader(new InputStreamReader(p.getInputStream()));
String line;
StringBuilder sb = new StringBuilder();
while ((line = br.readLine()) != null) {
sb.append(line + "\n");
}
return sb.toString();
} finally {
if (br != null) {
try {
br.close();
} catch (Exception e) {
logger.error(e.getMessage(), e);
}
}
}
}
/**
* Execute the shell
* @param command
* @return
* @throws IOException
*/
public static String exeShell(String command) throws IOException {
return ShellExecutor.execCommand("groups");
}
/**
* get process id
* @return
*/
public static int getProcessID() {
RuntimeMXBean runtimeMXBean = ManagementFactory.getRuntimeMXBean();
return Integer.parseInt(runtimeMXBean.getName().split("@")[0]);
}
/**
* get local host
* @return
*/
public static String getHost(){
try {
return InetAddress.getLocalHost().getHostAddress();
} catch (UnknownHostException e) {
logger.error(e.getMessage(),e);
}
return null;
}
/**
* whether is macOS
*/
public static boolean isMacOS() {
String os = System.getProperty("os.name");
return os.startsWith("Mac");
}
/**
* whether is windows
*/
public static boolean isWindows() {
String os = System.getProperty("os.name");
return os.startsWith("Windows");
}
/**
* check memory and cpu usage
* @param conf
* @return
*/
public static Boolean checkResource(Configuration conf, Boolean isMaster){
double systemCpuLoad;
double systemReservedMemory;
if(isMaster){
systemCpuLoad = conf.getDouble(Constants.MASTER_MAX_CPULOAD_AVG, Constants.defaultMasterCpuLoad);
systemReservedMemory = conf.getDouble(Constants.MASTER_RESERVED_MEMORY, Constants.defaultMasterReservedMemory);
}else{
systemCpuLoad = conf.getDouble(Constants.WORKER_MAX_CPULOAD_AVG, Constants.defaultWorkerCpuLoad);
systemReservedMemory = conf.getDouble(Constants.WORKER_RESERVED_MEMORY, Constants.defaultWorkerReservedMemory);
}
// current system load average
double loadAverage = OSUtils.loadAverage();
// current available physical memory (G)
double availablePhysicalMemorySize = OSUtils.availablePhysicalMemorySize();
if(loadAverage > systemCpuLoad || availablePhysicalMemorySize < systemReservedMemory){
logger.warn("load or availablePhysicalMemorySize(G) is too high, it's availablePhysicalMemorySize(G):{},loadAvg:{}", availablePhysicalMemorySize , loadAverage);
return false;
}else{
return true;
}
}
}
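A minimal usage sketch for the OSUtils metrics above (read-only system metric calls; no configuration is required for these):

import cn.escheduler.common.utils.OSUtils;

public class OSUtilsUsageExample {
    public static void main(String[] args) {
        System.out.println("memory usage: " + OSUtils.memoryUsage());
        System.out.println("available memory (G): " + OSUtils.availablePhysicalMemorySize());
        System.out.println("load average: " + OSUtils.loadAverage());
        System.out.println("cpu usage: " + OSUtils.cpuUsage());
        System.out.println("host: " + OSUtils.getHost());
        System.out.println("pid: " + OSUtils.getProcessID());
    }
}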

162
escheduler-common/src/main/java/cn/escheduler/common/utils/ParameterUtils.java

@ -0,0 +1,162 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.utils;
import cn.escheduler.common.Constants;
import cn.escheduler.common.enums.CommandType;
import cn.escheduler.common.enums.DataType;
import cn.escheduler.common.process.Property;
import cn.escheduler.common.utils.placeholder.BusinessTimeUtils;
import cn.escheduler.common.utils.placeholder.PlaceholderUtils;
import cn.escheduler.common.utils.placeholder.TimePlaceholderUtils;
import com.alibaba.fastjson.JSONObject;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.sql.PreparedStatement;
import java.text.ParseException;
import java.util.*;
/**
* parameter parse utils
*/
public class ParameterUtils {
private static final Logger logger = LoggerFactory.getLogger(ParameterUtils.class);
/**
* convert parameters place holders
*
* @param parameterString
* @param parameterMap
* @return
*/
public static String convertParameterPlaceholders(String parameterString, Map<String, String> parameterMap) {
if (StringUtils.isEmpty(parameterString)) {
return parameterString;
}
//Get current time, schedule execute time
String cronTimeStr = parameterMap.get(Constants.PARAMETER_DATETIME);
Date cronTime = null;
if (StringUtils.isNotEmpty(cronTimeStr)) {
try {
cronTime = DateUtils.parseDate(cronTimeStr, new String[]{Constants.PARAMETER_FORMAT_TIME});
} catch (ParseException e) {
logger.error(String.format("parse %s exception", cronTimeStr), e);
}
} else {
cronTime = new Date();
}
// replace ${...} placeholders, i.e. system variables and custom variables
parameterString = PlaceholderUtils.replacePlaceholders(parameterString, parameterMap, true);
// replace time $[...] form, eg. $[yyyyMMdd]
if (cronTime != null) {
parameterString = TimePlaceholderUtils.replacePlaceholders(parameterString, cronTime, true);
}
return parameterString;
}
/**
* set in parameter
* @param index
* @param stmt
* @param dataType
* @param value
* @throws Exception
*/
public static void setInParameter(int index, PreparedStatement stmt, DataType dataType, String value)throws Exception{
if (dataType.equals(DataType.VARCHAR)){
stmt.setString(index,value);
}else if (dataType.equals(DataType.INTEGER)){
stmt.setInt(index, Integer.parseInt(value));
}else if (dataType.equals(DataType.LONG)){
stmt.setLong(index, Long.parseLong(value));
}else if (dataType.equals(DataType.FLOAT)){
stmt.setFloat(index, Float.parseFloat(value));
}else if (dataType.equals(DataType.DOUBLE)){
stmt.setDouble(index, Double.parseDouble(value));
}else if (dataType.equals(DataType.DATE)){
stmt.setString(index, value);
}else if (dataType.equals(DataType.TIME)){
stmt.setString(index, value);
}else if (dataType.equals(DataType.TIMESTAMP)){
stmt.setString(index, value);
}else if (dataType.equals(DataType.BOOLEAN)){
stmt.setBoolean(index,Boolean.parseBoolean(value));
}
}
/**
* curing user defined parameters
*
* @return
*/
public static String curingGlobalParams(Map<String,String> globalParamMap, List<Property> globalParamList,
CommandType commandType, Date scheduleTime){
Map<String, String> globalMap = new HashMap<>();
if(globalParamMap!= null){
globalMap.putAll(globalParamMap);
}
Map<String,String> allParamMap = new HashMap<>();
//for complement (backfill) data, a complement time needs to be passed in, depending on the command type
Map<String,String> timeParams = BusinessTimeUtils
.getBusinessTime(commandType,scheduleTime);
if (timeParams != null) {
allParamMap.putAll(timeParams);
}
if (globalMap != null) {
allParamMap.putAll(globalMap);
}
Set<Map.Entry<String, String>> entries = allParamMap.entrySet();
Map<String,String> resolveMap = new HashMap<>();
for (Map.Entry<String,String> entry : entries){
String val = entry.getValue();
if (val.startsWith("$")){
String str = ParameterUtils.convertParameterPlaceholders(val, allParamMap);
resolveMap.put(entry.getKey(),str);
}
}
if (globalMap != null){
globalMap.putAll(resolveMap);
}
if (globalParamList != null && globalParamList.size() > 0){
for (Property property : globalParamList){
String val = globalMap.get(property.getProp());
if (val != null){
property.setValue(val);
}
}
return JSONObject.toJSONString(globalParamList);
}
return null;
}
}
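A minimal usage sketch for ParameterUtils.convertParameterPlaceholders (the parameter name and value are hypothetical; ${...} placeholders are resolved from the supplied map, and $[...] time placeholders from the schedule time):

import cn.escheduler.common.utils.ParameterUtils;
import java.util.HashMap;
import java.util.Map;

public class ParameterUtilsUsageExample {
    public static void main(String[] args) {
        Map<String, String> params = new HashMap<>();
        // hypothetical custom variable
        params.put("dt", "2019-01-01");
        String resolved = ParameterUtils.convertParameterPlaceholders("select * from t where ds='${dt}'", params);
        // -> select * from t where ds='2019-01-01'
        System.out.println(resolved);
    }
}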

192
escheduler-common/src/main/java/cn/escheduler/common/utils/PropertyUtils.java

@ -0,0 +1,192 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.utils;
import org.apache.commons.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;
import static cn.escheduler.common.Constants.COMMON_PROPERTIES_PATH;
import static cn.escheduler.common.Constants.HADOOP_PROPERTIES_PATH;
/**
* property utils
* single instance
*/
public class PropertyUtils {
/**
* logger
*/
private static final Logger logger = LoggerFactory.getLogger(PropertyUtils.class);
private static final Properties properties = new Properties();
private static final PropertyUtils propertyUtils = new PropertyUtils();
private PropertyUtils(){
init();
}
private void init(){
String[] propertyFiles = new String[]{HADOOP_PROPERTIES_PATH,COMMON_PROPERTIES_PATH};
for (String fileName : propertyFiles) {
InputStream fis = null;
try {
fis = PropertyUtils.class.getResourceAsStream(fileName);
properties.load(fis);
} catch (IOException e) {
logger.error(e.getMessage(), e);
System.exit(1);
} finally {
IOUtils.closeQuietly(fis);
}
}
}
/*
public static PropertyUtils getInstance(){
return propertyUtils;
}
*/
/**
* get property value
*
* @param key property name
* @return
*/
public static String getString(String key) {
return properties.getProperty(key.trim());
}
/**
* get property value
*
* @param key property name
* @return property int value; returns -1 if the key is missing or the value is not a number
*/
public static int getInt(String key) {
return getInt(key, -1);
}
/**
*
* @param key
* @param defaultValue
* @return
*/
public static int getInt(String key, int defaultValue) {
String value = getString(key);
if (value == null) {
return defaultValue;
}
try {
return Integer.parseInt(value);
} catch (NumberFormatException e) {
logger.info(e.getMessage(),e);
}
return defaultValue;
}
/**
* get property value
*
* @param key property name
* @return
*/
public static Boolean getBoolean(String key) {
String value = properties.getProperty(key.trim());
if(null != value){
return Boolean.parseBoolean(value);
}
return null;
}
/**
* get property long value
* @param key
* @param defaultVal
* @return
*/
public static long getLong(String key, long defaultVal) {
String val = getString(key);
return val == null ? defaultVal : Long.parseLong(val);
}
/**
*
* @param key
* @return
*/
public static long getLong(String key) {
return getLong(key,-1);
}
/**
*
* @param key
* @param defaultVal
* @return
*/
public static double getDouble(String key, double defaultVal) {
String val = getString(key);
return val == null ? defaultVal : Double.parseDouble(val);
}
/**
* get array
* @param key property name
* @param splitStr separator
* @return
*/
public static String[] getArray(String key, String splitStr) {
String value = getString(key);
if (value == null) {
return null;
}
try {
String[] propertyArray = value.split(splitStr);
return propertyArray;
} catch (NumberFormatException e) {
logger.info(e.getMessage(),e);
}
return null;
}
/**
*
* @param key
* @param type
* @param defaultValue
* @param <T>
* @return get enum value
*/
public static <T extends Enum<T>> T getEnum(String key, Class<T> type,
T defaultValue) {
String val = getString(key);
return val == null ? defaultValue : Enum.valueOf(type, val);
}
}
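A minimal usage sketch for PropertyUtils (a sketch only: the property keys are hypothetical and must exist in the common/hadoop properties files that the class loads at startup):

import cn.escheduler.common.utils.PropertyUtils;

public class PropertyUtilsUsageExample {
    public static void main(String[] args) {
        // hypothetical keys; getString returns null when a key is missing
        String basePath = PropertyUtils.getString("data.basedir.path");
        int port = PropertyUtils.getInt("resource.manager.httpaddress.port", 8088);
        String[] suffixes = PropertyUtils.getArray("resource.view.suffixs", ",");
        System.out.println(basePath + ", " + port + ", " + (suffixes == null ? 0 : suffixes.length));
    }
}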

74
escheduler-common/src/main/java/cn/escheduler/common/utils/TaskParametersUtils.java

@ -0,0 +1,74 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.utils;
import cn.escheduler.common.enums.TaskType;
import cn.escheduler.common.task.AbstractParameters;
import cn.escheduler.common.task.dependent.DependentParameters;
import cn.escheduler.common.task.mr.MapreduceParameters;
import cn.escheduler.common.task.procedure.ProcedureParameters;
import cn.escheduler.common.task.python.PythonParameters;
import cn.escheduler.common.task.shell.ShellParameters;
import cn.escheduler.common.task.spark.SparkParameters;
import cn.escheduler.common.task.sql.SqlParameters;
import cn.escheduler.common.task.subprocess.SubProcessParameters;
import org.apache.commons.lang3.EnumUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* task parameters utils
*/
public class TaskParametersUtils {
private static Logger logger = LoggerFactory.getLogger(TaskParametersUtils.class);
/**
* get task parameters
* @param taskType
* @param parameter
* @return task parameters
*/
public static AbstractParameters getParameters(String taskType, String parameter) {
try {
switch (EnumUtils.getEnum(TaskType.class,taskType)) {
case SUB_PROCESS:
return JSONUtils.parseObject(parameter, SubProcessParameters.class);
case SHELL:
return JSONUtils.parseObject(parameter, ShellParameters.class);
case PROCEDURE:
return JSONUtils.parseObject(parameter, ProcedureParameters.class);
case SQL:
return JSONUtils.parseObject(parameter, SqlParameters.class);
case MR:
return JSONUtils.parseObject(parameter, MapreduceParameters.class);
case SPARK:
return JSONUtils.parseObject(parameter, SparkParameters.class);
case PYTHON:
return JSONUtils.parseObject(parameter, PythonParameters.class);
case DEPENDENT:
return JSONUtils.parseObject(parameter, DependentParameters.class);
default:
return null;
}
} catch (Exception e) {
logger.error(e.getMessage(), e);
}
return null;
}
}
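A minimal usage sketch for TaskParametersUtils.getParameters (the task type string must match a TaskType enum name; the JSON below is hypothetical and only meant to show the dispatch, not the real ShellParameters schema):

import cn.escheduler.common.task.AbstractParameters;
import cn.escheduler.common.utils.TaskParametersUtils;

public class TaskParametersUtilsUsageExample {
    public static void main(String[] args) {
        // hypothetical parameter JSON for a SHELL task
        AbstractParameters params = TaskParametersUtils.getParameters("SHELL", "{\"rawScript\":\"echo hello\"}");
        System.out.println(params == null ? "parse failed" : params.getClass().getSimpleName());
    }
}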

134
escheduler-common/src/main/java/cn/escheduler/common/utils/dependent/DependentDateUtils.java

@ -0,0 +1,134 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.utils.dependent;
import cn.escheduler.common.model.DateInterval;
import cn.escheduler.common.utils.DateUtils;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
public class DependentDateUtils {
/**
* get last hours interval list
* @param businessDate
* @param hourNumber
* @return
*/
public static List<DateInterval> getLastHoursInterval(Date businessDate, int hourNumber){
List<DateInterval> dateIntervals = new ArrayList<>();
for(int index = hourNumber; index > 0; index--){
Date lastHour = DateUtils.getSomeHourOfDay(businessDate, index);
Date beginTime = DateUtils.getStartOfHour(lastHour);
Date endTime = DateUtils.getEndOfHour(lastHour);
dateIntervals.add(new DateInterval(beginTime, endTime));
}
return dateIntervals;
}
/**
* get last day interval list
* @param businessDate
* @param someDay
* @return
*/
public static List<DateInterval> getLastDayInterval(Date businessDate, int someDay){
List<DateInterval> dateIntervals = new ArrayList<>();
for(int index = someDay; index > 0; index--){
Date lastDay = DateUtils.getSomeDay(businessDate, -index);
Date beginTime = DateUtils.getStartOfDay(lastDay);
Date endTime = DateUtils.getEndOfDay(lastDay);
dateIntervals.add(new DateInterval(beginTime, endTime));
}
return dateIntervals;
}
/**
* get interval between last month first day and last day
* @param businessDate
* @return
*/
public static List<DateInterval> getLastMonthInterval(Date businessDate) {
Date firstDayThisMonth = DateUtils.getFirstDayOfMonth(businessDate);
Date lastDay = DateUtils.getSomeDay(firstDayThisMonth, -1);
Date firstDay = DateUtils.getFirstDayOfMonth(lastDay);
return getDateIntervalListBetweenTwoDates( firstDay, lastDay);
}
/**
* get interval on first/last day of the last month
* @param businessDate
* @param isBeginDay
* @return
*/
public static List<DateInterval> getLastMonthBeginInterval(Date businessDate, boolean isBeginDay) {
Date firstDayThisMonth = DateUtils.getFirstDayOfMonth(businessDate);
Date lastDay = DateUtils.getSomeDay(firstDayThisMonth, -1);
Date firstDay = DateUtils.getFirstDayOfMonth(lastDay);
if(isBeginDay){
return getDateIntervalListBetweenTwoDates(firstDay, firstDay);
}else{
return getDateIntervalListBetweenTwoDates(lastDay, lastDay);
}
}
/**
* get interval between monday to sunday of last week
* default set monday the first day of week
* @param businessDate
* @return
*/
public static List<DateInterval> getLastWeekInterval(Date businessDate) {
Date mondayThisWeek = DateUtils.getMonday(businessDate);
Date sunday = DateUtils.getSomeDay(mondayThisWeek, -1);
Date monday = DateUtils.getMonday(sunday);
return getDateIntervalListBetweenTwoDates(monday, sunday);
}
/**
* get interval on the day of last week
* default set monday the first day of week
* @param businessDate
* @param dayOfWeek monday:1,tuesday:2,wednesday:3,thursday:4,friday:5,saturday:6,sunday:7
* @return
*/
public static List<DateInterval> getLastWeekOneDayInterval(Date businessDate, int dayOfWeek) {
Date mondayThisWeek = DateUtils.getMonday(businessDate);
Date sunday = DateUtils.getSomeDay(mondayThisWeek, -1);
Date monday = DateUtils.getMonday(sunday);
Date destDay = DateUtils.getSomeDay(monday, dayOfWeek -1);
return getDateIntervalListBetweenTwoDates(destDay, destDay);
}
public static List<DateInterval> getDateIntervalListBetweenTwoDates(Date firstDay, Date lastDay) {
List<DateInterval> dateIntervals = new ArrayList<>();
while(!firstDay.after(lastDay)){
Date beginTime = DateUtils.getStartOfDay(firstDay);
Date endTime = DateUtils.getEndOfDay(firstDay);
dateIntervals.add(new DateInterval(beginTime, endTime));
firstDay = DateUtils.getSomeDay(firstDay, 1);
}
return dateIntervals;
}
}
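A minimal usage sketch for DependentDateUtils (purely illustrative; each returned DateInterval covers the start and end of one prior day relative to the given business date):

import cn.escheduler.common.model.DateInterval;
import cn.escheduler.common.utils.dependent.DependentDateUtils;
import java.util.Date;
import java.util.List;

public class DependentDateUtilsUsageExample {
    public static void main(String[] args) {
        // intervals for the last 3 days before "now"
        List<DateInterval> intervals = DependentDateUtils.getLastDayInterval(new Date(), 3);
        System.out.println("interval count: " + intervals.size());
    }
}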

65
escheduler-common/src/main/java/cn/escheduler/common/utils/placeholder/BusinessTimeUtils.java

@ -0,0 +1,65 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.utils.placeholder;
import cn.escheduler.common.Constants;
import cn.escheduler.common.enums.CommandType;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import static cn.escheduler.common.Constants.PARAMETER_FORMAT_DATE;
import static cn.escheduler.common.Constants.PARAMETER_FORMAT_TIME;
import static cn.escheduler.common.utils.DateUtils.format;
import static org.apache.commons.lang.time.DateUtils.addDays;
/**
* business time utils
*/
public class BusinessTimeUtils {
/**
* get business time in parameters by different command types
*
* @param commandType command type
* @param runTime run time or schedule time
*/
public static Map<String, String> getBusinessTime(CommandType commandType, Date runTime) {
Date businessDate = runTime;
switch (commandType) {
case COMPLEMENT_DATA:
break;
case START_PROCESS:
case START_CURRENT_TASK_PROCESS:
case RECOVER_TOLERANCE_FAULT_PROCESS:
case RECOVER_SUSPENDED_PROCESS:
case START_FAILURE_TASK_PROCESS:
case SCHEDULER:
default:
businessDate = addDays(new Date(), -1);
break;
}
Date businessCurrentDate = addDays(businessDate, 1);
Map<String, String> result = new HashMap<>();
result.put(Constants.PARAMETER_CURRENT_DATE, format(businessCurrentDate, PARAMETER_FORMAT_DATE));
result.put(Constants.PARAMETER_BUSINESS_DATE, format(businessDate, PARAMETER_FORMAT_DATE));
result.put(Constants.PARAMETER_DATETIME, format(businessCurrentDate, PARAMETER_FORMAT_TIME));
return result;
}
}
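A minimal usage sketch for BusinessTimeUtils.getBusinessTime (for a SCHEDULER command the business date defaults to the day before the current date; the map keys are the Constants.PARAMETER_* names used above):

import cn.escheduler.common.Constants;
import cn.escheduler.common.enums.CommandType;
import cn.escheduler.common.utils.placeholder.BusinessTimeUtils;
import java.util.Date;
import java.util.Map;

public class BusinessTimeUtilsUsageExample {
    public static void main(String[] args) {
        Map<String, String> times = BusinessTimeUtils.getBusinessTime(CommandType.SCHEDULER, new Date());
        System.out.println(times.get(Constants.PARAMETER_BUSINESS_DATE));
        System.out.println(times.get(Constants.PARAMETER_DATETIME));
    }
}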

99
escheduler-common/src/main/java/cn/escheduler/common/utils/placeholder/PlaceholderUtils.java

@ -0,0 +1,99 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.utils.placeholder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Map;
/**
* placeholder utils
*/
public class PlaceholderUtils {
private static final Logger logger = LoggerFactory.getLogger(PlaceholderUtils.class);
/**
* Prefix of the position to be replaced
*/
public static final String placeholderPrefix = "${";
/**
* The suffix of the position to be replaced
*/
public static final String placeholderSuffix = "}";
/**
* Replaces all placeholders of format {@code ${name}} with the value returned
* from the supplied {@link PropertyPlaceholderHelper.PlaceholderResolver}.
*
* @param value the value containing the placeholders to be replaced
* @param paramsMap placeholder data dictionary
* @return the supplied value with placeholders replaced inline
*/
public static String replacePlaceholders(String value, Map<String, String> paramsMap, boolean ignoreUnresolvablePlaceholders) {
//strict replacement: each placeholder key is replaced by its value; if no match is found, an exception is thrown
PropertyPlaceholderHelper strictHelper = getPropertyPlaceholderHelper(false);
//Non-strict replacement tool implementation, when the position to be replaced does not get the corresponding value, the current position is ignored, and the next position is replaced.
PropertyPlaceholderHelper nonStrictHelper = getPropertyPlaceholderHelper(true);
PropertyPlaceholderHelper helper = (ignoreUnresolvablePlaceholders ? nonStrictHelper : strictHelper);
//the PlaceholderResolver to use for replacement
return helper.replacePlaceholders(value, new PropertyPlaceholderResolver(value, paramsMap));
}
/**
* Creates a new {@code PropertyPlaceholderHelper} that uses the supplied prefix and suffix.
* @param ignoreUnresolvablePlaceholders indicates whether unresolvable placeholders should
* be ignored ({@code true}) or cause an exception ({@code false})
*/
public static PropertyPlaceholderHelper getPropertyPlaceholderHelper(boolean ignoreUnresolvablePlaceholders) {
return new PropertyPlaceholderHelper(placeholderPrefix, placeholderSuffix, null, ignoreUnresolvablePlaceholders);
}
/**
* Placeholder replacement resolver
*/
private static class PropertyPlaceholderResolver implements PropertyPlaceholderHelper.PlaceholderResolver {
private final String value;
private final Map<String, String> paramsMap;
public PropertyPlaceholderResolver(String value, Map<String, String> paramsMap) {
this.value = value;
this.paramsMap = paramsMap;
}
@Override
public String resolvePlaceholder(String placeholderName) {
try {
return paramsMap.get(placeholderName);
} catch (Exception ex) {
logger.error(String.format("resolve placeholder '%s' in [ %s ]" , placeholderName, value), ex);
return null;
}
}
}
}
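A minimal usage sketch for the utility above; the SQL text and the parameter names (dt, missing) are made up for illustration:
import cn.escheduler.common.utils.placeholder.PlaceholderUtils;
import java.util.HashMap;
import java.util.Map;
public class PlaceholderUtilsExample {
public static void main(String[] args) {
Map<String, String> params = new HashMap<>();
params.put("dt", "20190501");
// known placeholders are replaced; unknown ones are kept because the last argument is true
String sql = PlaceholderUtils.replacePlaceholders(
"select * from t where ds = '${dt}' and id = ${missing}", params, true);
// -> select * from t where ds = '20190501' and id = ${missing}
System.out.println(sql);
}
}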

254
escheduler-common/src/main/java/cn/escheduler/common/utils/placeholder/PropertyPlaceholderHelper.java

@@ -0,0 +1,254 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.utils.placeholder;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import java.util.*;
/**
* Utility class for working with Strings that have placeholder values in them. A placeholder takes the form
* {@code ${name}}. Using {@code PropertyPlaceholderHelper} these placeholders can be substituted for
* user-supplied values. <p> Values for substitution can be supplied using a {@link Properties} instance or
* using a {@link PlaceholderResolver}.
*
* @author Juergen Hoeller
* @author Rob Harrop
* @since 3.0
*/
public class PropertyPlaceholderHelper {
private static final Log logger = LogFactory.getLog(PropertyPlaceholderHelper.class);
private static final Map<String, String> wellKnownSimplePrefixes = new HashMap<String, String>(4);
static {
wellKnownSimplePrefixes.put("}", "{");
wellKnownSimplePrefixes.put("]", "[");
wellKnownSimplePrefixes.put(")", "(");
}
private final String placeholderPrefix;
private final String placeholderSuffix;
private final String simplePrefix;
private final String valueSeparator;
private final boolean ignoreUnresolvablePlaceholders;
/**
* Creates a new {@code PropertyPlaceholderHelper} that uses the supplied prefix and suffix.
* Unresolvable placeholders are ignored.
* @param placeholderPrefix the prefix that denotes the start of a placeholder
* @param placeholderSuffix the suffix that denotes the end of a placeholder
*/
public PropertyPlaceholderHelper(String placeholderPrefix, String placeholderSuffix) {
this(placeholderPrefix, placeholderSuffix, null, true);
}
/**
* Creates a new {@code PropertyPlaceholderHelper} that uses the supplied prefix and suffix.
* @param placeholderPrefix the prefix that denotes the start of a placeholder
* @param placeholderSuffix the suffix that denotes the end of a placeholder
* @param valueSeparator the separating character between the placeholder variable
* and the associated default value, if any
* @param ignoreUnresolvablePlaceholders indicates whether unresolvable placeholders should
* be ignored ({@code true}) or cause an exception ({@code false})
*/
public PropertyPlaceholderHelper(String placeholderPrefix, String placeholderSuffix,
String valueSeparator, boolean ignoreUnresolvablePlaceholders) {
notNull(placeholderPrefix, "'placeholderPrefix' must not be null");
notNull(placeholderSuffix, "'placeholderSuffix' must not be null");
this.placeholderPrefix = placeholderPrefix;
this.placeholderSuffix = placeholderSuffix;
String simplePrefixForSuffix = wellKnownSimplePrefixes.get(this.placeholderSuffix);
if (simplePrefixForSuffix != null && this.placeholderPrefix.endsWith(simplePrefixForSuffix)) {
this.simplePrefix = simplePrefixForSuffix;
}
else {
this.simplePrefix = this.placeholderPrefix;
}
this.valueSeparator = valueSeparator;
this.ignoreUnresolvablePlaceholders = ignoreUnresolvablePlaceholders;
}
/**
* Replaces all placeholders of format {@code ${name}} with the corresponding
* property from the supplied {@link Properties}.
* @param value the value containing the placeholders to be replaced
* @param properties the {@code Properties} to use for replacement
* @return the supplied value with placeholders replaced inline
*/
public String replacePlaceholders(String value, final Properties properties) {
notNull(properties, "'properties' must not be null");
return replacePlaceholders(value, new PlaceholderResolver() {
@Override
public String resolvePlaceholder(String placeholderName) {
return properties.getProperty(placeholderName);
}
});
}
/**
* Replaces all placeholders of format {@code ${name}} with the value returned
* from the supplied {@link PlaceholderResolver}.
* @param value the value containing the placeholders to be replaced
* @param placeholderResolver the {@code PlaceholderResolver} to use for replacement
* @return the supplied value with placeholders replaced inline
*/
public String replacePlaceholders(String value, PlaceholderResolver placeholderResolver) {
notNull(value, "'value' must not be null");
return parseStringValue(value, placeholderResolver, new HashSet<String>());
}
protected String parseStringValue(
String value, PlaceholderResolver placeholderResolver, Set<String> visitedPlaceholders) {
StringBuilder result = new StringBuilder(value);
int startIndex = value.indexOf(this.placeholderPrefix);
while (startIndex != -1) {
int endIndex = findPlaceholderEndIndex(result, startIndex);
if (endIndex != -1) {
String placeholder = result.substring(startIndex + this.placeholderPrefix.length(), endIndex);
String originalPlaceholder = placeholder;
if (!visitedPlaceholders.add(originalPlaceholder)) {
throw new IllegalArgumentException(
"Circular placeholder reference '" + originalPlaceholder + "' in property definitions");
}
// Recursive invocation, parsing placeholders contained in the placeholder key.
placeholder = parseStringValue(placeholder, placeholderResolver, visitedPlaceholders);
// Now obtain the value for the fully resolved key...
String propVal = placeholderResolver.resolvePlaceholder(placeholder);
if (propVal == null && this.valueSeparator != null) {
int separatorIndex = placeholder.indexOf(this.valueSeparator);
if (separatorIndex != -1) {
String actualPlaceholder = placeholder.substring(0, separatorIndex);
String defaultValue = placeholder.substring(separatorIndex + this.valueSeparator.length());
propVal = placeholderResolver.resolvePlaceholder(actualPlaceholder);
if (propVal == null) {
propVal = defaultValue;
}
}
}
if (propVal != null) {
// Recursive invocation, parsing placeholders contained in the
// previously resolved placeholder value.
propVal = parseStringValue(propVal, placeholderResolver, visitedPlaceholders);
result.replace(startIndex, endIndex + this.placeholderSuffix.length(), propVal);
if (logger.isTraceEnabled()) {
logger.trace("Resolved placeholder '" + placeholder + "'");
}
startIndex = result.indexOf(this.placeholderPrefix, startIndex + propVal.length());
}
else if (this.ignoreUnresolvablePlaceholders) {
// Proceed with unprocessed value.
startIndex = result.indexOf(this.placeholderPrefix, endIndex + this.placeholderSuffix.length());
}
else {
throw new IllegalArgumentException("Could not resolve placeholder '" +
placeholder + "'" + " in value \"" + value + "\"");
}
visitedPlaceholders.remove(originalPlaceholder);
}
else {
startIndex = -1;
}
}
return result.toString();
}
private int findPlaceholderEndIndex(CharSequence buf, int startIndex) {
int index = startIndex + this.placeholderPrefix.length();
int withinNestedPlaceholder = 0;
while (index < buf.length()) {
if (substringMatch(buf, index, this.placeholderSuffix)) {
if (withinNestedPlaceholder > 0) {
withinNestedPlaceholder--;
index = index + this.placeholderSuffix.length();
}
else {
return index;
}
}
else if (substringMatch(buf, index, this.simplePrefix)) {
withinNestedPlaceholder++;
index = index + this.simplePrefix.length();
}
else {
index++;
}
}
return -1;
}
/**
* Strategy interface used to resolve replacement values for placeholders contained in Strings.
*/
public interface PlaceholderResolver {
/**
* Resolve the supplied placeholder name to the replacement value.
* @param placeholderName the name of the placeholder to resolve
* @return the replacement value, or {@code null} if no replacement is to be made
*/
String resolvePlaceholder(String placeholderName);
}
/**
* Test whether the given string matches the given substring
* at the given index.
* @param str the original string (or StringBuilder)
* @param index the index in the original string to start matching against
* @param substring the substring to match at the given index
*/
public static boolean substringMatch(CharSequence str, int index, CharSequence substring) {
for (int j = 0; j < substring.length(); j++) {
int i = index + j;
if (i >= str.length() || str.charAt(i) != substring.charAt(j)) {
return false;
}
}
return true;
}
/**
* Assert that an object is not {@code null}.
* <pre class="code">Assert.notNull(clazz, "The class must not be null");</pre>
* @param object the object to check
* @param message the exception message to use if the assertion fails
* @throws IllegalArgumentException if the object is {@code null}
*/
public static void notNull(Object object, String message) {
if (object == null) {
throw new IllegalArgumentException(message);
}
}
}
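The helper also supports a default-value separator when one is supplied to the constructor (PlaceholderUtils above passes null, so this is optional behaviour). A small illustrative sketch; the property names queue and tenant are made up:
import cn.escheduler.common.utils.placeholder.PropertyPlaceholderHelper;
import java.util.Properties;
public class PropertyPlaceholderHelperExample {
public static void main(String[] args) {
// ":" acts as the separator between the placeholder name and its default value
PropertyPlaceholderHelper helper = new PropertyPlaceholderHelper("${", "}", ":", true);
Properties props = new Properties();
props.setProperty("queue", "default");
// resolvable placeholders are replaced; unresolved ones fall back to the default after ":"
System.out.println(helper.replacePlaceholders("${queue}", props));       // -> default
System.out.println(helper.replacePlaceholders("${tenant:root}", props)); // -> root
}
}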

512
escheduler-common/src/main/java/cn/escheduler/common/utils/placeholder/TimePlaceholderUtils.java

@@ -0,0 +1,512 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.utils.placeholder;
import cn.escheduler.common.Constants;
import cn.escheduler.common.utils.DateUtils;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
import static cn.escheduler.common.Constants.*;
/**
* time place holder utils
*/
public class TimePlaceholderUtils {
private static final Logger logger = LoggerFactory.getLogger(TimePlaceholderUtils.class);
/**
* Prefix of the position to be replaced
*/
public static final String placeholderPrefix = "$[";
/**
* The suffix of the position to be replaced
*/
public static final String placeholderSuffix = "]";
/**
* Replaces all placeholders of format {@code $[name]} with the time value calculated
* from the supplied date.
*
* @param value the value containing the placeholders to be replaced
* @param date custom date
* @param ignoreUnresolvablePlaceholders whether unresolvable placeholders should be ignored ({@code true}) or cause an exception ({@code false})
* @return the supplied value with placeholders replaced inline
*/
public static String replacePlaceholders(String value, Date date, boolean ignoreUnresolvablePlaceholders) {
PropertyPlaceholderHelper strictHelper = getPropertyPlaceholderHelper(false);
PropertyPlaceholderHelper nonStrictHelper = getPropertyPlaceholderHelper(true);
PropertyPlaceholderHelper helper = (ignoreUnresolvablePlaceholders ? nonStrictHelper : strictHelper);
return helper.replacePlaceholders(value, new TimePlaceholderResolver(value, date));
}
/**
* Creates a new {@code PropertyPlaceholderHelper} that uses the supplied prefix and suffix.
* @param ignoreUnresolvablePlaceholders indicates whether unresolvable placeholders should
* be ignored ({@code true}) or cause an exception ({@code false})
*/
private static PropertyPlaceholderHelper getPropertyPlaceholderHelper(boolean ignoreUnresolvablePlaceholders) {
return new PropertyPlaceholderHelper(placeholderPrefix, placeholderSuffix, null, ignoreUnresolvablePlaceholders);
}
/**
* calculate expression's value
*
* @param expression
* @return
*/
public static Integer calculate(String expression) {
expression = StringUtils.trim(expression);
expression = convert(expression);
List<String> result = string2List(expression);
result = convert2SuffixList(result);
return calculate(result);
}
/**
* Change the sign in the expression to P (positive) N (negative)
*
* @param expression the arithmetic expression
* @return converted expression, e.g. "-3+-6*(+8)-(-5)" -> "N3+N6*(P8)-(N5)"
*/
private static String convert(String expression) {
char[] arr = expression.toCharArray();
for (int i = 0; i < arr.length; i++) {
if (arr[i] == SUBTRACT_CHAR) {
if (i == 0) {
arr[i] = N;
} else {
char c = arr[i - 1];
if (c == ADD_CHAR || c == SUBTRACT_CHAR || c == MULTIPLY_CHAR || c == DIVISION_CHAR || c == LEFT_BRACE_CHAR) {
arr[i] = N;
}
}
} else if (arr[i] == ADD_CHAR) {
if (i == 0) {
arr[i] = P;
} else {
char c = arr[i - 1];
if (c == ADD_CHAR || c == SUBTRACT_CHAR || c == MULTIPLY_CHAR || c == DIVISION_CHAR || c == LEFT_BRACE_CHAR) {
arr[i] = P;
}
}
}
}
return new String(arr);
}
/**
* to suffix expression
*
* @param srcList
* @return
*/
private static List<String> convert2SuffixList(List<String> srcList) {
List<String> result = new ArrayList<>();
Stack<String> stack = new Stack<>();
for (int i = 0; i < srcList.size(); i++) {
if (Character.isDigit(srcList.get(i).charAt(0))) {
result.add(srcList.get(i));
} else {
switch (srcList.get(i).charAt(0)) {
case LEFT_BRACE_CHAR:
stack.push(srcList.get(i));
break;
case RIGHT_BRACE_CHAR:
while (!LEFT_BRACE_STRING.equals(stack.peek())) {
result.add(stack.pop());
}
stack.pop();
break;
default:
while (!stack.isEmpty() && compare(stack.peek(), srcList.get(i))) {
result.add(stack.pop());
}
stack.push(srcList.get(i));
break;
}
}
}
while (!stack.isEmpty()) {
result.add(stack.pop());
}
return result;
}
/**
* Calculate the suffix expression
*
* @param result
* @return
*/
private static Integer calculate(List<String> result) {
Stack<Integer> stack = new Stack<>();
for (int i = 0; i < result.size(); i++) {
if (Character.isDigit(result.get(i).charAt(0))) {
stack.push(Integer.parseInt(result.get(i)));
} else {
Integer backInt = stack.pop();
Integer frontInt = 0;
char op = result.get(i).charAt(0);
if (!(op == P || op == N)) {
frontInt = stack.pop();
}
Integer res = 0;
switch (result.get(i).charAt(0)) {
case P:
res = frontInt + backInt;
break;
case N:
res = frontInt - backInt;
break;
case ADD_CHAR:
res = frontInt + backInt;
break;
case SUBTRACT_CHAR:
res = frontInt - backInt;
break;
case MULTIPLY_CHAR:
res = frontInt * backInt;
break;
case DIVISION_CHAR:
res = frontInt / backInt;
break;
default:
break;
}
stack.push(res);
}
}
return stack.pop();
}
/**
* string to list
*
* @param expression
* @return list
*/
private static List<String> string2List(String expression) {
List<String> result = new ArrayList<>();
String num = "";
for (int i = 0; i < expression.length(); i++) {
if (Character.isDigit(expression.charAt(i))) {
num = num + expression.charAt(i);
} else {
if (!num.isEmpty()) {
result.add(num);
}
result.add(expression.charAt(i) + "");
num = "";
}
}
if (!num.isEmpty()) {
result.add(num);
}
return result;
}
/**
* compare operator precedence
*
* @param peek operator on top of the stack
* @param cur current operator
* @return true if the operator on the stack should be popped before pushing the current one
*/
private static boolean compare(String peek, String cur) {
if (MULTIPLY_STRING.equals(peek) && (DIVISION_STRING.equals(cur) || MULTIPLY_STRING.equals(cur) || ADD_STRING.equals(cur) || SUBTRACT_STRING.equals(cur))) {
return true;
} else if (DIVISION_STRING.equals(peek) && (DIVISION_STRING.equals(cur) || MULTIPLY_STRING.equals(cur) || ADD_STRING.equals(cur) || SUBTRACT_STRING.equals(cur))) {
return true;
} else if (ADD_STRING.equals(peek) && (ADD_STRING.equals(cur) || SUBTRACT_STRING.equals(cur))) {
return true;
} else {
return SUBTRACT_STRING.equals(peek) && (ADD_STRING.equals(cur) || SUBTRACT_STRING.equals(cur));
}
}
/**
* Placeholder replacement resolver
*/
private static class TimePlaceholderResolver implements
PropertyPlaceholderHelper.PlaceholderResolver {
private final String value;
private final Date date;
public TimePlaceholderResolver(String value, Date date) {
this.value = value;
this.date = date;
}
@Override
public String resolvePlaceholder(String placeholderName) {
try {
return calculateTime(placeholderName, date);
} catch (Exception ex) {
logger.error(String.format("resolve placeholder '%s' in [ %s ]" , placeholderName, value), ex);
return null;
}
}
}
/**
* calculate time
*
* @param expression time placeholder expression
* @param date base date
* @return calculated time string
*/
private static String calculateTime(String expression, Date date) {
// N years later: $[add_months(yyyyMMdd,12*N)], N months earlier: $[add_months(yyyyMMdd,-N)], etc.
String value;
try {
if (expression.startsWith(Constants.TIMESTAMP)) {
String timeExpression = expression.substring(Constants.TIMESTAMP.length() + 1, expression.length() - 1);
Map.Entry<Date, String> entry = calcTimeExpression(timeExpression, date);
String dateStr = DateUtils.format(entry.getKey(), entry.getValue());
Date timestamp = DateUtils.parse(dateStr, Constants.PARAMETER_FORMAT_TIME);
value = String.valueOf(timestamp.getTime() / 1000);
} else {
Map.Entry<Date, String> entry = calcTimeExpression(expression, date);
value = DateUtils.format(entry.getKey(), entry.getValue());
}
} catch (Exception e) {
logger.error(e.getMessage(), e);
throw e;
}
return value;
}
/**
* calculate time expression
*
* @return <date, date format>
*/
public static Map.Entry<Date, String> calcTimeExpression(String expression, Date date) {
Map.Entry<Date, String> resultEntry;
if (expression.startsWith(Constants.ADD_MONTHS)) {
resultEntry = calcMonths(expression, date);
} else if (expression.startsWith(Constants.MONTH_BEGIN)) {
resultEntry = calcMonthBegin(expression, date);
} else if (expression.startsWith(Constants.MONTH_END)) {
resultEntry = calcMonthEnd(expression, date);
} else if (expression.startsWith(Constants.WEEK_BEGIN)) {
resultEntry = calcWeekStart(expression, date);
} else if (expression.startsWith(Constants.WEEK_END)) {
resultEntry = calcWeekEnd(expression, date);
} else {
resultEntry = calcMinutes(expression, date);
}
return resultEntry;
}
/**
* get first day of month
*
* @return
*/
public static Map.Entry<Date, String> calcMonthBegin(String expression, Date date) {
String addMonthExpr = expression.substring(Constants.MONTH_BEGIN.length() + 1, expression.length() - 1);
String[] params = addMonthExpr.split(Constants.COMMA);
if (params.length == 2) {
String dateFormat = params[0];
String dayExpr = params[1];
Integer day = calculate(dayExpr);
Date targetDate = DateUtils.getFirstDayOfMonth(date);
targetDate = org.apache.commons.lang.time.DateUtils.addDays(targetDate, day);
return new AbstractMap.SimpleImmutableEntry<>(targetDate, dateFormat);
}
throw new RuntimeException("expression not valid");
}
/**
* get last day of month
*
*/
public static Map.Entry<Date, String> calcMonthEnd(String expression, Date date) {
String addMonthExpr = expression.substring(Constants.MONTH_END.length() + 1, expression.length() - 1);
String[] params = addMonthExpr.split(Constants.COMMA);
if (params.length == 2) {
String dateFormat = params[0];
String dayExpr = params[1];
Integer day = calculate(dayExpr);
Date targetDate = DateUtils.getLastDayOfMonth(date);
targetDate = org.apache.commons.lang.time.DateUtils.addDays(targetDate, day);
return new AbstractMap.SimpleImmutableEntry<>(targetDate, dateFormat);
}
throw new RuntimeException("expression not valid");
}
/**
* get first day of week
*
* @return monday
*/
public static Map.Entry<Date, String> calcWeekStart(String expression, Date date) {
String addMonthExpr = expression.substring(Constants.WEEK_BEGIN.length() + 1, expression.length() - 1);
String[] params = addMonthExpr.split(Constants.COMMA);
if (params.length == 2) {
String dateFormat = params[0];
String dayExpr = params[1];
Integer day = calculate(dayExpr);
Date targetDate = DateUtils.getMonday(date);
targetDate = org.apache.commons.lang.time.DateUtils.addDays(targetDate, day);
return new AbstractMap.SimpleImmutableEntry<>(targetDate, dateFormat);
}
throw new RuntimeException("expression not valid");
}
/**
* get last day of week
*
*/
public static Map.Entry<Date, String> calcWeekEnd(String expression, Date date) {
String addMonthExpr = expression.substring(Constants.WEEK_END.length() + 1, expression.length() - 1);
String[] params = addMonthExpr.split(Constants.COMMA);
if (params.length == 2) {
String dateFormat = params[0];
String dayExpr = params[1];
Integer day = calculate(dayExpr);
Date targetDate = DateUtils.getSunday(date);
targetDate = org.apache.commons.lang.time.DateUtils.addDays(targetDate, day);
return new AbstractMap.SimpleImmutableEntry<>(targetDate, dateFormat);
}
throw new RuntimeException("Expression not valid");
}
/**
* calc months expression
*
* @return <date, format>
*/
public static Map.Entry<Date, String> calcMonths(String expression, Date date) {
String addMonthExpr = expression.substring(Constants.ADD_MONTHS.length() + 1, expression.length() - 1);
String[] params = addMonthExpr.split(Constants.COMMA);
if (params.length == 2) {
String dateFormat = params[0];
String monthExpr = params[1];
Integer addMonth = calculate(monthExpr);
Date targetDate = org.apache.commons.lang.time.DateUtils.addMonths(date, addMonth);
return new AbstractMap.SimpleImmutableEntry<>(targetDate, dateFormat);
}
throw new RuntimeException("expression not valid");
}
/**
* calculate time expression
*
*
* @return <date, format>
*/
public static Map.Entry<Date, String> calcMinutes(String expression, Date date) {
if (expression.contains("+")) {
int index = expression.lastIndexOf('+');
if (Character.isDigit(expression.charAt(index + 1))) {
String addMinuteExpr = expression.substring(index + 1);
Date targetDate = org.apache.commons.lang.time.DateUtils
.addMinutes(date, calcMinutes(addMinuteExpr));
String dateFormat = expression.substring(0, index);
return new AbstractMap.SimpleImmutableEntry<>(targetDate, dateFormat);
}
} else if (expression.contains("-")) {
int index = expression.lastIndexOf('-');
if (Character.isDigit(expression.charAt(index + 1))) {
String addMinuteExpr = expression.substring(index + 1);
Date targetDate = org.apache.commons.lang.time.DateUtils
.addMinutes(date, 0 - calcMinutes(addMinuteExpr));
String dateFormat = expression.substring(0, index);
return new AbstractMap.SimpleImmutableEntry<>(targetDate, dateFormat);
}
// yyyy-MM-dd/HH:mm:ss
return new AbstractMap.SimpleImmutableEntry<>(date, expression);
}
// $[HHmmss]
return new AbstractMap.SimpleImmutableEntry<>(date, expression);
}
/**
* calculate the number of minutes the expression represents
*
* @param minuteExpression expression in days, optionally with a divisor, e.g. "1" (one day) or "1/24" (one hour)
* @return number of minutes
*/
public static Integer calcMinutes(String minuteExpression) {
int index = minuteExpression.indexOf("/");
String calcExpression;
if (index == -1) {
calcExpression = String.format("60*24*(%s)", minuteExpression);
} else {
calcExpression = String.format("60*24*(%s)%s", minuteExpression.substring(0, index),
minuteExpression.substring(index));
}
return calculate(calcExpression);
}
}
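An illustrative sketch of the $[...] syntax handled above: the arithmetic after the date pattern is interpreted in days unless a /24 or /24/60 divisor is present (see calcMinutes), and add_months shifts whole months. The sample date and the expected outputs in the comments follow from the code above, assuming DateUtils.format applies the given pattern with SimpleDateFormat:
import cn.escheduler.common.utils.placeholder.TimePlaceholderUtils;
import java.text.SimpleDateFormat;
import java.util.Date;
public class TimePlaceholderExample {
public static void main(String[] args) throws Exception {
Date date = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").parse("2019-05-02 10:00:00");
// $[yyyyMMdd-1] -> the day before the given date, formatted as yyyyMMdd
System.out.println(TimePlaceholderUtils.replacePlaceholders("$[yyyyMMdd-1]", date, true));               // 20190501
// $[HH:mm:ss+1/24] -> one hour after the given date
System.out.println(TimePlaceholderUtils.replacePlaceholders("$[HH:mm:ss+1/24]", date, true));            // 11:00:00
// $[add_months(yyyyMMdd,-1)] -> one month earlier
System.out.println(TimePlaceholderUtils.replacePlaceholders("$[add_months(yyyyMMdd,-1)]", date, true));  // 20190402
}
}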

340
escheduler-common/src/main/java/cn/escheduler/common/zk/AbstractZKClient.java

@@ -0,0 +1,340 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.zk;
import cn.escheduler.common.Constants;
import cn.escheduler.common.IStoppable;
import cn.escheduler.common.utils.DateUtils;
import cn.escheduler.common.utils.OSUtils;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.curator.RetryPolicy;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.imps.CuratorFrameworkState;
import org.apache.curator.framework.state.ConnectionState;
import org.apache.curator.framework.state.ConnectionStateListener;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.zookeeper.CreateMode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import static cn.escheduler.common.Constants.*;
/**
* abstract zookeeper client
*/
public abstract class AbstractZKClient {
private static final Logger logger = LoggerFactory.getLogger(AbstractZKClient.class);
/**
* load configuration file
*/
protected static Configuration conf;
protected CuratorFramework zkClient = null;
/**
* dead server node parent path
*/
protected String deadServerZNodeParentPath = null;
/**
* master node parent path
*/
protected String masterZNodeParentPath = null;
/**
* worker node parent path
*/
protected String workerZNodeParentPath = null;
/**
* server stop or not
*/
protected IStoppable stoppable = null;
static {
try {
conf = new PropertiesConfiguration(Constants.ZOOKEEPER_PROPERTIES_PATH);
}catch (ConfigurationException e){
logger.error("load configuration failed : " + e.getMessage(),e);
System.exit(1);
}
}
public AbstractZKClient() {
StringBuilder sb = new StringBuilder();
String[] zookeeperParamslist = conf.getStringArray(Constants.ZOOKEEPER_QUORUM);
for (String param : zookeeperParamslist) {
sb.append(param).append(Constants.COMMA);
}
if(sb.length() > 0){
sb.deleteCharAt(sb.length() - 1);
}
// retry strategy
RetryPolicy retryPolicy = new ExponentialBackoffRetry(
Integer.parseInt(conf.getString(Constants.ZOOKEEPER_RETRY_SLEEP)),
Integer.parseInt(conf.getString(Constants.ZOOKEEPER_RETRY_MAXTIME)));
try{
// create zookeeper client
zkClient = CuratorFrameworkFactory.builder()
.connectString(sb.toString())
.retryPolicy(retryPolicy)
.sessionTimeoutMs(1000 * Integer.parseInt(conf.getString(Constants.ZOOKEEPER_SESSION_TIMEOUT)))
.connectionTimeoutMs(1000 * Integer.parseInt(conf.getString(Constants.ZOOKEEPER_CONNECTION_TIMEOUT)))
.build();
zkClient.start();
initStateLister();
}catch(Exception e){
logger.error("create zookeeper connect failed : " + e.getMessage(),e);
System.exit(-1);
}
}
/**
*
* register status monitoring events for zookeeper clients
*/
public void initStateLister(){
if(zkClient == null) {
return;
}
// add ConnectionStateListener monitoring zookeeper connection state
ConnectionStateListener csLister = new ConnectionStateListener() {
@Override
public void stateChanged(CuratorFramework client, ConnectionState newState) {
logger.info("state changed , current state : " + newState.name());
/**
* probably session expired
*/
if(newState == ConnectionState.LOST){
// if lost , then exit
logger.info("current zookeepr connection state : connection lost ");
}
}
};
zkClient.getConnectionStateListenable().addListener(csLister);
}
public void start() {
zkClient.start();
logger.info("zookeeper start ...");
}
public void close() {
zkClient.getZookeeperClient().close();
zkClient.close();
logger.info("zookeeper close ...");
}
/**
* heartbeat for zookeeper
* @param znode node path
* @param serverType master or worker prefix
*/
public void heartBeatForZk(String znode, String serverType){
try {
//check dead or not in zookeeper
if(zkClient.getState() == CuratorFrameworkState.STOPPED || checkIsDeadServer(znode, serverType)){
stoppable.stop("i was judged to death, release resources and stop myself");
return;
}
byte[] bytes = zkClient.getData().forPath(znode);
String resInfoStr = new String(bytes);
String[] splits = resInfoStr.split(Constants.COMMA);
if (splits.length != Constants.HEARTBEAT_FOR_ZOOKEEPER_INFO_LENGTH){
return;
}
String str = splits[0] + Constants.COMMA +splits[1] + Constants.COMMA
+ OSUtils.cpuUsage() + Constants.COMMA
+ OSUtils.memoryUsage() + Constants.COMMA
+ splits[4] + Constants.COMMA
+ DateUtils.dateToString(new Date());
zkClient.setData().forPath(znode,str.getBytes());
} catch (Exception e) {
logger.error("heartbeat for zk failed : " + e.getMessage(), e);
stoppable.stop("heartbeat for zk exception, release resources and stop myself");
}
}
/**
* check dead server or not , if dead, stop self
*
* @param zNode node path
* @param serverType master or worker prefix
* @throws Exception
*/
protected boolean checkIsDeadServer(String zNode, String serverType) throws Exception {
//ip_sequenceno
String[] zNodesPath = zNode.split("\\/");
String ipSeqNo = zNodesPath[zNodesPath.length - 1];
String type = serverType.equals(MASTER_PREFIX) ? MASTER_PREFIX : WORKER_PREFIX;
String deadServerPath = deadServerZNodeParentPath + SINGLE_SLASH + type + UNDERLINE + ipSeqNo;
if(zkClient.checkExists().forPath(zNode) == null ||
zkClient.checkExists().forPath(deadServerPath) != null ){
return true;
}
return false;
}
/**
* init system znode
*/
protected void initSystemZNode(){
try {
// read master node parent path from conf
masterZNodeParentPath = conf.getString(Constants.ZOOKEEPER_ESCHEDULER_MASTERS);
// read worker node parent path from conf
workerZNodeParentPath = conf.getString(Constants.ZOOKEEPER_ESCHEDULER_WORKERS);
// read server node parent path from conf
deadServerZNodeParentPath = conf.getString(ZOOKEEPER_ESCHEDULER_DEAD_SERVERS);
if(zkClient.checkExists().forPath(deadServerZNodeParentPath) == null){
// create persistent dead server parent node
zkClient.create().creatingParentContainersIfNeeded()
.withMode(CreateMode.PERSISTENT).forPath(deadServerZNodeParentPath);
}
if(zkClient.checkExists().forPath(masterZNodeParentPath) == null){
// create persistent master parent node
zkClient.create().creatingParentContainersIfNeeded()
.withMode(CreateMode.PERSISTENT).forPath(masterZNodeParentPath);
}
if(zkClient.checkExists().forPath(workerZNodeParentPath) == null){
// create persistent worker parent node
zkClient.create().creatingParentContainersIfNeeded()
.withMode(CreateMode.PERSISTENT).forPath(workerZNodeParentPath);
}
} catch (Exception e) {
logger.error("init system znode failed : " + e.getMessage(),e);
}
}
public void removeDeadServerByHost(String host, String serverType) throws Exception {
List<String> deadServers = zkClient.getChildren().forPath(deadServerZNodeParentPath);
for(String serverPath : deadServers){
if(serverPath.startsWith(serverType+UNDERLINE+host)){
String server = deadServerZNodeParentPath + SINGLE_SLASH + serverPath;
zkClient.delete().forPath(server);
logger.info("{} server {} deleted from zk dead server path success" , serverType , host);
}
}
}
/**
* opType(add): if a dead server is found, add it to the zk deadServerPath
* opType(delete): delete path from zk
*
* @param zNode node path
* @param serverType master or worker prefix
* @param opType delete or add
* @throws Exception
*/
public void handleDeadServer(String zNode, String serverType, String opType) throws Exception {
//ip_sequenceno
String[] zNodesPath = zNode.split("\\/");
String ipSeqNo = zNodesPath[zNodesPath.length - 1];
String type = serverType.equals(MASTER_PREFIX) ? MASTER_PREFIX : WORKER_PREFIX;
//check server restart; if it restarted, its dead server path in zk should be deleted
if(opType.equals(DELETE_ZK_OP)){
String[] ipAndSeqNo = ipSeqNo.split(UNDERLINE);
String ip = ipAndSeqNo[0];
removeDeadServerByHost(ip, serverType);
}else if(opType.equals(ADD_ZK_OP)){
String deadServerPath = deadServerZNodeParentPath + SINGLE_SLASH + type + UNDERLINE + ipSeqNo;
if(zkClient.checkExists().forPath(deadServerPath) == null){
//add dead server info to zk dead server path : /dead-servers/
zkClient.create().forPath(deadServerPath,(type + UNDERLINE + ipSeqNo).getBytes());
logger.info("{} server dead , and {} added to zk dead server path success" , serverType, zNode);
}
}
}
/**
* for stop server
* @param serverStoppable
*/
public void setStoppable(IStoppable serverStoppable){
this.stoppable = serverStoppable;
}
/**
* get active master num
* @return
*/
public int getActiveMasterNum(){
List<String> childrenList = new ArrayList<>();
try {
// read master node parent path from conf
masterZNodeParentPath = conf.getString(Constants.ZOOKEEPER_ESCHEDULER_MASTERS);
if(zkClient.checkExists().forPath(masterZNodeParentPath) != null){
childrenList = zkClient.getChildren().forPath(masterZNodeParentPath);
}
} catch (Exception e) {
logger.warn(e.getMessage(),e);
return childrenList.size();
}
return childrenList.size();
}
@Override
public String toString() {
return "AbstractZKClient{" +
"zkClient=" + zkClient +
", deadServerZNodeParentPath='" + deadServerZNodeParentPath + '\'' +
", masterZNodeParentPath='" + masterZNodeParentPath + '\'' +
", workerZNodeParentPath='" + workerZNodeParentPath + '\'' +
", stoppable=" + stoppable +
'}';
}
}
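A minimal, purely illustrative subclass sketch showing how the template above is intended to be combined (the real master/worker servers are not part of this hunk; the class name below is hypothetical, and it assumes IStoppable declares a single stop(String cause) method and that zookeeper.properties is on the classpath):
import cn.escheduler.common.zk.AbstractZKClient;
public class DemoZKClient extends AbstractZKClient {
public static void main(String[] args) {
DemoZKClient client = new DemoZKClient();       // connects using zookeeper.properties
client.setStoppable(cause -> System.exit(0));   // assumes IStoppable only declares stop(String cause)
client.initSystemZNode();                       // ensure /dead-servers, /masters, /workers parent znodes exist
System.out.println("active masters: " + client.getActiveMasterNum());
client.close();
}
}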

110
escheduler-common/src/main/resources/bin/escheduler-daemon.sh

@@ -0,0 +1,110 @@
#!/usr/bin/env bash
usage="Usage: escheduler-daemon.sh (start|stop) <command> "
# if no args specified, show usage
if [ $# -le 1 ]; then
echo $usage
exit 1
fi
startStop=$1
shift
command=$1
shift
echo "Begin $startStop $command......"
BIN_DIR=`dirname $0`
BIN_DIR=`cd "$BIN_DIR"; pwd`
ESCHEDULER_HOME=$BIN_DIR/..
export JAVA_HOME=$JAVA_HOME
#export JAVA_HOME=/opt/soft/jdk
export HOSTNAME=`hostname`
export ESCHEDULER_PID_DIR=/tmp/
export ESCHEDULER_LOG_DIR=$ESCHEDULER_HOME/logs
export ESCHEDULER_CONF_DIR=$ESCHEDULER_HOME/conf
export ESCHEDULER_LIB_JARS=$ESCHEDULER_HOME/lib/*
export ESCHEDULER_OPTS="-server -Xmx16g -Xms4g -Xss512k -XX:+DisableExplicitGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:LargePageSizeInBytes=128m -XX:+UseFastAccessorMethods -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70"
export STOP_TIMEOUT=5
if [ ! -d "$ESCHEDULER_LOG_DIR" ]; then
mkdir $ESCHEDULER_LOG_DIR
fi
log=$ESCHEDULER_LOG_DIR/escheduler-$command-$HOSTNAME.out
pid=$ESCHEDULER_LOG_DIR/escheduler-$command.pid
cd $ESCHEDULER_HOME
if [ "$command" = "api-server" ]; then
LOG_FILE="-Dlogging.config=conf/apiserver_logback.xml"
CLASS=cn.escheduler.api.ApiApplicationServer
elif [ "$command" = "master-server" ]; then
LOG_FILE="-Dspring.config.location=conf/application_master.properties -Ddruid.mysql.usePingMethod=false"
CLASS=cn.escheduler.server.master.MasterServer
elif [ "$command" = "worker-server" ]; then
LOG_FILE="-Dlogback.configurationFile=conf/worker_logback.xml -Ddruid.mysql.usePingMethod=false"
CLASS=cn.escheduler.server.worker.WorkerServer
elif [ "$command" = "alert-server" ]; then
LOG_FILE="-Dlogback.configurationFile=conf/alert_logback.xml"
CLASS=cn.escheduler.alert.AlertServer
elif [ "$command" = "logger-server" ]; then
CLASS=cn.escheduler.server.rpc.LoggerServer
else
echo "Error: No command named \`$command' was found."
exit 1
fi
case $startStop in
(start)
[ -w "$ESCHEDULER_PID_DIR" ] || mkdir -p "$ESCHEDULER_PID_DIR"
if [ -f $pid ]; then
if kill -0 `cat $pid` > /dev/null 2>&1; then
echo $command running as process `cat $pid`. Stop it first.
exit 1
fi
fi
echo starting $command, logging to $log
exec_command="$LOG_FILE $ESCHEDULER_OPTS -classpath $ESCHEDULER_CONF_DIR:$ESCHEDULER_LIB_JARS $CLASS"
echo "nohup $JAVA_HOME/bin/java $exec_command > $log 2>&1 < /dev/null &"
nohup $JAVA_HOME/bin/java $exec_command > $log 2>&1 < /dev/null &
echo $! > $pid
;;
(stop)
if [ -f $pid ]; then
TARGET_PID=`cat $pid`
if kill -0 $TARGET_PID > /dev/null 2>&1; then
echo stopping $command
kill $TARGET_PID
sleep $STOP_TIMEOUT
if kill -0 $TARGET_PID > /dev/null 2>&1; then
echo "$command did not stop gracefully after $STOP_TIMEOUT seconds: killing with kill -9"
kill -9 $TARGET_PID
fi
else
echo no $command to stop
fi
rm -f $pid
else
echo no $command to stop
fi
;;
(*)
echo $usage
exit 1
;;
esac
echo "End $startStop $command."

28
escheduler-common/src/main/resources/common/common.properties

@@ -0,0 +1,28 @@
#task queue implementation, default "zookeeper"
escheduler.queue.impl=zookeeper
# user data directory path, configure it yourself and make sure the directory exists and has read/write permissions
data.basedir.path=/tmp/escheduler
# directory path for user data downloads, configure it yourself and make sure the directory exists and has read/write permissions
data.download.basedir.path=/tmp/escheduler/download
# process execution directory, configure it yourself and make sure the directory exists and has read/write permissions
process.exec.basepath=/tmp/escheduler/exec
# hdfs base directory; resource files are stored under this hadoop hdfs path. configure it yourself and make sure the directory exists on hdfs with read/write permissions. "/escheduler" is recommended
data.store2hdfs.basepath=/escheduler
# whether hdfs is enabled
hdfs.startup.state=true
# system env path, configure it yourself and make sure the directory and file exist with read/write/execute permissions
escheduler.env.path=/opt/.escheduler_env.sh
escheduler.env.py=/opt/escheduler_env.py
#resource.view.suffixs
resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml
# is development state? default "false"
development.state=false

8
escheduler-common/src/main/resources/common/hadoop/hadoop.properties

@@ -0,0 +1,8 @@
# ha or single namenode; if namenode ha is used, copy core-site.xml and hdfs-site.xml to the conf directory
fs.defaultFS=hdfs://mycluster:8020
# resourcemanager ha needs the ips of the resourcemanagers; leave this empty for a single resourcemanager
yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx
# for a single resourcemanager only one hostname needs to be configured; for resourcemanager ha the default configuration is fine
yarn.application.status.address=http://ark1:8088/ws/v1/cluster/apps/%s

39
escheduler-common/src/main/resources/quartz.properties

@@ -0,0 +1,39 @@
#============================================================================
# Configure Main Scheduler Properties
#============================================================================
org.quartz.scheduler.instanceName = EasyScheduler
org.quartz.scheduler.instanceId = AUTO
org.quartz.scheduler.makeSchedulerThreadDaemon = true
org.quartz.jobStore.useProperties = false
#============================================================================
# Configure ThreadPool
#============================================================================
org.quartz.threadPool.class = org.quartz.simpl.SimpleThreadPool
org.quartz.threadPool.makeThreadsDaemons = true
org.quartz.threadPool.threadCount = 25
org.quartz.threadPool.threadPriority = 5
#============================================================================
# Configure JobStore
#============================================================================
org.quartz.jobStore.class = org.quartz.impl.jdbcjobstore.JobStoreTX
org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.StdJDBCDelegate
org.quartz.jobStore.tablePrefix = QRTZ_
org.quartz.jobStore.isClustered = true
org.quartz.jobStore.misfireThreshold = 60000
org.quartz.jobStore.clusterCheckinInterval = 5000
org.quartz.jobStore.dataSource = myDs
#============================================================================
# Configure Datasources
#============================================================================
org.quartz.dataSource.myDs.driver = com.mysql.jdbc.Driver
org.quartz.dataSource.myDs.URL = jdbc:mysql://192.168.xx.xx:3306/escheduler?characterEncoding=utf8
org.quartz.dataSource.myDs.user = xx
org.quartz.dataSource.myDs.password = xx
org.quartz.dataSource.myDs.maxConnections = 10
org.quartz.dataSource.myDs.validationQuery = select 1

24
escheduler-common/src/main/resources/zookeeper.properties

@@ -0,0 +1,24 @@
#zookeeper cluster
zookeeper.quorum=192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181
#escheduler root directory
zookeeper.escheduler.root=/escheduler
#zookeeper server directory
zookeeper.escheduler.dead.servers=/escheduler/dead-servers
zookeeper.escheduler.masters=/escheduler/masters
zookeeper.escheduler.workers=/escheduler/workers
#zookeeper lock directory
zookeeper.escheduler.lock.masters=/escheduler/lock/masters
zookeeper.escheduler.lock.workers=/escheduler/lock/workers
#escheduler failover directory
zookeeper.escheduler.lock.failover.masters=/escheduler/lock/failover/masters
zookeeper.escheduler.lock.failover.workers=/escheduler/lock/failover/workers
#zookeeper session timeout, connection timeout and retry settings
zookeeper.session.timeout=300
zookeeper.connection.timeout=300
zookeeper.retry.sleep=1000
zookeeper.retry.maxtime=5

355
escheduler-common/src/test/java/cn/escheduler/common/graph/DAGTest.java

@@ -0,0 +1,355 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.graph;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import static org.junit.Assert.*;
public class DAGTest {
private DAG<Integer, String, String> graph;
private static final Logger logger = LoggerFactory.getLogger(DAGTest.class);
@Before
public void setup() {
graph = new DAG<>();
}
@After
public void tearDown() {
clear();
}
private void clear() {
graph = null;
graph = new DAG<>();
assertEquals(graph.getNodesCount(), 0);
}
private void makeGraph() {
clear();
// 1->2
// 2->5
// 3->5
// 4->6
// 5->6
// 6->7
for (int i = 1; i <= 7; ++i) {
graph.addNode(i, "v(" + i + ")");
}
// build edges
assertTrue(graph.addEdge(1, 2));
assertTrue(graph.addEdge(2, 5));
assertTrue(graph.addEdge(3, 5));
assertTrue(graph.addEdge(4, 6));
assertTrue(graph.addEdge(5, 6));
assertTrue(graph.addEdge(6, 7));
assertEquals(graph.getNodesCount(), 7);
assertEquals(graph.getEdgesCount(), 6);
}
/**
* test adding nodes
*/
@Test
public void testAddNode() {
clear();
graph.addNode(1, "v(1)");
graph.addNode(2, null);
graph.addNode(5, "v(5)");
assertEquals(graph.getNodesCount(), 3);
assertEquals(graph.getNode(1), "v(1)");
assertTrue(graph.containsNode(1));
assertFalse(graph.containsNode(10));
}
/**
* test adding edges
*/
@Test
public void testAddEdge() {
clear();
assertFalse(graph.addEdge(1, 2, "edge(1 -> 2)", false));
graph.addNode(1, "v(1)");
assertTrue(graph.addEdge(1, 2, "edge(1 -> 2)",true));
graph.addNode(2, "v(2)");
assertTrue(graph.addEdge(1, 2, "edge(1 -> 2)",true));
assertFalse(graph.containsEdge(1, 3));
assertTrue(graph.containsEdge(1, 2));
assertEquals(graph.getEdgesCount(), 1);
}
/**
* test subsequent nodes
*/
@Test
public void testSubsequentNodes() {
makeGraph();
assertEquals(graph.getSubsequentNodes(1).size(), 1);
}
/**
* test indegree
*/
@Test
public void testIndegree() {
makeGraph();
assertEquals(graph.getIndegree(1), 0);
assertEquals(graph.getIndegree(2), 1);
assertEquals(graph.getIndegree(3), 0);
assertEquals(graph.getIndegree(4), 0);
}
/**
* test begin (source) nodes
*/
@Test
public void testBeginNode() {
makeGraph();
assertEquals(graph.getBeginNode().size(), 3);
assertTrue(graph.getBeginNode().contains(1));
assertTrue(graph.getBeginNode().contains(3));
assertTrue(graph.getBeginNode().contains(4));
}
/**
* test end (sink) nodes
*/
@Test
public void testEndNode() {
makeGraph();
assertEquals(graph.getEndNode().size(), 1);
assertTrue(graph.getEndNode().contains(7));
}
/**
* test cycle detection
*/
@Test
public void testCycle() {
clear();
// build nodes
for (int i = 1; i <= 5; ++i) {
graph.addNode(i, "v(" + i + ")");
}
// build edges: 1->2, 2->3, 3->4
try {
graph.addEdge(1, 2);
graph.addEdge(2, 3);
graph.addEdge(3, 4);
assertFalse(graph.hasCycle());
} catch (Exception e) {
e.printStackTrace();
fail();
}
try {
boolean addResult = graph.addEdge(4, 1);//would create a cycle, so the edge is rejected
if(!addResult){//cycle detected, edge not added
assertTrue(true);
}
graph.addEdge(5, 1);
assertFalse(graph.hasCycle());
} catch (Exception e) {
e.printStackTrace();
fail();
}
// clear again
clear();
// build nodes
for (int i = 1; i <= 5; ++i) {
graph.addNode(i, "v(" + i +")");
}
// build edges: 1->2, 2->3, 3->4
try {
graph.addEdge(1, 2);
graph.addEdge(2, 3);
graph.addEdge(3, 4);
graph.addEdge(4, 5);
graph.addEdge(5, 2);//would create a cycle, so it is rejected and the graph stays acyclic
assertFalse(graph.hasCycle());
} catch (Exception e) {
e.printStackTrace();
fail();
}
}
@Test
public void testTopologicalSort(){
makeGraph();
try {
List<Integer> topoList = new ArrayList<>();//one valid topological order is 1 3 4 2 5 6 7
topoList.add(1);
topoList.add(3);
topoList.add(4);
topoList.add(2);
topoList.add(5);
topoList.add(6);
topoList.add(7);
assertEquals(graph.topologicalSort(),topoList);
} catch (Exception e) {
e.printStackTrace();
fail();
}
}
@Test
public void testTopologicalSort2() {
clear();
graph.addEdge(1, 2, null, true);
graph.addEdge(2, 3, null, true);
graph.addEdge(3, 4, null, true);
graph.addEdge(4, 5, null, true);
graph.addEdge(5, 1, null, false); //rejected because it would create a cycle; an ERROR level log is written
try {
List<Integer> topoList = new ArrayList<>();//topological order is 1 2 3 4 5
topoList.add(1);
topoList.add(2);
topoList.add(3);
topoList.add(4);
topoList.add(5);
assertEquals(graph.topologicalSort(),topoList);
} catch (Exception e) {
e.printStackTrace();
fail();
}
}
/**
*
*/
@Test
public void testTopologicalSort3() throws Exception {
clear();
// 1->2
// 1->3
// 2->5
// 3->4
// 4->6
// 5->6
// 6->7
// 6->8
for (int i = 1; i <= 8; ++i) {
graph.addNode(i, "v(" + i + ")");
}
// build edges
assertTrue(graph.addEdge(1, 2));
assertTrue(graph.addEdge(1, 3));
assertTrue(graph.addEdge(2, 5));
assertTrue(graph.addEdge(3, 4));
assertTrue(graph.addEdge(4, 6));
assertTrue(graph.addEdge(5, 6));
assertTrue(graph.addEdge(6, 7));
assertTrue(graph.addEdge(6, 8));
assertEquals(graph.getNodesCount(), 8);
logger.info(Arrays.toString(graph.topologicalSort().toArray()));
List<Integer> expectedList = new ArrayList<>();
for (int i = 1; i <= 8; ++i) {
expectedList.add(i);
logger.info(i + " subsequentNodes : " + graph.getSubsequentNodes(i));
}
assertArrayEquals(expectedList.toArray(),graph.topologicalSort().toArray());
logger.info(6 + " previousNodes: " + graph.getPreviousNodes(6));
}
}

140
escheduler-common/src/test/java/cn/escheduler/common/os/OSUtilsTest.java

@@ -0,0 +1,140 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.os;
import cn.escheduler.common.utils.OSUtils;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import oshi.hardware.GlobalMemory;
import java.math.RoundingMode;
import java.text.DecimalFormat;
/**
* OSUtilsTest
*/
public class OSUtilsTest {
private static Logger logger = LoggerFactory.getLogger(OSUtilsTest.class);
// static SystemInfo si = new SystemInfo();
// static HardwareAbstractionLayer hal = si.getHardware();
@Test
public void memoryUsage() {
logger.info("memoryUsage : {}", OSUtils.memoryUsage());// 0.3361799418926239
// printMemory(hal.getMemory());// 35 %
}
@Test
public void availablePhysicalMemorySize() {
logger.info("availablePhysicalMemorySize : {}", OSUtils.availablePhysicalMemorySize());
logger.info("availablePhysicalMemorySize : {}", OSUtils.totalMemorySize() / 10);
}
@Test
public void loadAverage() {
logger.info("memoryUsage : {}", OSUtils.loadAverage());
}
private void printMemory(GlobalMemory memory) {
logger.info("memoryUsage : {} %" , (memory.getTotal() - memory.getAvailable()) * 100 / memory.getTotal() );
}
@Test
public void cpuUsage() throws Exception {
logger.info("cpuUsage : {}", OSUtils.cpuUsage());
Thread.sleep(1000l);
logger.info("cpuUsage : {}", OSUtils.cpuUsage());
double cpuUsage = OSUtils.cpuUsage();
DecimalFormat df = new DecimalFormat("0.00");
df.setRoundingMode(RoundingMode.HALF_UP);
logger.info("cpuUsage1 : {}", df.format(cpuUsage));
}
//
// @Test
// public void getUserList() {
// logger.info("getUserList : {}", OSUtils.getUserList());
// }
//
//
// @Test
// public void getGroup() throws Exception {
// logger.info("getGroup : {}", OSUtils.getGroup());
// logger.info("getGroup : {}", OSUtils.exeShell("groups"));
//
//
// }
//
//
// @Test
// public void getProcessID() {
// logger.info("getProcessID : {}", OSUtils.getProcessID());
// }
//
//
// @Test
// public void getHost() {
// logger.info("getHost : {}", OSUtils.getHost());
// }
//
//
//
// @Test
// public void anotherGetOsInfoTest() throws InterruptedException {
// OperatingSystemMXBean os = ManagementFactory.getPlatformMXBean(OperatingSystemMXBean.class);
// final MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean();
//
// MemoryUsage memoryUsage = memoryMXBean.getHeapMemoryUsage();
// double usage = (double)memoryUsage.getUsed() / (double)memoryUsage.getCommitted();
// logger.info("memory usage : {}",usage);
//
// if (os instanceof UnixOperatingSystemMXBean) {
// UnixOperatingSystemMXBean unixOs = (UnixOperatingSystemMXBean) os;
// logger.info("getMaxFileDescriptorCount : {}" ,unixOs.getMaxFileDescriptorCount()); //10240
// logger.info("getOpenFileDescriptorCount : {}",unixOs.getOpenFileDescriptorCount()); //241
// logger.info("getAvailableProcessors : {}",unixOs.getAvailableProcessors()); //8
//
// logger.info("getSystemLoadAverage : {}",unixOs.getSystemLoadAverage()); //1.36083984375
//
// logger.info("getFreePhysicalMemorySize : {}",unixOs.getFreePhysicalMemorySize()); //209768448
//
// logger.info("getTotalPhysicalMemorySize : {}",unixOs.getTotalPhysicalMemorySize()); //17179869184 16G
//
// for(int i = 0; i < 3; i++) {
// logger.info("getSystemCpuLoad : {}", unixOs.getSystemCpuLoad()); //0.0
//
// logger.info("getProcessCpuLoad : {}", unixOs.getProcessCpuLoad() * 10); //0.0
// Thread.sleep(1000l);
// }
// }
// }
//
}

112
escheduler-common/src/test/java/cn/escheduler/common/os/OshiTest.java

@@ -0,0 +1,112 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.os;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import oshi.SystemInfo;
import oshi.hardware.CentralProcessor;
import oshi.hardware.CentralProcessor.TickType;
import oshi.hardware.GlobalMemory;
import oshi.hardware.HardwareAbstractionLayer;
import oshi.util.FormatUtil;
import oshi.util.Util;
import java.util.Arrays;
/**
* os information test
*/
public class OshiTest {
private static Logger logger = LoggerFactory.getLogger(OshiTest.class);
@Test
public void test() {
SystemInfo si = new SystemInfo();
HardwareAbstractionLayer hal = si.getHardware();
logger.info("Checking Memory...");
printMemory(hal.getMemory());
logger.info("Checking CPU...");
printCpu(hal.getProcessor());
}
private static void printMemory(GlobalMemory memory) {
logger.info("memory avail:{} MB" , memory.getAvailable() / 1024 / 1024 );//memory avail:6863 MB
logger.info("memory total:{} MB" , memory.getTotal() / 1024 / 1024 );//memory total:16384 MB
}
private static void printCpu(CentralProcessor processor) {
logger.info(String.format("CPU load: %.1f%% (OS MXBean)%n", processor.getSystemCpuLoad() * 100));//CPU load: 24.9% (OS MXBean)
logger.info("CPU load averages : {}", processor.getSystemLoadAverage());//CPU load averages : 1.5234375
logger.info("Uptime: " + FormatUtil.formatElapsedSecs(processor.getSystemUptime()));
logger.info("Context Switches/Interrupts: " + processor.getContextSwitches() + " / " + processor.getInterrupts());
long[] prevTicks = processor.getSystemCpuLoadTicks();
logger.info("CPU, IOWait, and IRQ ticks @ 0 sec:" + Arrays.toString(prevTicks));
//Wait a second...
Util.sleep(1000);
long[] ticks = processor.getSystemCpuLoadTicks();
logger.info("CPU, IOWait, and IRQ ticks @ 1 sec:" + Arrays.toString(ticks));
long user = ticks[TickType.USER.getIndex()] - prevTicks[TickType.USER.getIndex()];
long nice = ticks[TickType.NICE.getIndex()] - prevTicks[TickType.NICE.getIndex()];
long sys = ticks[TickType.SYSTEM.getIndex()] - prevTicks[TickType.SYSTEM.getIndex()];
long idle = ticks[TickType.IDLE.getIndex()] - prevTicks[TickType.IDLE.getIndex()];
long iowait = ticks[TickType.IOWAIT.getIndex()] - prevTicks[TickType.IOWAIT.getIndex()];
long irq = ticks[TickType.IRQ.getIndex()] - prevTicks[TickType.IRQ.getIndex()];
long softirq = ticks[TickType.SOFTIRQ.getIndex()] - prevTicks[TickType.SOFTIRQ.getIndex()];
long steal = ticks[TickType.STEAL.getIndex()] - prevTicks[TickType.STEAL.getIndex()];
long totalCpu = user + nice + sys + idle + iowait + irq + softirq + steal;
logger.info(String.format(
"User: %.1f%% Nice: %.1f%% System: %.1f%% Idle: %.1f%% IOwait: %.1f%% IRQ: %.1f%% SoftIRQ: %.1f%% Steal: %.1f%%%n",
100d * user / totalCpu, 100d * nice / totalCpu, 100d * sys / totalCpu, 100d * idle / totalCpu,
100d * iowait / totalCpu, 100d * irq / totalCpu, 100d * softirq / totalCpu, 100d * steal / totalCpu));
logger.info(String.format("CPU load: %.1f%% (counting ticks)%n", processor.getSystemCpuLoadBetweenTicks() * 100));
double[] loadAverage = processor.getSystemLoadAverage(3);
logger.info("CPU load averages:" + (loadAverage[0] < 0 ? " N/A" : String.format(" %.2f", loadAverage[0]))
+ (loadAverage[1] < 0 ? " N/A" : String.format(" %.2f", loadAverage[1]))
+ (loadAverage[2] < 0 ? " N/A" : String.format(" %.2f", loadAverage[2])));
// per core CPU
StringBuilder procCpu = new StringBuilder("CPU load per processor:");
double[] load = processor.getProcessorCpuLoadBetweenTicks();
for (double avg : load) {
procCpu.append(String.format(" %.1f%%", avg * 100));
}
logger.info(procCpu.toString());
}
}

112
escheduler-common/src/test/java/cn/escheduler/common/queue/TaskQueueImplTest.java

@@ -0,0 +1,112 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.queue;
import cn.escheduler.common.Constants;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Arrays;
import java.util.Random;
import static org.junit.Assert.assertEquals;
/**
* task queue test
*/
public class TaskQueueImplTest {
private static final Logger logger = LoggerFactory.getLogger(TaskQueueImplTest.class);
@Test
public void testTaskQueue(){
ITaskQueue tasksQueue = TaskQueueFactory.getTaskQueueInstance();
//clear all data
tasksQueue.delete();
//add
tasksQueue.add(Constants.SCHEDULER_TASKS_QUEUE,"1");
tasksQueue.add(Constants.SCHEDULER_TASKS_QUEUE,"2");
tasksQueue.add(Constants.SCHEDULER_TASKS_QUEUE,"3");
tasksQueue.add(Constants.SCHEDULER_TASKS_QUEUE,"4");
//pop
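//poll should return the queued elements in the order they were added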
String node1 = tasksQueue.poll(Constants.SCHEDULER_TASKS_QUEUE);
assertEquals(node1,"1");
String node2 = tasksQueue.poll(Constants.SCHEDULER_TASKS_QUEUE);
assertEquals(node2,"2");
//sadd
String task1 = "1.1.1.1-1-mr";
String task2 = "1.1.1.2-2-mr";
String task3 = "1.1.1.3-3-mr";
String task4 = "1.1.1.4-4-mr";
String task5 = "1.1.1.5-5-mr";
tasksQueue.sadd(Constants.SCHEDULER_TASKS_KILL,task1);
tasksQueue.sadd(Constants.SCHEDULER_TASKS_KILL,task2);
tasksQueue.sadd(Constants.SCHEDULER_TASKS_KILL,task3);
tasksQueue.sadd(Constants.SCHEDULER_TASKS_KILL,task4);
tasksQueue.sadd(Constants.SCHEDULER_TASKS_KILL,task5);
tasksQueue.sadd(Constants.SCHEDULER_TASKS_KILL,task5); //repeat task
Assert.assertEquals(tasksQueue.smembers(Constants.SCHEDULER_TASKS_KILL).size(),5);
logger.info(Arrays.toString(tasksQueue.smembers(Constants.SCHEDULER_TASKS_KILL).toArray()));
//srem
tasksQueue.srem(Constants.SCHEDULER_TASKS_KILL,task5);
//smembers
Assert.assertEquals(tasksQueue.smembers(Constants.SCHEDULER_TASKS_KILL).size(),4);
logger.info(Arrays.toString(tasksQueue.smembers(Constants.SCHEDULER_TASKS_KILL).toArray()));
}
/**
* test one million entries in the zookeeper task queue
*/
@Test
public void extremeTest(){
ITaskQueue tasksQueue = TaskQueueFactory.getTaskQueueInstance();
//clear all data
tasksQueue.delete();
int processNum = 10000;
int taskNum = 100;//10000 * 100 tasks, i.e. one million entries in total
Random random = new Random();
for(int i = 0; i < processNum; i++){
for(int j = 0; j < taskNum; j++) {
//format ${processInstancePriority}_${processInstanceId}_${taskInstancePriority}_${taskId}, e.g. "0_1_0_0" for the first task
String formatTask = String.format("%s_%d_%s_%d", i, i + 1, j, j == 0 ? 0 : j + random.nextInt(100));
tasksQueue.add(Constants.SCHEDULER_TASKS_QUEUE, formatTask);
}
}
String node1 = tasksQueue.poll(Constants.SCHEDULER_TASKS_QUEUE);
assertEquals(node1,"0");
//clear all data
tasksQueue.delete();
}
}

76
escheduler-common/src/test/java/cn/escheduler/common/shell/ShellExecutorTest.java

@ -0,0 +1,76 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.shell;
import cn.escheduler.common.thread.ThreadPoolExecutors;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.concurrent.CountDownLatch;
public class ShellExecutorTest {
private static final Logger logger = LoggerFactory.getLogger(ShellExecutorTest.class);
@Test
public void execCommand() throws InterruptedException {
ThreadPoolExecutors executors = ThreadPoolExecutors.getInstance();
CountDownLatch latch = new CountDownLatch(200);
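//two workers each run a shell command about 100 times; the latch releases the test thread once 200 commands have finished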
executors.execute(new Runnable() {
@Override
public void run() {
try {
int i = 0;
while(i++ <= 100){
String res = ShellExecutor.execCommand("groups");
logger.info("time:" + i + ",thread id:" + Thread.currentThread().getId() + ", result:" + res.substring(0,5));
Thread.sleep(100L);
latch.countDown();
}
} catch (IOException | InterruptedException e) {
e.printStackTrace();
}
}
});
executors.execute(new Runnable() {
@Override
public void run() {
try {
int i = 0;
while(i++ <= 100){
String res = ShellExecutor.execCommand("whoami");
logger.info("time:" + i + ",thread id:" + Thread.currentThread().getId() + ", result2:" + res);
Thread.sleep(100L);
latch.countDown();
}
} catch (IOException | InterruptedException e) {
e.printStackTrace();
}
}
});
latch.await();
}
}

53
escheduler-common/src/test/java/cn/escheduler/common/threadutils/ThreadPoolExecutorsTest.java

@ -0,0 +1,53 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.threadutils;
import cn.escheduler.common.thread.ThreadPoolExecutors;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class ThreadPoolExecutorsTest {
private static final Logger logger = LoggerFactory.getLogger(ThreadPoolExecutorsTest.class);
@Test
public void testThreadPoolExecutors() throws InterruptedException {
Thread2[] threadArr = new Thread2[10];
for (int i = 0; i < threadArr.length; i++) {
threadArr[i] = new Thread2();
threadArr[i].setDaemon(false);
threadArr[i].start();
}
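//every thread requests the pool named "a" with size 2; if getInstance returns a cached instance per name, all threads should log the same hashCode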
//wait for all test threads to finish
for (Thread2 thread : threadArr) {
thread.join();
}
}
//test thread
class Thread2 extends Thread {
@Override
public void run() {
logger.info(String.format("ThreadPoolExecutors instance's hashcode is: %s ",ThreadPoolExecutors.getInstance("a",2).hashCode()));
}
}
}

63
escheduler-common/src/test/java/cn/escheduler/common/utils/CollectionUtilsTest.java

@ -0,0 +1,63 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.utils;
import org.junit.Assert;
import org.junit.Test;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
public class CollectionUtilsTest {
@Test
public void equalLists() {
List<Integer> a = new ArrayList<Integer>();
a.add(1);
a.add(2);
a.add(3);
List<Integer> b = new ArrayList<Integer>();
b.add(3);
b.add(2);
b.add(1);
Assert.assertTrue(CollectionUtils.equalLists(a,b));
}
@Test
public void subtract() {
Set<Integer> a = new HashSet<Integer>();
a.add(1);
a.add(2);
a.add(3);
Set<Integer> b = new HashSet<Integer>();
b.add(0);
b.add(2);
b.add(4);
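//subtract should return the elements of a that are not in b, i.e. {1, 3}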
Assert.assertArrayEquals(new Integer[]{1,3},CollectionUtils.subtract(a,b).toArray());
}
}

61
escheduler-common/src/test/java/cn/escheduler/common/utils/CommonUtilsTest.java

@ -0,0 +1,61 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.utils;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.InetAddress;
import java.net.UnknownHostException;
/**
* common utils test
*/
public class CommonUtilsTest {
private static final Logger logger = LoggerFactory.getLogger(CommonUtilsTest.class);
@Test
public void getHdfsDataBasePath() {
logger.info(HadoopUtils.getHdfsDataBasePath());
}
@Test
public void getDownloadFilename() {
logger.info(FileUtils.getDownloadFilename("a.txt"));
}
@Test
public void getUploadFilename() {
logger.info(FileUtils.getUploadFilename("1234", "a.txt"));
}
@Test
public void getHdfsDir() {
logger.info(HadoopUtils.getHdfsDir("1234"));
}
@Test
public void test(){
InetAddress ip = null;
try {
ip = InetAddress.getLocalHost();
logger.info(ip.getHostAddress());
} catch (UnknownHostException e) {
e.printStackTrace();
}
}
}

57
escheduler-common/src/test/java/cn/escheduler/common/utils/DateUtilsTest.java

@ -0,0 +1,57 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.common.utils;
import org.junit.Assert;
import org.junit.Test;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
public class DateUtilsTest {
@Test
public void format2Readable() throws ParseException {
SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
String start = "2015-12-21 18:00:36";
Date startDate = sdf.parse(start);
String end = "2015-12-23 03:23:44";
Date endDate = sdf.parse(end);
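//the interval is 1 day, 9 hours, 23 minutes and 8 seconds, rendered as "dd HH:mm:ss"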
String readableDate = DateUtils.format2Readable(endDate.getTime() - startDate.getTime());
Assert.assertEquals("01 09:23:08", readableDate);
}
@Test
public void testWeek(){
Date curr = DateUtils.stringToDate("2019-02-01 00:00:00");
Date monday1 = DateUtils.stringToDate("2019-01-28 00:00:00");
Date sunday1 = DateUtils.stringToDate("2019-02-03 00:00:00");
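//2019-02-01 is a Friday, so its week runs from Monday 2019-01-28 to Sunday 2019-02-03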
Date monday = DateUtils.getMonday(curr);
Date sunday = DateUtils.getSunday(monday);
Assert.assertEquals(monday, monday1);
Assert.assertEquals(sunday, sunday1);
}
}

Some files were not shown because too many files have changed in this diff.