children) {
+ this.children = children;
+ }
+
+
+}
diff --git a/escheduler-api/src/main/java/cn/escheduler/api/enums/ExecuteType.java b/escheduler-api/src/main/java/cn/escheduler/api/enums/ExecuteType.java
new file mode 100644
index 0000000000..b09f424b30
--- /dev/null
+++ b/escheduler-api/src/main/java/cn/escheduler/api/enums/ExecuteType.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package cn.escheduler.api.enums;
+
+/**
+ * execute type
+ */
+public enum ExecuteType {
+
+
+ /**
+ * operation type
+ * 1. repeat run  2. recover suspended  3. start from failed task  4. stop  5. pause
+ */
+ NONE, REPEAT_RUNNING, RECOVER_SUSPENDED_PROCESS, START_FAILURE_TASK_PROCESS, STOP, PAUSE;
+
+
+ public static ExecuteType getEnum(int value){
+ for (ExecuteType e: ExecuteType.values()) {
+ if(e.ordinal() == value) {
+ return e;
+ }
+ }
+ return null; // value outside the enum range
+ }
+}
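
Since getEnum() resolves a value by ordinal, the lookup depends on the declaration order above (NONE=0 ... PAUSE=5) and yields null for anything out of range, so callers must check. A minimal caller sketch (hypothetical class name):

import cn.escheduler.api.enums.ExecuteType;

public class ExecuteTypeExample {
    public static void main(String[] args) {
        // ordinal-based lookup: 0=NONE, 1=REPEAT_RUNNING, ... 4=STOP, 5=PAUSE
        ExecuteType stop = ExecuteType.getEnum(4);
        ExecuteType invalid = ExecuteType.getEnum(99); // out of range -> null
        System.out.println(stop);                                   // STOP
        System.out.println(invalid == null ? "invalid value" : invalid);
    }
}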
diff --git a/escheduler-api/src/main/java/cn/escheduler/api/enums/Status.java b/escheduler-api/src/main/java/cn/escheduler/api/enums/Status.java
new file mode 100644
index 0000000000..265dac3d22
--- /dev/null
+++ b/escheduler-api/src/main/java/cn/escheduler/api/enums/Status.java
@@ -0,0 +1,220 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package cn.escheduler.api.enums;
+
+/**
+ * status enum
+ */
+public enum Status {
+
+ SUCCESS(0, "success"),
+
+ REQUEST_PARAMS_NOT_VALID_ERROR(10001, "request parameter {0} is not valid"),
+ TASK_TIMEOUT_PARAMS_ERROR(10002, "task timeout parameter is not valid"),
+ USER_NAME_EXIST(10003, "user name already exists"),
+ USER_NAME_NULL(10004,"user name is null"),
+// DB_OPERATION_ERROR(10005, "database operation error"),
+ HDFS_OPERATION_ERROR(10006, "hdfs operation error"),
+ UPDATE_FAILED(10007, "update failed"),
+ TASK_INSTANCE_HOST_NOT_FOUND(10008, "task instance does not set host"),
+ TENANT_NAME_EXIST(10009, "tenant name already exists"),
+ USER_NOT_EXIST(10010, "user {0} not exists"),
+ ALERT_GROUP_NOT_EXIST(10011, "alarm group not found"),
+ ALERT_GROUP_EXIST(10012, "alarm group already exists"),
+ USER_NAME_PASSWD_ERROR(10013,"user name or password error"),
+ LOGIN_SESSION_FAILED(10014,"create session failed!"),
+ DATASOURCE_EXIST(10015, "data source name already exists"),
+ DATASOURCE_CONNECT_FAILED(10016, "data source connection failed"),
+ TENANT_NOT_EXIST(10017, "tenant not exists"),
+ PROJECT_NOT_FOUNT(10018, "project {0} not found"),
+ PROJECT_ALREADY_EXISTS(10019, "project {0} already exists"),
+ TASK_INSTANCE_NOT_EXISTS(10020, "task instance {0} does not exist"),
+ TASK_INSTANCE_NOT_SUB_WORKFLOW_INSTANCE(10021, "task instance {0} is not a sub process instance"),
+ SCHEDULE_CRON_NOT_EXISTS(10022, "scheduler crontab {0} does not exist"),
+ SCHEDULE_CRON_ONLINE_FORBID_UPDATE(10023, "online status does not allow update operations"),
+ SCHEDULE_CRON_CHECK_FAILED(10024, "scheduler crontab expression validation failure: {0}"),
+ MASTER_NOT_EXISTS(10025, "master does not exist"),
+ SCHEDULE_STATUS_UNKNOWN(10026, "unknown schedule status: {0}"),
+ CREATE_ALERT_GROUP_ERROR(10027,"create alert group error"),
+ QUERY_ALL_ALERTGROUP_ERROR(10028,"query all alertgroup error"),
+ LIST_PAGING_ALERT_GROUP_ERROR(10029,"list paging alert group error"),
+ UPDATE_ALERT_GROUP_ERROR(10030,"update alert group error"),
+ DELETE_ALERT_GROUP_ERROR(10031,"delete alert group error"),
+ ALERT_GROUP_GRANT_USER_ERROR(10032,"alert group grant user error"),
+ CREATE_DATASOURCE_ERROR(10033,"create datasource error"),
+ UPDATE_DATASOURCE_ERROR(10034,"update datasource error"),
+ QUERY_DATASOURCE_ERROR(10035,"query datasource error"),
+ CONNECT_DATASOURCE_FAILURE(10036,"connect datasource failure"),
+ CONNECTION_TEST_FAILURE(10037,"connection test failure"),
+ DELETE_DATA_SOURCE_FAILURE(10038,"delete data source failure"),
+ VERFIY_DATASOURCE_NAME_FAILURE(10039,"verify datasource name failure"),
+ UNAUTHORIZED_DATASOURCE(10040,"unauthorized datasource"),
+ AUTHORIZED_DATA_SOURCE(10041,"authorized data source"),
+ LOGIN_SUCCESS(10042,"login success"),
+ USER_LOGIN_FAILURE(10043,"user login failure"),
+ LIST_WORKERS_ERROR(10044,"list workers error"),
+ LIST_MASTERS_ERROR(10045,"list masters error"),
+ UPDATE_PROJECT_ERROR(10046,"update project error"),
+ QUERY_PROJECT_DETAILS_BY_ID_ERROR(10047,"query project details by id error"),
+ CREATE_PROJECT_ERROR(10048,"create project error"),
+ LOGIN_USER_QUERY_PROJECT_LIST_PAGING_ERROR(10049,"login user query project list paging error"),
+ DELETE_PROJECT_ERROR(10050,"delete project error"),
+ QUERY_UNAUTHORIZED_PROJECT_ERROR(10051,"query unauthorized project error"),
+ QUERY_AUTHORIZED_PROJECT(10052,"query authorized project"),
+ QUERY_QUEUE_LIST_ERROR(10053,"query queue list error"),
+ CREATE_RESOURCE_ERROR(10054,"create resource error"),
+ UPDATE_RESOURCE_ERROR(10055,"update resource error"),
+ QUERY_RESOURCES_LIST_ERROR(10056,"query resources list error"),
+ QUERY_RESOURCES_LIST_PAGING(10057,"query resources list paging"),
+ DELETE_RESOURCE_ERROR(10058,"delete resource error"),
+ VERIFY_RESOURCE_BY_NAME_AND_TYPE_ERROR(10059,"verify resource by name and type error"),
+ VIEW_RESOURCE_FILE_ON_LINE_ERROR(10060,"view resource file online error"),
+ CREATE_RESOURCE_FILE_ON_LINE_ERROR(10061,"create resource file online error"),
+ RESOURCE_FILE_IS_EMPTY(10062,"resource file is empty"),
+ EDIT_RESOURCE_FILE_ON_LINE_ERROR(10063,"edit resource file online error"),
+ DOWNLOAD_RESOURCE_FILE_ERROR(10064,"download resource file error"),
+ CREATE_UDF_FUNCTION_ERROR(10065,"create udf function error"),
+ VIEW_UDF_FUNCTION_ERROR(10066,"view udf function error"),
+ UPDATE_UDF_FUNCTION_ERROR(10067,"update udf function error"),
+ QUERY_UDF_FUNCTION_LIST_PAGING_ERROR(10068,"query udf function list paging error"),
+ QUERY_DATASOURCE_BY_TYPE_ERROR(10069,"query datasource by type error"),
+ VERIFY_UDF_FUNCTION_NAME_ERROR(10070,"verify udf function name error"),
+ DELETE_UDF_FUNCTION_ERROR(10071,"delete udf function error"),
+ AUTHORIZED_FILE_RESOURCE_ERROR(10072,"authorized file resource error"),
+ UNAUTHORIZED_FILE_RESOURCE_ERROR(10073,"unauthorized file resource error"),
+ UNAUTHORIZED_UDF_FUNCTION_ERROR(10074,"unauthorized udf function error"),
+ AUTHORIZED_UDF_FUNCTION_ERROR(10075,"authorized udf function error"),
+ CREATE_SCHEDULE_ERROR(10076,"create schedule error"),
+ UPDATE_SCHEDULE_ERROR(10077,"update schedule error"),
+ PUBLISH_SCHEDULE_ONLINE_ERROR(10078,"publish schedule online error"),
+ OFFLINE_SCHEDULE_ERROR(10079,"offline schedule error"),
+ QUERY_SCHEDULE_LIST_PAGING_ERROR(10080,"query schedule list paging error"),
+ QUERY_SCHEDULE_LIST_ERROR(10081,"query schedule list error"),
+ QUERY_TASK_LIST_PAGING_ERROR(10082,"query task list paging error"),
+ QUERY_TASK_RECORD_LIST_PAGING_ERROR(10083,"query task record list paging error"),
+ CREATE_TENANT_ERROR(10084,"create tenant error"),
+ QUERY_TENANT_LIST_PAGING_ERROR(10085,"query tenant list paging error"),
+ QUERY_TENANT_LIST_ERROR(10086,"query tenant list error"),
+ UPDATE_TENANT_ERROR(10087,"update tenant error"),
+ DELETE_TENANT_BY_ID_ERROR(10088,"delete tenant by id error"),
+ VERIFY_TENANT_CODE_ERROR(10089,"verify tenant code error"),
+ CREATE_USER_ERROR(10090,"create user error"),
+ QUERY_USER_LIST_PAGING_ERROR(10091,"query user list paging error"),
+ UPDATE_USER_ERROR(10092,"update user error"),
+ DELETE_USER_BY_ID_ERROR(10093,"delete user by id error"),
+ GRANT_PROJECT_ERROR(10094,"grant project error"),
+ GRANT_RESOURCE_ERROR(10095,"grant resource error"),
+ GRANT_UDF_FUNCTION_ERROR(10096,"grant udf function error"),
+ GRANT_DATASOURCE_ERROR(10097,"grant datasource error"),
+ GET_USER_INFO_ERROR(10098,"get user info error"),
+ USER_LIST_ERROR(10099,"user list error"),
+ VERIFY_USERNAME_ERROR(10100,"verify username error"),
+ UNAUTHORIZED_USER_ERROR(10101,"unauthorized user error"),
+ AUTHORIZED_USER_ERROR(10102,"authorized user error"),
+ QUERY_TASK_INSTANCE_LOG_ERROR(10103,"view task instance log error"),
+ DOWNLOAD_TASK_INSTANCE_LOG_FILE_ERROR(10104,"download task instance log file error"),
+ CREATE_PROCESS_DEFINITION(10105,"create process definition"),
+ VERIFY_PROCESS_DEFINITION_NAME_UNIQUE_ERROR(10106,"verify process definition name unique error"),
+ UPDATE_PROCESS_DEFINITION_ERROR(10107,"update process definition error"),
+ RELEASE_PROCESS_DEFINITION_ERROR(10108,"release process definition error"),
+ QUERY_DATAIL_OF_PROCESS_DEFINITION_ERROR(10109,"query detail of process definition error"),
+ QUERY_PROCCESS_DEFINITION_LIST(10110,"query process definition list"),
+ ENCAPSULATION_TREEVIEW_STRUCTURE_ERROR(10111,"encapsulation treeview structure error"),
+ GET_TASKS_LIST_BY_PROCESS_DEFINITION_ID_ERROR(10112,"get tasks list by process definition id error"),
+ QUERY_PROCESS_INSTANCE_LIST_PAGING_ERROR(10113,"query process instance list paging error"),
+ QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_ERROR(10114,"query task list by process instance id error"),
+ UPDATE_PROCESS_INSTANCE_ERROR(10115,"update process instance error"),
+ QUERY_PROCESS_INSTANCE_BY_ID_ERROR(10116,"query process instance by id error"),
+ DELETE_PROCESS_INSTANCE_BY_ID_ERROR(10117,"delete process instance by id error"),
+ QUERY_SUB_PROCESS_INSTANCE_DETAIL_INFO_BY_TASK_ID_ERROR(10118,"query sub process instance detail info by task id error"),
+ QUERY_PARENT_PROCESS_INSTANCE_DETAIL_INFO_BY_SUB_PROCESS_INSTANCE_ID_ERROR(10119,"query parent process instance detail info by sub process instance id error"),
+ QUERY_PROCESS_INSTANCE_ALL_VARIABLES_ERROR(10120,"query process instance all variables error"),
+ ENCAPSULATION_PROCESS_INSTANCE_GANTT_STRUCTURE_ERROR(10121,"encapsulation process instance gantt structure error"),
+ QUERY_PROCCESS_DEFINITION_LIST_PAGING_ERROR(10122,"query process definition list paging error"),
+ SIGN_OUT_ERROR(10123,"sign out error"),
+ TENANT_CODE_HAS_ALREADY_EXISTS(10124,"tenant code already exists"),
+ IP_IS_EMPTY(10125,"ip is empty"),
+ SCHEDULE_CRON_REALEASE_NEED_NOT_CHANGE(10126, "schedule release is already {0}"),
+
+
+ UDF_FUNCTION_NOT_EXIST(20001, "UDF function not found"),
+ UDF_FUNCTION_EXISTS(20002, "UDF function already exists"),
+// RESOURCE_EMPTY(20003, "resource file is empty"),
+ RESOURCE_NOT_EXIST(20004, "resource not exist"),
+ RESOURCE_EXIST(20005, "resource already exists"),
+ RESOURCE_SUFFIX_NOT_SUPPORT_VIEW(20006, "resource suffix does not support online viewing"),
+ RESOURCE_SIZE_EXCEED_LIMIT(20007, "upload resource file size exceeds limit"),
+ RESOURCE_SUFFIX_FORBID_CHANGE(20008, "resource suffix not allowed to be modified"),
+ UDF_RESOURCE_SUFFIX_NOT_JAR(20009, "UDF resource suffix name must be jar"),
+ HDFS_COPY_FAIL(20010, "hdfs copy {0} -> {1} fail"),
+
+
+
+ USER_NO_OPERATION_PERM(30001, "user has no operation privilege"),
+ USER_NO_OPERATION_PROJECT_PERM(30002, "user {0} does not have permission for project {1}"),
+
+
+ PROCESS_INSTANCE_NOT_EXIST(50001, "process instance {0} does not exist"),
+ PROCESS_INSTANCE_EXIST(50002, "process instance {0} already exists"),
+ PROCESS_DEFINE_NOT_EXIST(50003, "process definition {0} does not exist"),
+ PROCESS_DEFINE_NOT_RELEASE(50004, "process definition {0} is not online"),
+ PROCESS_INSTANCE_ALREADY_CHANGED(50005, "the status of process instance {0} is already {1}"),
+ PROCESS_INSTANCE_STATE_OPERATION_ERROR(50006, "the status of process instance {0} is {1}, cannot perform {2} operation"),
+ SUB_PROCESS_INSTANCE_NOT_EXIST(50007, "the process instance to which the task belongs does not exist"),
+ PROCESS_DEFINE_NOT_ALLOWED_EDIT(50008, "process definition {0} does not allow edit"),
+ PROCESS_INSTANCE_EXECUTING_COMMAND(50009, "process instance {0} is executing the command, please wait ..."),
+ PROCESS_INSTANCE_NOT_SUB_PROCESS_INSTANCE(50010, "process instance {0} is not a sub process instance"),
+ TASK_INSTANCE_STATE_COUNT_ERROR(50011,"task instance state count error"),
+ COUNT_PROCESS_INSTANCE_STATE_ERROR(50012,"count process instance state error"),
+ COUNT_PROCESS_DEFINITION_USER_ERROR(50013,"count process definition user error"),
+ START_PROCESS_INSTANCE_ERROR(50014,"start process instance error"),
+ EXECUTE_PROCESS_INSTANCE_ERROR(50015,"execute process instance error"),
+ CHECK_PROCESS_DEFINITION_ERROR(50016,"check process definition error"),
+ QUERY_RECIPIENTS_AND_COPYERS_BY_PROCESS_DEFINITION_ERROR(50017,"query recipients and copyers by process definition error"),
+ DATA_IS_NOT_VALID(50018,"data {0} not valid"),
+ DATA_IS_NULL(50019,"data {0} is null"),
+ PROCESS_NODE_HAS_CYCLE(50020,"process node has cycle"),
+ PROCESS_NODE_S_PARAMETER_INVALID(50021,"process node {0} parameter invalid"),
+
+
+ HDFS_NOT_STARTUP(60001,"hdfs not startup"),
+ ;
+
+ private int code;
+ private String msg;
+
+ private Status(int code, String msg) {
+ this.code = code;
+ this.msg = msg;
+ }
+
+ public int getCode() {
+ return this.code;
+ }
+
+ public void setCode(int code) {
+ this.code = code;
+ }
+
+ public String getMsg() {
+ return this.msg;
+ }
+
+ public void setMsg(String msg) {
+ this.msg = msg;
+ }
+}
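
BaseService.putMsg later in this diff formats these messages with java.text.MessageFormat, so placeholders must use the {0} style; printf-style %s would pass through unsubstituted, which is why the DATA_IS_* messages above use {0}. A minimal sketch of the intended substitution (hypothetical class name):

import java.text.MessageFormat;
import cn.escheduler.api.enums.Status;

public class StatusMessageExample {
    public static void main(String[] args) {
        // {0} is a MessageFormat placeholder; %s would NOT be substituted here
        String msg = MessageFormat.format(Status.USER_NOT_EXIST.getMsg(), "tobias");
        System.out.println(msg); // user tobias not exists
    }
}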
diff --git a/escheduler-api/src/main/java/cn/escheduler/api/interceptor/DruidStatFilter.java b/escheduler-api/src/main/java/cn/escheduler/api/interceptor/DruidStatFilter.java
new file mode 100644
index 0000000000..ea7bff2199
--- /dev/null
+++ b/escheduler-api/src/main/java/cn/escheduler/api/interceptor/DruidStatFilter.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package cn.escheduler.api.interceptor;
+
+import com.alibaba.druid.support.http.WebStatFilter;
+
+/* the annotations below enable the druid stat monitor during development; they are kept commented out by default
+@WebFilter(filterName="druidWebStatFilter",urlPatterns="/*",
+ initParams={
+ @WebInitParam(name="exclusions",value="*.js,*.gif,*.jpg,*.bmp,*.png,*.css,*.ico,/druid/*")
+ }) */
+public class DruidStatFilter extends WebStatFilter {
+
+
+}
\ No newline at end of file
diff --git a/escheduler-api/src/main/java/cn/escheduler/api/interceptor/DruidStatViewServlet.java b/escheduler-api/src/main/java/cn/escheduler/api/interceptor/DruidStatViewServlet.java
new file mode 100644
index 0000000000..4abac92dfe
--- /dev/null
+++ b/escheduler-api/src/main/java/cn/escheduler/api/interceptor/DruidStatViewServlet.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package cn.escheduler.api.interceptor;
+
+import com.alibaba.druid.support.http.StatViewServlet;
+
+
+/* the annotations below enable the druid stat monitor during development; they are kept commented out by default
+@WebServlet(urlPatterns = "/druid/*",
+ initParams={
+// @WebInitParam(name="allow",value="127.0.0.1"),
+// @WebInitParam(name="deny",value="192.168.16.111"),
+ @WebInitParam(name="loginUsername",value="admin"),
+ @WebInitParam(name="loginPassword",value="escheduler123"),
+ @WebInitParam(name="resetEnable",value="true")
+ }) */
+public class DruidStatViewServlet extends StatViewServlet {
+
+
+}
diff --git a/escheduler-api/src/main/java/cn/escheduler/api/interceptor/LoginHandlerInterceptor.java b/escheduler-api/src/main/java/cn/escheduler/api/interceptor/LoginHandlerInterceptor.java
new file mode 100644
index 0000000000..7f287bf725
--- /dev/null
+++ b/escheduler-api/src/main/java/cn/escheduler/api/interceptor/LoginHandlerInterceptor.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package cn.escheduler.api.interceptor;
+
+import cn.escheduler.api.service.SessionService;
+import cn.escheduler.api.utils.Constants;
+import cn.escheduler.dao.mapper.UserMapper;
+import cn.escheduler.dao.model.Session;
+import cn.escheduler.dao.model.User;
+import org.apache.commons.httpclient.HttpStatus;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.web.servlet.HandlerInterceptor;
+import org.springframework.web.servlet.ModelAndView;
+
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+/**
+ * login interceptor, must login first
+ */
+public class LoginHandlerInterceptor implements HandlerInterceptor {
+ private static final Logger logger = LoggerFactory.getLogger(LoginHandlerInterceptor.class);
+
+ @Autowired
+ private SessionService sessionService;
+
+ @Autowired
+ private UserMapper userMapper;
+
+ /**
+ * Intercept the execution of a handler. Called after HandlerMapping determined
+ * an appropriate handler object, but before HandlerAdapter invokes the handler.
+ * DispatcherServlet processes a handler in an execution chain, consisting
+ * of any number of interceptors, with the handler itself at the end.
+ * With this method, each interceptor can decide to abort the execution chain,
+ * typically sending an HTTP error or writing a custom response.
+ *
+ * <p>Note: special considerations apply for asynchronous
+ * request processing. For more details see
+ * {@link org.springframework.web.servlet.AsyncHandlerInterceptor}.
+ * @param request current HTTP request
+ * @param response current HTTP response
+ * @param handler chosen handler to execute, for type and/or instance evaluation
+ * @return {@code true} if the execution chain should proceed with the
+ * next interceptor or the handler itself. Else, DispatcherServlet assumes
+ * that this interceptor has already dealt with the response itself.
+ * @throws Exception in case of errors
+ */
+ @Override
+ public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler) {
+
+ Session session = sessionService.getSession(request);
+
+ if(logger.isDebugEnabled()){
+ logger.debug("session info : " + session);
+ }
+
+ if (session == null) {
+ response.setStatus(HttpStatus.SC_UNAUTHORIZED);
+ logger.info("session info is null ");
+ return false;
+ }
+
+ if(logger.isDebugEnabled()){
+ logger.debug("session id: {}", session.getId());
+ }
+
+ //get user object from session
+ User user = userMapper.queryById(session.getUserId());
+
+ if(logger.isDebugEnabled()){
+ logger.debug("user info : {}", user);
+ }
+
+
+ if (user == null) {
+ response.setStatus(HttpStatus.SC_UNAUTHORIZED);
+ return false;
+ }
+
+ request.setAttribute(Constants.SESSION_USER, user);
+
+ return true;
+ }
+
+ @Override
+ public void postHandle(HttpServletRequest request, HttpServletResponse response, Object handler, ModelAndView modelAndView) throws Exception {
+
+ }
+
+ @Override
+ public void afterCompletion(HttpServletRequest request, HttpServletResponse response, Object handler, Exception ex) throws Exception {
+
+ }
+
+}
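
The interceptor relies on @Autowired fields, so it has to be registered as a Spring bean rather than constructed with new. A hedged registration sketch, assuming a Spring 5-style WebMvcConfigurer; the configuration class name and path patterns are hypothetical:

import cn.escheduler.api.interceptor.LoginHandlerInterceptor;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.web.servlet.config.annotation.InterceptorRegistry;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;

@Configuration
public class WebMvcConfig implements WebMvcConfigurer {

    @Bean
    public LoginHandlerInterceptor loginInterceptor() {
        // declared as a bean so the @Autowired fields inside it are injected
        return new LoginHandlerInterceptor();
    }

    @Override
    public void addInterceptors(InterceptorRegistry registry) {
        // hypothetical patterns; the real project likely excludes login endpoints
        registry.addInterceptor(loginInterceptor())
                .addPathPatterns("/**")
                .excludePathPatterns("/login", "/signOut");
    }
}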
diff --git a/escheduler-api/src/main/java/cn/escheduler/api/log/LogClient.java b/escheduler-api/src/main/java/cn/escheduler/api/log/LogClient.java
new file mode 100644
index 0000000000..2208bd328e
--- /dev/null
+++ b/escheduler-api/src/main/java/cn/escheduler/api/log/LogClient.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package cn.escheduler.api.log;
+
+import cn.escheduler.rpc.*;
+import io.grpc.ManagedChannel;
+import io.grpc.ManagedChannelBuilder;
+import io.grpc.StatusRuntimeException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * log client
+ */
+public class LogClient {
+
+ private static final Logger logger = LoggerFactory.getLogger(LogClient.class);
+
+ private final ManagedChannel channel;
+ private final LogViewServiceGrpc.LogViewServiceBlockingStub blockingStub;
+
+ /**
+ * construct a client connecting to the log view service at {@code host:port}
+ */
+ public LogClient(String host, int port) {
+ this(ManagedChannelBuilder.forAddress(host, port)
+ // channels are secure by default (via SSL/TLS); TLS is disabled here to
+ // avoid needing certificates
+ .usePlaintext(true));
+ }
+
+ /**
+ * construct a client from an existing channel builder
+ */
+ LogClient(ManagedChannelBuilder<?> channelBuilder) {
+ // set max inbound message size so large log files can be fetched
+ channelBuilder.maxInboundMessageSize(Integer.MAX_VALUE);
+ channel = channelBuilder.build();
+ blockingStub = LogViewServiceGrpc.newBlockingStub(channel);
+ }
+
+ /**
+ * shutdown
+ *
+ * @throws InterruptedException
+ */
+ public void shutdown() throws InterruptedException {
+ channel.shutdown().awaitTermination(5, TimeUnit.SECONDS);
+ }
+
+ /**
+ * roll view log
+ *
+ * @param path log file path
+ * @param skipLineNum number of lines to skip from the head
+ * @param limit maximum number of lines to return
+ * @return log content, or null if the rpc fails
+ */
+ public String rollViewLog(String path, int skipLineNum, int limit) {
+ logger.info("roll view log : path {}, skipLineNum {}, limit {}", path, skipLineNum, limit);
+ LogParameter pathParameter = LogParameter
+ .newBuilder()
+ .setPath(path)
+ .setSkipLineNum(skipLineNum)
+ .setLimit(limit)
+ .build();
+ RetStrInfo retStrInfo;
+ try {
+ retStrInfo = blockingStub.rollViewLog(pathParameter);
+ return retStrInfo.getMsg();
+ } catch (StatusRuntimeException e) {
+ logger.error("roll view log error", e);
+ return null;
+ }
+ }
+
+ /**
+ * view whole log content
+ *
+ * @param path log file path
+ * @return log content, or null if the rpc fails
+ */
+ public String viewLog(String path) {
+ logger.info("view queryLog path {}",path);
+ PathParameter pathParameter = PathParameter.newBuilder().setPath(path).build();
+ RetStrInfo retStrInfo;
+ try {
+ retStrInfo = blockingStub.viewLog(pathParameter);
+ return retStrInfo.getMsg();
+ } catch (StatusRuntimeException e) {
+ logger.error("view log error", e);
+ return null;
+ }
+ }
+
+ /**
+ * get log file content as bytes
+ *
+ * @param path log file path
+ * @return log file bytes, or null if the rpc fails
+ */
+ public byte[] getLogBytes(String path) {
+ logger.info("get log bytes, path: {}", path);
+ PathParameter pathParameter = PathParameter.newBuilder().setPath(path).build();
+ RetByteInfo retByteInfo;
+ try {
+ retByteInfo = blockingStub.getLogBytes(pathParameter);
+ return retByteInfo.getData().toByteArray();
+ } catch (StatusRuntimeException e) {
+ logger.error("get log size error", e);
+ return null;
+ }
+ }
+
+}
\ No newline at end of file
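
A minimal usage sketch for the client (host, port and log path are placeholders; the class name is hypothetical):

import cn.escheduler.api.log.LogClient;

public class LogClientExample {
    public static void main(String[] args) throws InterruptedException {
        LogClient client = new LogClient("127.0.0.1", 50051);
        try {
            // fetch the first 100 lines of a task log
            String head = client.rollViewLog("/tmp/escheduler/task.log", 0, 100);
            System.out.println(head);
        } finally {
            client.shutdown(); // waits up to 5 seconds for the channel to terminate
        }
    }
}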
diff --git a/escheduler-api/src/main/java/cn/escheduler/api/quartz/ProcessScheduleJob.java b/escheduler-api/src/main/java/cn/escheduler/api/quartz/ProcessScheduleJob.java
new file mode 100644
index 0000000000..3a6fc1e64d
--- /dev/null
+++ b/escheduler-api/src/main/java/cn/escheduler/api/quartz/ProcessScheduleJob.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package cn.escheduler.api.quartz;
+
+
+import cn.escheduler.common.Constants;
+import cn.escheduler.common.enums.CommandType;
+import cn.escheduler.common.enums.ReleaseState;
+import cn.escheduler.dao.ProcessDao;
+import cn.escheduler.dao.model.Command;
+import cn.escheduler.dao.model.ProcessDefinition;
+import cn.escheduler.dao.model.Schedule;
+import org.quartz.*;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.util.Assert;
+
+import java.util.Date;
+
+import static cn.escheduler.api.quartz.QuartzExecutors.buildJobGroupName;
+import static cn.escheduler.api.quartz.QuartzExecutors.buildJobName;
+
+/**
+ * process schedule job
+ *
+ * {@link Job}
+ *
+ */
+public class ProcessScheduleJob implements Job {
+
+ private static final Logger logger = LoggerFactory.getLogger(ProcessScheduleJob.class);
+
+ /**
+ * {@link ProcessDao}
+ */
+ private static ProcessDao processDao;
+
+
+ /**
+ * init
+ */
+ public static void init(ProcessDao processDao) {
+ ProcessScheduleJob.processDao = processDao;
+ }
+
+ /**
+ * Called by the {@link Scheduler} when a {@link Trigger} fires that is
+ * associated with the <code>Job</code>.
+ *
+ * <p>The implementation may wish to set a
+ * {@link JobExecutionContext#setResult(Object) result} object on the
+ * {@link JobExecutionContext} before this method exits. The result itself
+ * is meaningless to Quartz, but may be informative to
+ * <code>{@link JobListener}s</code> or <code>{@link TriggerListener}s</code>
+ * that are watching the job's execution.
+ *
+ * @throws JobExecutionException if there is an exception while executing the job.
+ */
+ @Override
+ public void execute(JobExecutionContext context) throws JobExecutionException {
+
+ Assert.notNull(processDao, "please call init() method first");
+
+ JobDataMap dataMap = context.getJobDetail().getJobDataMap();
+
+ int projectId = dataMap.getInt(Constants.PROJECT_ID);
+ int scheduleId = dataMap.getInt(Constants.SCHEDULE_ID);
+
+ // the time the trigger was scheduled to fire; the actual fire time may be
+ // later (e.g. scheduled 10:00:00, fired 10:00:03) if the scheduler was busy
+ Date scheduledFireTime = context.getScheduledFireTime();
+
+ // the time the trigger actually fired
+ Date fireTime = context.getFireTime();
+
+ logger.info("scheduled fire time :{}, fire time :{}, process id :{}", scheduledFireTime, fireTime, scheduleId);
+
+ // query schedule
+ Schedule schedule = processDao.querySchedule(scheduleId);
+ if (schedule == null) {
+ logger.warn("process schedule does not exist in db,delete schedule job in quartz, projectId:{}, scheduleId:{}", projectId, scheduleId);
+ deleteJob(projectId, scheduleId);
+ return;
+ }
+
+
+ ProcessDefinition processDefinition = processDao.findProcessDefineById(schedule.getProcessDefinitionId());
+ // release state : online/offline; check for null before reading it
+ if (processDefinition == null || processDefinition.getReleaseState() == ReleaseState.OFFLINE) {
+ logger.warn("process definition does not exist in db or is offline, no command created, projectId:{}, processDefinitionId:{}", projectId, schedule.getProcessDefinitionId());
+ return;
+ }
+
+ Command command = new Command();
+ command.setCommandType(CommandType.START_PROCESS);
+ command.setExecutorId(schedule.getUserId());
+ command.setFailureStrategy(schedule.getFailureStrategy());
+ command.setProcessDefinitionId(schedule.getProcessDefinitionId());
+ command.setScheduleTime(scheduledFireTime);
+ command.setStartTime(fireTime);
+ command.setWarningGroupId(schedule.getWarningGroupId());
+ command.setWarningType(schedule.getWarningType());
+ command.setProcessInstancePriority(schedule.getProcessInstancePriority());
+
+ processDao.createCommand(command);
+ }
+
+
+ /**
+ * delete job
+ */
+ private void deleteJob(int projectId, int scheduleId) {
+ String jobName = buildJobName(scheduleId);
+ String jobGroupName = buildJobGroupName(projectId);
+ QuartzExecutors.getInstance().deleteJob(jobName, jobGroupName);
+ }
+}
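
Because ProcessScheduleJob is instantiated by Quartz rather than Spring, the ProcessDao is handed in through the static init(). A hedged bootstrap sketch, assuming a Spring Boot CommandLineRunner; the real entry point may wire this differently:

import cn.escheduler.api.quartz.ProcessScheduleJob;
import cn.escheduler.api.quartz.QuartzExecutors;
import cn.escheduler.dao.ProcessDao;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.CommandLineRunner;
import org.springframework.stereotype.Component;

@Component
public class SchedulerBootstrap implements CommandLineRunner {

    @Autowired
    private ProcessDao processDao;

    @Override
    public void run(String... args) throws Exception {
        // hand the DAO to the static holder before any trigger can fire
        ProcessScheduleJob.init(processDao);
        QuartzExecutors.getInstance().start();
    }
}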
diff --git a/escheduler-api/src/main/java/cn/escheduler/api/quartz/QuartzExecutors.java b/escheduler-api/src/main/java/cn/escheduler/api/quartz/QuartzExecutors.java
new file mode 100644
index 0000000000..92e351d5cd
--- /dev/null
+++ b/escheduler-api/src/main/java/cn/escheduler/api/quartz/QuartzExecutors.java
@@ -0,0 +1,305 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package cn.escheduler.api.quartz;
+
+import cn.escheduler.common.Constants;
+import cn.escheduler.common.utils.JSONUtils;
+import cn.escheduler.dao.model.Schedule;
+import org.apache.commons.lang.StringUtils;
+import org.quartz.*;
+import org.quartz.impl.StdSchedulerFactory;
+import org.quartz.impl.matchers.GroupMatcher;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Calendar;
+import java.util.*;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import static org.quartz.CronScheduleBuilder.cronSchedule;
+import static org.quartz.JobBuilder.newJob;
+import static org.quartz.TriggerBuilder.newTrigger;
+
+/**
+ * single Quartz executors instance
+ */
+public class QuartzExecutors {
+
+ private static final Logger logger = LoggerFactory.getLogger(QuartzExecutors.class);
+
+ private final ReadWriteLock lock = new ReentrantReadWriteLock();
+
+
+ /**
+ * A <code>Scheduler</code> maintains a registry of {@link org.quartz.JobDetail}s
+ * and {@link Trigger}s. Once registered, the <code>Scheduler</code> is
+ * responsible for executing <code>Job</code>s when their associated
+ * <code>Trigger</code>s fire (when their scheduled time arrives).
+ *
+ * {@link Scheduler}
+ */
+ private static Scheduler scheduler;
+
+ private static volatile QuartzExecutors INSTANCE = null;
+
+ private QuartzExecutors() {}
+
+ /**
+ * thread-safe singleton accessor (double-checked locking)
+ * @return the single QuartzExecutors instance
+ */
+ public static QuartzExecutors getInstance() {
+ if (INSTANCE == null) {
+ synchronized (QuartzExecutors.class) {
+ // re-check inside the lock: several threads may pass the first null check concurrently
+ if (INSTANCE == null) {
+ INSTANCE = new QuartzExecutors();
+ //finish QuartzExecutors init
+ INSTANCE.init();
+ }
+ }
+ }
+ return INSTANCE;
+ }
+
+
+ /**
+ * init
+ *
+ * <p>Obtains a client-usable handle to a <code>Scheduler</code>.
+ */
+ private void init() {
+ try {
+ SchedulerFactory schedulerFactory = new StdSchedulerFactory(Constants.QUARTZ_PROPERTIES_PATH);
+ scheduler = schedulerFactory.getScheduler();
+
+ } catch (SchedulerException e) {
+ logger.error(e.getMessage(),e);
+ System.exit(1);
+ }
+
+ }
+
+ /**
+ * start the scheduler if it is not already started
+ *
+ * <p>Note: {@link Scheduler#isStarted()} only reflects whether
+ * <code>start()</code> has ever been called on this Scheduler, so it returns
+ * true even if the <code>Scheduler</code> is currently in standby mode or
+ * has since been shut down.
+ *
+ * @see Scheduler#start()
+ */
+ public void start() throws SchedulerException {
+ if (!scheduler.isStarted()){
+ scheduler.start();
+ logger.info("Quartz service started" );
+ }
+ }
+
+ /**
+ * stop all scheduled tasks
+ *
+ * <p>Halts the <code>Scheduler</code>'s firing of {@link Trigger}s and
+ * cleans up all resources associated with the Scheduler. Equivalent to
+ * <code>shutdown(false)</code>.
+ *
+ * <p>The scheduler cannot be re-started.
+ */
+ public void shutdown() throws SchedulerException {
+ if (!scheduler.isShutdown()) {
+ // don't wait for the task to complete
+ scheduler.shutdown();
+ logger.info("Quartz service stopped, and halt all tasks");
+ }
+ }
+
+
+ /**
+ * add a job and its cron trigger; if the job already exists, its data map and trigger are updated
+ *
+ * @param clazz job class
+ * @param jobName job name
+ * @param jobGroupName job group name
+ * @param startDate job start date
+ * @param endDate job end date
+ * @param cronExpression cron expression
+ * @param jobDataMap job parameters data map
+ */
+ public void addJob(Class<? extends Job> clazz, String jobName, String jobGroupName, Date startDate, Date endDate,
+ String cronExpression,
+ Map<String, Object> jobDataMap) {
+ lock.writeLock().lock();
+ try {
+
+ JobKey jobKey = new JobKey(jobName, jobGroupName);
+ JobDetail jobDetail;
+ //add a task (if this task already exists, return this task directly)
+ if (scheduler.checkExists(jobKey)) {
+
+ jobDetail = scheduler.getJobDetail(jobKey);
+ if (jobDataMap != null) {
+ jobDetail.getJobDataMap().putAll(jobDataMap);
+ }
+ } else {
+ jobDetail = newJob(clazz).withIdentity(jobKey).build();
+
+ if (jobDataMap != null) {
+ jobDetail.getJobDataMap().putAll(jobDataMap);
+ }
+
+ scheduler.addJob(jobDetail, false, true);
+
+ logger.info("Add job, job name: {}, group name: {}",
+ jobName, jobGroupName);
+ }
+
+ TriggerKey triggerKey = new TriggerKey(jobName, jobGroupName);
+ /**
+ * Instructs the {@link Scheduler} that upon a mis-fire situation, the
+ * {@link CronTrigger} wants to have its next-fire-time updated to the next
+ * time in the schedule after the current time (taking into account any
+ * associated {@link Calendar}), but it does not want to be fired now.
+ */
+ CronTrigger cronTrigger = newTrigger().withIdentity(triggerKey).startAt(startDate).endAt(endDate)
+ .withSchedule(cronSchedule(cronExpression).withMisfireHandlingInstructionDoNothing())
+ .forJob(jobDetail).build();
+
+ if (scheduler.checkExists(triggerKey)) {
+ // update the trigger when the schedule cycle changes
+ CronTrigger oldCronTrigger = (CronTrigger) scheduler.getTrigger(triggerKey);
+ String oldCronExpression = oldCronTrigger.getCronExpression();
+
+ if (!StringUtils.equalsIgnoreCase(cronExpression,oldCronExpression)) {
+ // reschedule job trigger
+ scheduler.rescheduleJob(triggerKey, cronTrigger);
+ logger.info("reschedule job trigger, triggerName: {}, triggerGroupName: {}, cronExpression: {}, startDate: {}, endDate: {}",
+ jobName, jobGroupName, cronExpression, startDate, endDate);
+ }
+ } else {
+ scheduler.scheduleJob(cronTrigger);
+ logger.info("schedule job trigger, triggerName: {}, triggerGroupName: {}, cronExpression: {}, startDate: {}, endDate: {}",
+ jobName, jobGroupName, cronExpression, startDate, endDate);
+ }
+
+ } catch (Exception e) {
+ logger.error("add job failed", e);
+ throw new RuntimeException("add job failed:"+e.getMessage());
+ } finally {
+ lock.writeLock().unlock();
+ }
+ }
+
+
+ /**
+ * delete job
+ *
+ * @param jobName job name
+ * @param jobGroupName job group name
+ * @return true if the job was found and deleted
+ */
+ public boolean deleteJob(String jobName, String jobGroupName) {
+ lock.writeLock().lock();
+ try {
+ logger.info("try to delete job, job name: {}, job group name: {},", jobName, jobGroupName);
+ return scheduler.deleteJob(new JobKey(jobName, jobGroupName));
+ } catch (SchedulerException e) {
+ logger.error(String.format("delete job : %s failed",jobName), e);
+ } finally {
+ lock.writeLock().unlock();
+ }
+ return false;
+ }
+
+ /**
+ * delete all jobs in job group
+ *
+ * <p>Note that while this bulk operation is likely more efficient than
+ * invoking <code>deleteJob(JobKey jobKey)</code> several times, it may have
+ * the adverse effect of holding data locks for a single long duration of
+ * time (rather than lots of small durations of time).
+ *
+ * @param jobGroupName job group name
+ *
+ * @return true if all of the jobs were found and deleted, false if
+ * one or more were not deleted
+ */
+ public boolean deleteAllJobs(String jobGroupName) {
+ lock.writeLock().lock();
+ try {
+ logger.info("try to delete all jobs in job group: {}", jobGroupName);
+ List<JobKey> jobKeys = new ArrayList<>();
+ jobKeys.addAll(scheduler.getJobKeys(GroupMatcher.groupEndsWith(jobGroupName)));
+
+ return scheduler.deleteJobs(jobKeys);
+ } catch (SchedulerException e) {
+ logger.error(String.format("delete all jobs in job group: %s failed",jobGroupName), e);
+ } finally {
+ lock.writeLock().unlock();
+ }
+ return false;
+ }
+
+ /**
+ * build job name
+ */
+ public static String buildJobName(int processId) {
+ StringBuilder sb = new StringBuilder(30);
+ sb.append(Constants.QUARTZ_JOB_PRIFIX).append(Constants.UNDERLINE).append(processId);
+ return sb.toString();
+ }
+
+ /**
+ * build job group name
+ */
+ public static String buildJobGroupName(int projectId) {
+ StringBuilder sb = new StringBuilder(30);
+ sb.append(Constants.QUARTZ_JOB_GROUP_PRIFIX).append(Constants.UNDERLINE).append(projectId);
+ return sb.toString();
+ }
+
+ /**
+ * build the job data map handed to the scheduled job
+ *
+ * @param projectId project id
+ * @param scheduleId schedule id
+ * @param schedule schedule definition, stored as json
+ * @return the populated data map
+ */
+ public static Map<String, Object> buildDataMap(int projectId, int scheduleId, Schedule schedule) {
+ Map<String, Object> dataMap = new HashMap<>(3);
+ dataMap.put(Constants.PROJECT_ID, projectId);
+ dataMap.put(Constants.SCHEDULE_ID, scheduleId);
+ dataMap.put(Constants.SCHEDULE, JSONUtils.toJson(schedule));
+
+ return dataMap;
+ }
+
+}
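
Putting the helpers together, scheduling a process definition looks roughly like the sketch below. This assumes Schedule exposes getStartTime/getEndTime/getCrontab accessors, which is how the rest of this diff reads the schedule definition; the wrapper class is hypothetical:

import cn.escheduler.api.quartz.ProcessScheduleJob;
import cn.escheduler.api.quartz.QuartzExecutors;
import cn.escheduler.dao.model.Schedule;
import java.util.Map;

public class ScheduleJobExample {

    public static void scheduleProcess(int projectId, int scheduleId, Schedule schedule) {
        // names are derived from the ids so the job can be found and deleted later
        String jobName = QuartzExecutors.buildJobName(scheduleId);
        String jobGroupName = QuartzExecutors.buildJobGroupName(projectId);
        Map<String, Object> dataMap = QuartzExecutors.buildDataMap(projectId, scheduleId, schedule);

        QuartzExecutors.getInstance().addJob(ProcessScheduleJob.class, jobName, jobGroupName,
                schedule.getStartTime(), schedule.getEndTime(),
                schedule.getCrontab(), dataMap);
    }
}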
diff --git a/escheduler-api/src/main/java/cn/escheduler/api/service/AlertGroupService.java b/escheduler-api/src/main/java/cn/escheduler/api/service/AlertGroupService.java
new file mode 100644
index 0000000000..9ef37020db
--- /dev/null
+++ b/escheduler-api/src/main/java/cn/escheduler/api/service/AlertGroupService.java
@@ -0,0 +1,294 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package cn.escheduler.api.service;
+
+import cn.escheduler.api.enums.Status;
+import cn.escheduler.api.utils.Constants;
+import cn.escheduler.api.utils.PageInfo;
+import cn.escheduler.api.utils.Result;
+import cn.escheduler.common.enums.AlertType;
+import cn.escheduler.common.enums.UserType;
+import cn.escheduler.dao.mapper.AlertGroupMapper;
+import cn.escheduler.dao.mapper.UserAlertGroupMapper;
+import cn.escheduler.dao.model.AlertGroup;
+import cn.escheduler.dao.model.User;
+import cn.escheduler.dao.model.UserAlertGroup;
+import org.apache.commons.lang3.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Service;
+
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * alert group service
+ */
+@Service
+public class AlertGroupService {
+
+ private static final Logger logger = LoggerFactory.getLogger(AlertGroupService.class);
+
+ @Autowired
+ private AlertGroupMapper alertGroupMapper;
+
+ @Autowired
+ private UserAlertGroupMapper userAlertGroupMapper;
+
+ /**
+ * query alert group list
+ *
+ * @return
+ */
+ public HashMap<String, Object> queryAlertgroup() {
+
+ HashMap<String, Object> result = new HashMap<>(5);
+ List<AlertGroup> alertGroups = alertGroupMapper.queryAllGroupList();
+ result.put(Constants.DATA_LIST, alertGroups);
+ putMsg(result, Status.SUCCESS);
+
+ return result;
+ }
+
+ /**
+ * paging query alarm group list
+ *
+ * @param loginUser
+ * @param searchVal
+ * @param pageNo
+ * @param pageSize
+ * @return
+ */
+ public Map<String, Object> listPaging(User loginUser, String searchVal, Integer pageNo, Integer pageSize) {
+
+ Map<String, Object> result = new HashMap<>(5);
+
+ Integer count = alertGroupMapper.countAlertGroupPaging(searchVal);
+
+ PageInfo<AlertGroup> pageInfo = new PageInfo<>(pageNo, pageSize);
+
+ List<AlertGroup> alertGroupList = alertGroupMapper.queryAlertGroupPaging(searchVal, pageInfo.getStart(), pageSize);
+
+ pageInfo.setTotalCount(count);
+ pageInfo.setLists(alertGroupList);
+ result.put(Constants.DATA_LIST, pageInfo);
+ putMsg(result, Status.SUCCESS);
+
+ return result;
+ }
+
+ /**
+ * create alert group
+ *
+ * @param loginUser
+ * @param groupName
+ * @param groupType
+ * @param desc
+ * @return
+ */
+ public Map<String, Object> createAlertgroup(User loginUser, String groupName, AlertType groupType, String desc) {
+ Map<String, Object> result = new HashMap<>(5);
+ //only admin can operate
+ if (checkAdmin(loginUser, result)){
+ return result;
+ }
+
+ AlertGroup alertGroup = new AlertGroup();
+ Date now = new Date();
+
+ alertGroup.setGroupName(groupName);
+ alertGroup.setGroupType(groupType);
+ alertGroup.setDesc(desc);
+ alertGroup.setCreateTime(now);
+ alertGroup.setUpdateTime(now);
+
+ // insert
+ int insert = alertGroupMapper.insert(alertGroup);
+
+ if (insert > 0) {
+ putMsg(result, Status.SUCCESS);
+ } else {
+ putMsg(result, Status.CREATE_ALERT_GROUP_ERROR);
+ }
+ return result;
+ }
+
+ /**
+ * check user is admin or not
+ *
+ * @param user
+ * @return
+ */
+ public boolean isAdmin(User user) {
+ return user.getUserType() == UserType.ADMIN_USER;
+ }
+
+ /**
+ * update alert group
+ *
+ * @param loginUser
+ * @param id
+ * @param groupName
+ * @param groupType
+ * @param desc
+ * @return
+ */
+ public Map<String, Object> updateAlertgroup(User loginUser, int id, String groupName, AlertType groupType, String desc) {
+ Map<String, Object> result = new HashMap<>(5);
+
+ if (checkAdmin(loginUser, result)){
+ return result;
+ }
+
+
+ AlertGroup alertGroup = alertGroupMapper.queryById(id);
+
+ if (alertGroup == null) {
+ putMsg(result, Status.ALERT_GROUP_NOT_EXIST);
+ return result;
+
+ }
+
+ Date now = new Date();
+
+ if (StringUtils.isNotEmpty(groupName)) {
+ alertGroup.setGroupName(groupName);
+ }
+
+ if (groupType != null) {
+ alertGroup.setGroupType(groupType);
+ }
+ alertGroup.setDesc(desc);
+ alertGroup.setUpdateTime(now);
+ // update
+ alertGroupMapper.update(alertGroup);
+ putMsg(result, Status.SUCCESS);
+ return result;
+ }
+
+ /**
+ * delete alert group by id
+ *
+ * @param loginUser
+ * @param id
+ * @return
+ */
+ public Map<String, Object> delAlertgroupById(User loginUser, int id) {
+ Map<String, Object> result = new HashMap<>(5);
+ result.put(Constants.STATUS, false);
+
+ //only admin can operate
+ if (checkAdmin(loginUser, result)){
+ return result;
+ }
+
+
+ alertGroupMapper.delete(id);
+ putMsg(result, Status.SUCCESS);
+ return result;
+ }
+
+
+ /**
+ * grant user
+ *
+ * @param loginUser
+ * @param alertgroupId
+ * @param userIds
+ * @return
+ */
+ public Map<String, Object> grantUser(User loginUser, int alertgroupId, String userIds) {
+ Map<String, Object> result = new HashMap<>(5);
+ result.put(Constants.STATUS, false);
+
+ //only admin can operate
+ if (checkAdmin(loginUser, result)){
+ return result;
+ }
+
+ userAlertGroupMapper.deleteByAlertgroupId(alertgroupId);
+ if (StringUtils.isEmpty(userIds)) {
+ putMsg(result, Status.SUCCESS);
+ return result;
+ }
+
+ String[] userIdsArr = userIds.split(",");
+
+ for (String userId : userIdsArr) {
+ Date now = new Date();
+ UserAlertGroup userAlertGroup = new UserAlertGroup();
+ userAlertGroup.setAlertgroupId(alertgroupId);
+ userAlertGroup.setUserId(Integer.parseInt(userId));
+ userAlertGroup.setCreateTime(now);
+ userAlertGroup.setUpdateTime(now);
+ userAlertGroupMapper.insert(userAlertGroup);
+ }
+
+ putMsg(result, Status.SUCCESS);
+ return result;
+ }
+
+ /**
+ * verify group name exists
+ *
+ * @param loginUser
+ * @param groupName
+ * @return
+ */
+ public Result verifyGroupName(User loginUser, String groupName) {
+ Result result = new Result();
+ AlertGroup alertGroup = alertGroupMapper.queryByGroupName(groupName);
+ if (alertGroup != null) {
+ logger.error("group {} has exist, can't create again.", groupName);
+ result.setCode(Status.ALERT_GROUP_EXIST.getCode());
+ result.setMsg(Status.ALERT_GROUP_EXIST.getMsg());
+ } else {
+ result.setCode(Status.SUCCESS.getCode());
+ result.setMsg(Status.SUCCESS.getMsg());
+ }
+
+ return result;
+ }
+
+ /**
+ * is admin?
+ * @param loginUser
+ * @param result
+ * @return
+ */
+ private boolean checkAdmin(User loginUser, Map<String, Object> result) {
+ if (!isAdmin(loginUser)) {
+ putMsg(result, Status.USER_NO_OPERATION_PERM);
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * put message
+ *
+ * @param result
+ * @param status
+ */
+ private void putMsg(Map<String, Object> result, Status status) {
+ result.put(Constants.STATUS, status);
+ result.put(Constants.MSG, status.getMsg());
+ }
+}
diff --git a/escheduler-api/src/main/java/cn/escheduler/api/service/BaseDAGService.java b/escheduler-api/src/main/java/cn/escheduler/api/service/BaseDAGService.java
new file mode 100644
index 0000000000..76d16c13ab
--- /dev/null
+++ b/escheduler-api/src/main/java/cn/escheduler/api/service/BaseDAGService.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package cn.escheduler.api.service;
+
+import cn.escheduler.common.graph.DAG;
+import cn.escheduler.common.model.TaskNode;
+import cn.escheduler.common.model.TaskNodeRelation;
+import cn.escheduler.common.process.ProcessDag;
+import cn.escheduler.common.utils.CollectionUtils;
+import cn.escheduler.common.utils.JSONUtils;
+import cn.escheduler.dao.model.ProcessData;
+import cn.escheduler.dao.model.ProcessInstance;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * base DAG service
+ */
+public class BaseDAGService extends BaseService {
+
+
+ /**
+ * process instance to DAG
+ *
+ * @param processInstance
+ * @return
+ * @throws Exception
+ */
+ public static DAG<String, TaskNode, TaskNodeRelation> processInstance2DAG(ProcessInstance processInstance) throws Exception {
+
+ String processDefinitionJson = processInstance.getProcessInstanceJson();
+
+ ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
+
+ List<TaskNode> taskNodeList = processData.getTasks();
+
+ List<TaskNodeRelation> taskNodeRelations = new ArrayList<>();
+
+ //Traversing node information and building relationships
+ for (TaskNode taskNode : taskNodeList) {
+ String preTasks = taskNode.getPreTasks();
+ List<String> preTasksList = JSONUtils.toList(preTasks, String.class);
+
+ //if previous tasks not empty
+ if (preTasksList != null) {
+ for (String depNode : preTasksList) {
+ taskNodeRelations.add(new TaskNodeRelation(depNode, taskNode.getName()));
+ }
+ }
+ }
+
+ ProcessDag processDag = new ProcessDag();
+ processDag.setEdges(taskNodeRelations);
+ processDag.setNodes(taskNodeList);
+
+
+ // generate detail Dag, to be executed
+ DAG<String, TaskNode, TaskNodeRelation> dag = new DAG<>();
+
+ if (CollectionUtils.isNotEmpty(processDag.getNodes())) {
+ for (TaskNode node : processDag.getNodes()) {
+ dag.addNode(node.getName(), node);
+ }
+ }
+
+ if (CollectionUtils.isNotEmpty(processDag.getEdges())) {
+ for (TaskNodeRelation edge : processDag.getEdges()) {
+ dag.addEdge(edge.getStartNode(), edge.getEndNode());
+ }
+ }
+
+ return dag;
+ }
+}
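
A small illustration of the JSON shape processInstance2DAG() expects, where preTasks is itself a JSON-encoded list of task names; the helper class is hypothetical and only exercises the method above:

import cn.escheduler.api.service.BaseDAGService;
import cn.escheduler.common.graph.DAG;
import cn.escheduler.common.model.TaskNode;
import cn.escheduler.common.model.TaskNodeRelation;
import cn.escheduler.dao.model.ProcessInstance;

public class DagExample {
    public static void printDag(ProcessInstance processInstance) throws Exception {
        // for instance json {"tasks":[{"name":"A","preTasks":"[]"},
        // {"name":"B","preTasks":"[\"A\"]"}]} the DAG holds nodes A, B
        // and one edge A -> B
        DAG<String, TaskNode, TaskNodeRelation> dag = BaseDAGService.processInstance2DAG(processInstance);
        System.out.println(dag);
    }
}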
diff --git a/escheduler-api/src/main/java/cn/escheduler/api/service/BaseService.java b/escheduler-api/src/main/java/cn/escheduler/api/service/BaseService.java
new file mode 100644
index 0000000000..92113a5a67
--- /dev/null
+++ b/escheduler-api/src/main/java/cn/escheduler/api/service/BaseService.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package cn.escheduler.api.service;
+
+import cn.escheduler.api.enums.Status;
+import cn.escheduler.api.utils.Constants;
+import cn.escheduler.api.utils.Result;
+import cn.escheduler.common.enums.UserType;
+import cn.escheduler.dao.model.User;
+import org.apache.commons.lang3.StringUtils;
+
+import javax.servlet.http.Cookie;
+import javax.servlet.http.HttpServletRequest;
+import java.text.MessageFormat;
+import java.util.Map;
+
+/**
+ * base service
+ */
+public class BaseService {
+
+ /**
+ * check admin
+ *
+ * @param user
+ * @return
+ */
+ protected boolean isAdmin(User user) {
+ return user.getUserType() == UserType.ADMIN_USER;
+ }
+
+ /**
+ * check admin
+ *
+ * @param loginUser
+ * @param result
+ * @return
+ */
+ protected boolean checkAdmin(User loginUser, Map<String, Object> result) {
+ //only admin can operate
+ if (!isAdmin(loginUser)) {
+ putMsg(result, Status.USER_NO_OPERATION_PERM);
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * put message to map
+ *
+ * @param result
+ * @param status
+ * @param statusParams
+ */
+ protected void putMsg(Map<String, Object> result, Status status, Object... statusParams) {
+ result.put(Constants.STATUS, status);
+ if (statusParams != null && statusParams.length > 0) {
+ result.put(Constants.MSG, MessageFormat.format(status.getMsg(), statusParams));
+ } else {
+ result.put(Constants.MSG, status.getMsg());
+ }
+ }
+
+ /**
+ * put message to result object
+ *
+ * @param result
+ * @param status
+ */
+ protected void putMsg(Result result, Status status, Object... statusParams) {
+ result.setCode(status.getCode());
+
+ if (statusParams != null && statusParams.length > 0) {
+ result.setMsg(MessageFormat.format(status.getMsg(), statusParams));
+ } else {
+ result.setMsg(status.getMsg());
+ }
+
+ }
+
+ /**
+ * get cookie info by name
+ * @param request
+ * @param name
+ * @return get cookie info
+ */
+ public static Cookie getCookie(HttpServletRequest request, String name) {
+ Cookie[] cookies = request.getCookies();
+ if (cookies != null && cookies.length > 0) {
+ for (Cookie cookie : cookies) {
+ if (StringUtils.equalsIgnoreCase(name, cookie.getName())) {
+ return cookie;
+ }
+ }
+ }
+
+ return null;
+ }
+}
diff --git a/escheduler-api/src/main/java/cn/escheduler/api/service/DataAnalysisService.java b/escheduler-api/src/main/java/cn/escheduler/api/service/DataAnalysisService.java
new file mode 100644
index 0000000000..8cceed11e0
--- /dev/null
+++ b/escheduler-api/src/main/java/cn/escheduler/api/service/DataAnalysisService.java
@@ -0,0 +1,209 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package cn.escheduler.api.service;
+
+
+import cn.escheduler.api.dto.DefineUserDto;
+import cn.escheduler.api.dto.TaskCountDto;
+import cn.escheduler.api.enums.Status;
+import cn.escheduler.api.utils.Constants;
+import cn.escheduler.common.utils.DateUtils;
+import cn.escheduler.dao.mapper.ProcessDefinitionMapper;
+import cn.escheduler.dao.mapper.ProcessInstanceMapper;
+import cn.escheduler.dao.mapper.ProjectMapper;
+import cn.escheduler.dao.mapper.TaskInstanceMapper;
+import cn.escheduler.dao.model.DefinitionGroupByUser;
+import cn.escheduler.dao.model.ExecuteStatusCount;
+import cn.escheduler.dao.model.Project;
+import cn.escheduler.dao.model.User;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Service;
+
+import java.text.MessageFormat;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * data analysis service
+ */
+@Service
+public class DataAnalysisService {
+
+ private static final Logger logger = LoggerFactory.getLogger(DataAnalysisService.class);
+
+ @Autowired
+ ProjectMapper projectMapper;
+
+ @Autowired
+ ProjectService projectService;
+
+ @Autowired
+ TaskInstanceMapper taskInstanceMapper;
+
+ @Autowired
+ ProcessInstanceMapper processInstanceMapper;
+
+ @Autowired
+ ProcessDefinitionMapper processDefinitionMapper;
+
+    /**
+     * count task instance states within a date range
+     *
+     * @param loginUser login user
+     * @param projectId project id; 0 means all projects visible to the user
+     * @param startDate start date string
+     * @param endDate end date string
+     * @return result map containing a TaskCountDto
+     */
+ public Map countTaskStateByProject(User loginUser, int projectId, String startDate, String endDate) {
+
+ Map result = new HashMap<>(5);
+ if(projectId != 0){
+ Project project = projectMapper.queryById(projectId);
+ result = projectService.checkProjectAndAuth(loginUser, project, String.valueOf(projectId));
+
+ if (getResultStatus(result)){
+ return result;
+ }
+ }
+
+        // find all tasks in the projects visible to the user and count them
+        // by state: execution, failure, completion, wait, total
+ Date start = null;
+ Date end = null;
+
+ try {
+ start = DateUtils.getScheduleDate(startDate);
+ end = DateUtils.getScheduleDate(endDate);
+ } catch (Exception e) {
+ logger.error(e.getMessage(),e);
+ putErrorRequestParamsMsg(result);
+ return result;
+ }
+
+ List taskInstanceStateCounts =
+ taskInstanceMapper.countTaskInstanceStateByUser(loginUser.getId(),
+ loginUser.getUserType(), start, end, projectId);
+
+        if (taskInstanceStateCounts != null) {
+            // construct the DTO only after the null check to avoid a possible NPE
+            TaskCountDto taskCountResult = new TaskCountDto(taskInstanceStateCounts);
+            result.put(Constants.DATA_LIST, taskCountResult);
+            putMsg(result, Status.SUCCESS);
+        } else {
+            putMsg(result, Status.TASK_INSTANCE_STATE_COUNT_ERROR);
+        }
+ return result;
+ }
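+    // Illustrative call (sketch; the date format is an assumption based on
+    // DateUtils.getScheduleDate):
+    //   countTaskStateByProject(loginUser, 0, "2019-01-01 00:00:00", "2019-01-31 00:00:00")
+    // counts task instance states across all projects visible to the user.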
+
+ private void putErrorRequestParamsMsg(Map result) {
+ result.put(Constants.STATUS, Status.REQUEST_PARAMS_NOT_VALID_ERROR);
+ result.put(Constants.MSG, MessageFormat.format(Status.REQUEST_PARAMS_NOT_VALID_ERROR.getMsg(), "startDate,endDate"));
+ }
+
+    /**
+     * count process instance states within a date range
+     *
+     * @param loginUser login user
+     * @param projectId project id; 0 means all projects visible to the user
+     * @param startDate start date string
+     * @param endDate end date string
+     * @return result map containing a TaskCountDto
+     */
+ public Map countProcessInstanceStateByProject(User loginUser, int projectId, String startDate, String endDate) {
+
+ Map result = new HashMap<>(5);
+ if(projectId != 0){
+ Project project = projectMapper.queryById(projectId);
+ result = projectService.checkProjectAndAuth(loginUser, project, String.valueOf(projectId));
+
+ if (getResultStatus(result)){
+ return result;
+ }
+ }
+
+ Date start = null;
+ Date end = null;
+ try {
+ start = DateUtils.getScheduleDate(startDate);
+ end = DateUtils.getScheduleDate(endDate);
+ } catch (Exception e) {
+ logger.error(e.getMessage(),e);
+ putErrorRequestParamsMsg(result);
+ return result;
+ }
+ List processInstanceStateCounts =
+ processInstanceMapper.countInstanceStateByUser(loginUser.getId(),
+ loginUser.getUserType(), start, end, projectId );
+
+        if (processInstanceStateCounts != null) {
+            // construct the DTO only after the null check to avoid a possible NPE
+            TaskCountDto taskCountResult = new TaskCountDto(processInstanceStateCounts);
+            result.put(Constants.DATA_LIST, taskCountResult);
+            putMsg(result, Status.SUCCESS);
+        } else {
+            putMsg(result, Status.COUNT_PROCESS_INSTANCE_STATE_ERROR);
+        }
+ return result;
+ }
+
+
+    /**
+     * count process definitions grouped by user
+     *
+     * @param loginUser login user
+     * @param projectId project id
+     * @return result map containing a DefineUserDto
+     */
+ public Map countDefinitionByUser(User loginUser, int projectId) {
+ Map result = new HashMap<>();
+
+ List defineGroupByUsers = processDefinitionMapper.countDefinitionGroupByUser(loginUser.getId(), loginUser.getUserType(), projectId);
+
+ DefineUserDto dto = new DefineUserDto(defineGroupByUsers);
+ result.put(Constants.DATA_LIST, dto);
+ putMsg(result, Status.SUCCESS);
+ return result;
+ }
+
+    /**
+     * put status message into the result map
+     *
+     * @param result result map
+     * @param status status to set
+     */
+ private void putMsg(Map result, Status status) {
+ result.put(Constants.STATUS, status);
+ result.put(Constants.MSG, status.getMsg());
+ }
+
+    /**
+     * check whether the result map carries a non-success status
+     * @param result result map
+     * @return true if the status is not SUCCESS
+     */
+    private boolean getResultStatus(Map result) {
+        Status resultEnum = (Status) result.get(Constants.STATUS);
+        return resultEnum != Status.SUCCESS;
+    }
+}
diff --git a/escheduler-api/src/main/java/cn/escheduler/api/service/DataSourceService.java b/escheduler-api/src/main/java/cn/escheduler/api/service/DataSourceService.java
new file mode 100644
index 0000000000..d5371e5f0a
--- /dev/null
+++ b/escheduler-api/src/main/java/cn/escheduler/api/service/DataSourceService.java
@@ -0,0 +1,603 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package cn.escheduler.api.service;
+
+import cn.escheduler.api.enums.Status;
+import cn.escheduler.api.utils.Constants;
+import cn.escheduler.api.utils.PageInfo;
+import cn.escheduler.api.utils.Result;
+import cn.escheduler.common.enums.DbType;
+import cn.escheduler.common.job.db.*;
+import cn.escheduler.dao.mapper.DataSourceMapper;
+import cn.escheduler.dao.mapper.DatasourceUserMapper;
+import cn.escheduler.dao.mapper.ProjectMapper;
+import cn.escheduler.dao.model.DataSource;
+import cn.escheduler.dao.model.Resource;
+import cn.escheduler.dao.model.User;
+import com.alibaba.fastjson.JSONObject;
+import com.alibaba.fastjson.TypeReference;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Service;
+import org.springframework.transaction.annotation.Transactional;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.util.*;
+
+/**
+ * datasource service
+ */
+@Service
+public class DataSourceService extends BaseService{
+
+ private static final Logger logger = LoggerFactory.getLogger(DataSourceService.class);
+
+ public static final String NAME = "name";
+ public static final String NOTE = "note";
+ public static final String TYPE = "type";
+ public static final String HOST = "host";
+ public static final String PORT = "port";
+ public static final String DATABASE = "database";
+ public static final String USER_NAME = "userName";
+ public static final String PASSWORD = "password";
+ public static final String OTHER = "other";
+
+ @Autowired
+ private ProjectMapper projectMapper;
+
+ @Autowired
+ private DataSourceMapper dataSourceMapper;
+
+ @Autowired
+ private ProjectService projectService;
+
+ @Autowired
+ private DatasourceUserMapper datasourceUserMapper;
+
+    /**
+     * create data source
+     *
+     * @param loginUser login user
+     * @param name datasource name
+     * @param desc description
+     * @param type database type
+     * @param parameter connection parameters in JSON
+     * @return result map
+     */
+ public Map createDataSource(User loginUser, String name, String desc, DbType type, String parameter) {
+
+ Map result = new HashMap<>(5);
+ // check name can use or not
+ if (checkName(name, result)) {
+ return result;
+ }
+ Boolean isConnection = checkConnection(type, parameter);
+ if (!isConnection) {
+ logger.info("connect failed, type:{}, parameter:{}", type, parameter);
+ putMsg(result, Status.DATASOURCE_CONNECT_FAILED);
+ return result;
+ }
+
+ BaseDataSource datasource = DataSourceFactory.getDatasource(type, parameter);
+ if (datasource == null) {
+ putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, parameter);
+ return result;
+ }
+
+ // build datasource
+ DataSource dataSource = new DataSource();
+ Date now = new Date();
+
+ dataSource.setName(name.trim());
+ dataSource.setNote(desc);
+ dataSource.setUserId(loginUser.getId());
+ dataSource.setUserName(loginUser.getUserName());
+ dataSource.setType(type);
+ dataSource.setConnectionParams(parameter);
+ dataSource.setCreateTime(now);
+ dataSource.setUpdateTime(now);
+ dataSourceMapper.insert(dataSource);
+
+ putMsg(result, Status.SUCCESS);
+
+ return result;
+ }
+
+
+    /**
+     * update datasource
+     *
+     * @param id datasource id
+     * @param loginUser login user
+     * @param name datasource name
+     * @param desc description
+     * @param type database type
+     * @param parameter connection parameters in JSON
+     * @return result map
+     */
+ public Map updateDataSource(int id, User loginUser, String name, String desc, DbType type, String parameter) {
+
+ Map result = new HashMap<>();
+ // determine whether the data source exists
+ DataSource dataSource = dataSourceMapper.queryById(id);
+ if (dataSource == null) {
+ putMsg(result, Status.RESOURCE_NOT_EXIST);
+ return result;
+ }
+
+ //check name can use or not
+ if(!name.trim().equals(dataSource.getName()) && checkName(name, result)){
+ return result;
+ }
+
+ Boolean isConnection = checkConnection(type, parameter);
+ if (!isConnection) {
+ logger.info("connect failed, type:{}, parameter:{}", type, parameter);
+ putMsg(result, Status.DATASOURCE_CONNECT_FAILED);
+ return result;
+ }
+ Date now = new Date();
+
+ dataSource.setName(name.trim());
+ dataSource.setNote(desc);
+ dataSource.setUserName(loginUser.getUserName());
+ dataSource.setType(type);
+ dataSource.setConnectionParams(parameter);
+ dataSource.setUpdateTime(now);
+ dataSourceMapper.update(dataSource);
+ putMsg(result, Status.SUCCESS);
+ return result;
+ }
+
+    /**
+     * check whether the datasource name is already in use; fills the result on conflict
+     */
+    private boolean checkName(String name, Map result) {
+ List queryDataSource = dataSourceMapper.queryDataSourceByName(name.trim());
+ if (queryDataSource != null && queryDataSource.size() > 0) {
+ putMsg(result, Status.DATASOURCE_EXIST);
+ return true;
+ }
+ return false;
+ }
+
+
+    /**
+     * query datasource detail by id
+     */
+ public Map queryDataSource(int id) {
+
+ Map result = new HashMap(5);
+ DataSource dataSource = dataSourceMapper.queryById(id);
+ if (dataSource == null) {
+ putMsg(result, Status.RESOURCE_NOT_EXIST);
+ return result;
+ }
+ // type
+ String dataSourceType = dataSource.getType().toString();
+ // name
+ String dataSourceName = dataSource.getName();
+ // desc
+ String desc = dataSource.getNote();
+ // parameter
+ String parameter = dataSource.getConnectionParams();
+
+ BaseDataSource datasourceForm = DataSourceFactory.getDatasource(dataSource.getType(), parameter);
+ String database = datasourceForm.getDatabase();
+ // jdbc connection params
+ String other = datasourceForm.getOther();
+ String address = datasourceForm.getAddress();
+
+ String[] hostsPorts = getHostsAndPort(address);
+ // ip host
+ String host = hostsPorts[0];
+        // port
+ String port = hostsPorts[1];
+ String separator = "";
+
+        switch (dataSource.getType()) {
+            case HIVE:
+                separator = ";";
+                break;
+            case MYSQL:
+            case POSTGRESQL:
+            default:
+                separator = "&";
+                break;
+        }
+
+        Map otherMap = new LinkedHashMap();
+        if (other != null) {
+            String[] configs = other.split(separator);
+            for (String config : configs) {
+                // split on the first '=' only, so values containing '=' are kept intact
+                String[] keyValue = config.split("=", 2);
+                otherMap.put(keyValue[0], keyValue[1]);
+            }
+        }
+
+ Map map = new HashMap<>(10);
+ map.put(NAME, dataSourceName);
+ map.put(NOTE, desc);
+ map.put(TYPE, dataSourceType);
+ map.put(HOST, host);
+ map.put(PORT, port);
+ map.put(DATABASE, database);
+ map.put(USER_NAME, datasourceForm.getUser());
+ map.put(PASSWORD, datasourceForm.getPassword());
+ map.put(OTHER, otherMap);
+ result.put(Constants.DATA_LIST, map);
+ putMsg(result, Status.SUCCESS);
+ return result;
+ }
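+    // Illustrative shape of the DATA_LIST entry returned above (sketch, values invented):
+    //   {"name":"mysql_test","note":"...","type":"MYSQL","host":"192.168.0.1",
+    //    "port":"3306","database":"test","userName":"root","password":"...","other":{...}}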
+
+
+    /**
+     * query datasource list by keyword, paging
+     *
+     * @param loginUser login user
+     * @param searchVal search keyword
+     * @param pageNo page number
+     * @param pageSize page size
+     * @return result map containing the page info
+     */
+ public Map queryDataSourceListPaging(User loginUser, String searchVal, Integer pageNo, Integer pageSize) {
+ Map result = new HashMap<>();
+
+ Integer count = getTotalCount(loginUser);
+
+ PageInfo pageInfo = new PageInfo(pageNo, pageSize);
+ pageInfo.setTotalCount(count);
+ List datasourceList = getDataSources(loginUser, searchVal, pageSize, pageInfo);
+
+ pageInfo.setLists(datasourceList);
+ result.put(Constants.DATA_LIST, pageInfo);
+ putMsg(result, Status.SUCCESS);
+
+ return result;
+ }
+
+ /**
+ * get list paging
+ *
+ * @param loginUser
+ * @param searchVal
+ * @param pageSize
+ * @param pageInfo
+ * @return
+ */
+ private List getDataSources(User loginUser, String searchVal, Integer pageSize, PageInfo pageInfo) {
+ if (isAdmin(loginUser)) {
+ return dataSourceMapper.queryAllDataSourcePaging(searchVal, pageInfo.getStart(), pageSize);
+ }
+ return dataSourceMapper.queryDataSourcePaging(loginUser.getId(), searchVal,
+ pageInfo.getStart(), pageSize);
+ }
+
+ /**
+ * get datasource total num
+ *
+ * @param loginUser
+ * @return
+ */
+ private Integer getTotalCount(User loginUser) {
+ if (isAdmin(loginUser)) {
+ return dataSourceMapper.countAllDatasource();
+ }
+ return dataSourceMapper.countUserDatasource(loginUser.getId());
+ }
+
+    /**
+     * query datasource list by type
+     *
+     * @param loginUser login user
+     * @param type database type
+     * @return result map containing the datasource list
+     */
+ public Map queryDataSourceList(User loginUser, Integer type) {
+ Map result = new HashMap<>(5);
+ List datasourceList = dataSourceMapper.queryDataSourceByType(loginUser.getId(), type);
+
+ result.put(Constants.DATA_LIST, datasourceList);
+ putMsg(result, Status.SUCCESS);
+
+ return result;
+ }
+
+    /**
+     * verify that the datasource name is not already in use
+     *
+     * @param loginUser login user
+     * @param name datasource name
+     * @return SUCCESS if the name is available, DATASOURCE_EXIST otherwise
+     */
+ public Result verifyDataSourceName(User loginUser, String name) {
+ Result result = new Result();
+ List dataSourceList = dataSourceMapper.queryDataSourceByName(name);
+ if (dataSourceList != null && dataSourceList.size() > 0) {
+ logger.error("datasource name:{} has exist, can't create again.", name);
+ putMsg(result, Status.DATASOURCE_EXIST);
+ } else {
+ putMsg(result, Status.SUCCESS);
+ }
+
+ return result;
+ }
+
+ /**
+ * get connection
+ *
+ * @param dbType
+ * @param parameter
+ * @return
+ */
+ private Connection getConnection(DbType dbType, String parameter) {
+ Connection connection = null;
+ BaseDataSource datasource = null;
+ try {
+ switch (dbType) {
+ case POSTGRESQL:
+ datasource = JSONObject.parseObject(parameter, PostgreDataSource.class);
+ Class.forName(Constants.ORG_POSTGRESQL_DRIVER);
+ break;
+ case MYSQL:
+ datasource = JSONObject.parseObject(parameter, MySQLDataSource.class);
+ Class.forName(Constants.COM_MYSQL_JDBC_DRIVER);
+ break;
+ case HIVE:
+ datasource = JSONObject.parseObject(parameter, HiveDataSource.class);
+ Class.forName(Constants.ORG_APACHE_HIVE_JDBC_HIVE_DRIVER);
+ break;
+ case SPARK:
+ datasource = JSONObject.parseObject(parameter, SparkDataSource.class);
+ Class.forName(Constants.ORG_APACHE_HIVE_JDBC_HIVE_DRIVER);
+ break;
+ default:
+ break;
+ }
+ if(datasource != null){
+ connection = DriverManager.getConnection(datasource.getJdbcUrl(), datasource.getUser(), datasource.getPassword());
+ }
+ } catch (Exception e) {
+ logger.error(e.getMessage(),e);
+ }
+ return connection;
+ }
+
+
+    /**
+     * check whether a connection can be established with the given parameters;
+     * the test connection is closed before returning
+     *
+     * @param type database type
+     * @param parameter connection parameters in JSON
+     * @return true if the connection succeeded
+     */
+    public boolean checkConnection(DbType type, String parameter) {
+        Connection con = getConnection(type, parameter);
+        if (con == null) {
+            return false;
+        }
+        try {
+            // close the test connection to avoid leaking it
+            con.close();
+        } catch (Exception e) {
+            logger.error("close test connection error", e);
+        }
+        return true;
+    }
+
+
+ /**
+ * test connection
+ *
+ * @param loginUser
+ * @param id
+ * @return
+ */
+    public boolean connectionTest(User loginUser, int id) {
+        DataSource dataSource = dataSourceMapper.queryById(id);
+        if (dataSource == null) {
+            logger.error("datasource id {} does not exist", id);
+            return false;
+        }
+        return checkConnection(dataSource.getType(), dataSource.getConnectionParams());
+    }
+
+    /**
+     * build connection parameters
+     *
+     * @param name datasource name
+     * @param desc description
+     * @param type database type
+     * @param host host, or comma-separated hosts for HIVE/SPARK
+     * @param port port
+     * @param database database name
+     * @param userName user name
+     * @param password password
+     * @param other extra jdbc parameters as a JSON object string
+     * @return connection parameters serialized as JSON
+     */
+ public String buildParameter(String name, String desc, DbType type, String host, String port, String database, String userName, String password, String other) {
+
+ String address = buildAddress(type, host, port);
+ String jdbcUrl = address + "/" + database;
+ String separator = "";
+ if (Constants.MYSQL.equals(type.name()) || Constants.POSTGRESQL.equals(type.name())) {
+ separator = "&";
+ } else if (Constants.HIVE.equals(type.name()) || Constants.SPARK.equals(type.name())) {
+ separator = ";";
+ }
+
+ Map parameterMap = new LinkedHashMap(6);
+ parameterMap.put(Constants.ADDRESS, address);
+ parameterMap.put(Constants.DATABASE, database);
+ parameterMap.put(Constants.JDBC_URL, jdbcUrl);
+ parameterMap.put(Constants.USER, userName);
+ parameterMap.put(Constants.PASSWORD, password);
+ if (other != null && !"".equals(other)) {
+            Map map = JSONObject.parseObject(other, new TypeReference<Map<String, String>>() {
+            });
+ if (map.size() > 0) {
+ Set keys = map.keySet();
+ StringBuilder otherSb = new StringBuilder();
+ for (String key : keys) {
+ otherSb.append(String.format("%s=%s%s", key, map.get(key), separator));
+
+ }
+ otherSb.deleteCharAt(otherSb.length() - 1);
+ parameterMap.put(Constants.OTHER, otherSb);
+ }
+
+ }
+
+ logger.info("parameters map-----" + JSONObject.toJSONString(parameterMap));
+ return JSONObject.toJSONString(parameterMap);
+
+
+ }
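+    // Illustrative output (sketch): for a MYSQL datasource with host "192.168.0.1",
+    // port "3306" and database "test", the serialized map looks roughly like
+    //   {"address":"jdbc:mysql://192.168.0.1:3306","database":"test",
+    //    "jdbcUrl":"jdbc:mysql://192.168.0.1:3306/test","user":"...","password":"..."}
+    // assuming Constants.JDBC_MYSQL holds the "jdbc:mysql://" prefix and the map keys
+    // mirror the Constants values.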
+
+ private String buildAddress(DbType type, String host, String port) {
+ StringBuilder sb = new StringBuilder();
+ if (Constants.MYSQL.equals(type.name())) {
+ sb.append(Constants.JDBC_MYSQL);
+ sb.append(host).append(":").append(port);
+ } else if (Constants.POSTGRESQL.equals(type.name())) {
+ sb.append(Constants.JDBC_POSTGRESQL);
+ sb.append(host).append(":").append(port);
+ } else if (Constants.HIVE.equals(type.name()) || Constants.SPARK.equals(type.name())) {
+ sb.append(Constants.JDBC_HIVE_2);
+ String[] hostArray = host.split(",");
+ if (hostArray.length > 0) {
+ for (String zkHost : hostArray) {
+ sb.append(String.format("%s:%s,", zkHost, port));
+ }
+ sb.deleteCharAt(sb.length() - 1);
+ }
+ }
+
+ return sb.toString();
+ }
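+    // Illustrative example (sketch): for HIVE/SPARK a comma-separated host list is
+    // expanded so every host carries the same port, e.g. host "zk1,zk2" with port
+    // "10000" becomes "jdbc:hive2://zk1:10000,zk2:10000", assuming Constants.JDBC_HIVE_2
+    // holds the "jdbc:hive2://" prefix.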
+
+ /**
+ * delete datasource
+ *
+ * @param loginUser
+ * @param datasourceId
+ * @return
+ */
+ @Transactional(value = "TransactionManager",rollbackFor = Exception.class)
+ public Result delete(User loginUser, int datasourceId) {
+ Result result = new Result();
+ try {
+ //query datasource by id
+ DataSource dataSource = dataSourceMapper.queryById(datasourceId);
+ if(dataSource == null){
+ logger.error("resource id {} not exist", datasourceId);
+ putMsg(result, Status.RESOURCE_NOT_EXIST);
+ return result;
+ }
+ if(loginUser.getId() != dataSource.getUserId()){
+ putMsg(result, Status.USER_NO_OPERATION_PERM);
+ return result;
+ }
+ dataSourceMapper.deleteDataSourceById(datasourceId);
+ datasourceUserMapper.deleteByDatasourceId(datasourceId);
+ putMsg(result, Status.SUCCESS);
+        } catch (Exception e) {
+            logger.error("delete datasource failed", e);
+            // keep the original exception as the cause
+            throw new RuntimeException("delete datasource failed", e);
+        }
+ return result;
+ }
+
+    /**
+     * query the datasource list not yet authorized to the given user
+     *
+     * @param loginUser login user (must be admin)
+     * @param userId user id
+     * @return result map containing the unauthorized datasource list
+     */
+ public Map unauthDatasource(User loginUser, Integer userId) {
+
+ Map result = new HashMap<>();
+ //only admin operate
+ if (!isAdmin(loginUser)) {
+ putMsg(result, Status.USER_NO_OPERATION_PERM);
+ return result;
+ }
+
+        // query all data sources created by other users, then remove those
+        // already authorized to the given user
+ List resultList = new ArrayList<>();
+ List datasourceList = dataSourceMapper.queryDatasourceExceptUserId(userId);
+ Set datasourceSet = null;
+ if (datasourceList != null && datasourceList.size() > 0) {
+ datasourceSet = new HashSet<>(datasourceList);
+
+ List authedDataSourceList = dataSourceMapper.authedDatasource(userId);
+
+ Set authedDataSourceSet = null;
+ if (authedDataSourceList != null && authedDataSourceList.size() > 0) {
+ authedDataSourceSet = new HashSet<>(authedDataSourceList);
+ datasourceSet.removeAll(authedDataSourceSet);
+
+ }
+ resultList = new ArrayList<>(datasourceSet);
+ }
+ result.put(Constants.DATA_LIST, resultList);
+ putMsg(result, Status.SUCCESS);
+ return result;
+ }
+
+
+    /**
+     * query the datasource list already authorized to the given user
+     *
+     * @param loginUser login user (must be admin)
+     * @param userId user id
+     * @return result map containing the authorized datasource list
+     */
+ public Map authedDatasource(User loginUser, Integer userId) {
+ Map result = new HashMap<>(5);
+
+ if (!isAdmin(loginUser)) {
+ putMsg(result, Status.USER_NO_OPERATION_PERM);
+ return result;
+ }
+
+ List authedDatasourceList = dataSourceMapper.authedDatasource(userId);
+ result.put(Constants.DATA_LIST, authedDatasourceList);
+ putMsg(result, Status.SUCCESS);
+ return result;
+ }
+
+
+    /**
+     * split a jdbc address into hosts and port; assumes every host shares the same port
+     *
+     * @param address jdbc address
+     * @return array of [comma-separated hosts, port]
+     */
+ private String[] getHostsAndPort(String address) {
+ String[] result = new String[2];
+ String[] tmpArray = address.split("//");
+ String hostsAndPorts = tmpArray[tmpArray.length - 1];
+ StringBuilder hosts = new StringBuilder("");
+ String[] hostPortArray = hostsAndPorts.split(",");
+ String port = hostPortArray[0].split(":")[1];
+ for (String hostPort : hostPortArray) {
+ hosts.append(hostPort.split(":")[0]).append(",");
+ }
+ hosts.deleteCharAt(hosts.length() - 1);
+ result[0] = hosts.toString();
+ result[1] = port;
+ return result;
+ }
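+    // e.g. (sketch, prefix assumed): getHostsAndPort("jdbc:hive2://zk1:2181,zk2:2181")
+    // returns ["zk1,zk2", "2181"]; only the port of the first host is kept.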
+}
diff --git a/escheduler-api/src/main/java/cn/escheduler/api/service/ExecutorService.java b/escheduler-api/src/main/java/cn/escheduler/api/service/ExecutorService.java
new file mode 100644
index 0000000000..de000233ec
--- /dev/null
+++ b/escheduler-api/src/main/java/cn/escheduler/api/service/ExecutorService.java
@@ -0,0 +1,495 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package cn.escheduler.api.service;
+
+
+import cn.escheduler.api.enums.ExecuteType;
+import cn.escheduler.api.enums.Status;
+import cn.escheduler.api.utils.Constants;
+import cn.escheduler.common.enums.*;
+import cn.escheduler.common.utils.DateUtils;
+import cn.escheduler.common.utils.JSONUtils;
+import cn.escheduler.dao.ProcessDao;
+import cn.escheduler.dao.mapper.ProcessDefinitionMapper;
+import cn.escheduler.dao.mapper.ProcessInstanceMapper;
+import cn.escheduler.dao.mapper.ProjectMapper;
+import cn.escheduler.dao.model.*;
+import org.apache.commons.lang3.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Service;
+
+import java.text.ParseException;
+import java.util.*;
+
+import static cn.escheduler.common.Constants.*;
+
+/**
+ * executor service
+ */
+@Service
+public class ExecutorService extends BaseService{
+
+ private static final Logger logger = LoggerFactory.getLogger(ExecutorService.class);
+
+ @Autowired
+ private ProjectMapper projectMapper;
+
+ @Autowired
+ private ProjectService projectService;
+
+ @Autowired
+ private ProcessDefinitionMapper processDefinitionMapper;
+
+ @Autowired
+ private ProcessDefinitionService processDefinitionService;
+
+
+ @Autowired
+ private ProcessInstanceMapper processInstanceMapper;
+
+
+ @Autowired
+ private ProcessDao processDao;
+
+ /**
+ * execute process instance
+ *
+ * @param loginUser login user
+ * @param projectName project name
+ * @param processDefinitionId process Definition Id
+ * @param cronTime cron time
+ * @param commandType command type
+     * @param failureStrategy failure strategy
+     * @param startNodeList start node list
+ * @param taskDependType node dependency type
+ * @param warningType warning type
+ * @param warningGroupId notify group id
+ * @param receivers receivers
+ * @param receiversCc receivers cc
+ * @param timeout timeout
+ * @return
+ */
+ public Map execProcessInstance(User loginUser, String projectName,
+ int processDefinitionId, String cronTime, CommandType commandType,
+ FailureStrategy failureStrategy, String startNodeList,
+ TaskDependType taskDependType, WarningType warningType, int warningGroupId,
+ String receivers, String receiversCc, RunMode runMode,
+ Priority processInstancePriority, Integer timeout) throws ParseException {
+ Map result = new HashMap<>(5);
+ // timeout is valid
+ if (timeout <= 0 || timeout > MAX_TASK_TIMEOUT) {
+ putMsg(result,Status.TASK_TIMEOUT_PARAMS_ERROR);
+ return result;
+ }
+ Project project = projectMapper.queryByName(projectName);
+ Map checkResultAndAuth = checkResultAndAuth(loginUser, projectName, project);
+ if (checkResultAndAuth != null){
+ return checkResultAndAuth;
+ }
+
+ // check process define release state
+ ProcessDefinition processDefinition = processDefinitionMapper.queryByDefineId(processDefinitionId);
+ result = checkProcessDefinitionValid(processDefinition, processDefinitionId);
+ if(result.get(Constants.STATUS) != Status.SUCCESS){
+ return result;
+ }
+
+        // create command
+        int create = this.createCommand(commandType, processDefinitionId,
+                taskDependType, failureStrategy, startNodeList, cronTime, warningType, loginUser.getId(),
+                warningGroupId, runMode, processInstancePriority);
+        if(create > 0 ){
+            // update the receivers and cc list of the process definition
+            processDefinitionMapper.updateReceiversAndCcById(receivers, receiversCc, processDefinitionId);
+ putMsg(result, Status.SUCCESS);
+ } else {
+ putMsg(result, Status.START_PROCESS_INSTANCE_ERROR);
+ }
+ return result;
+ }
+
+
+
+    /**
+     * check whether the process definition can be executed
+     *
+     * @param processDefinition process definition
+     * @param processDefineId process definition id
+     * @return result map
+     */
+ public Map checkProcessDefinitionValid(ProcessDefinition processDefinition, int processDefineId){
+ Map result = new HashMap<>(5);
+ if (processDefinition == null) {
+ // check process definition exists
+ putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST,processDefineId);
+ } else if (processDefinition.getReleaseState() != ReleaseState.ONLINE) {
+ // check process definition online
+ putMsg(result, Status.PROCESS_DEFINE_NOT_RELEASE,processDefineId);
+ }else{
+ result.put(Constants.STATUS, Status.SUCCESS);
+ }
+ return result;
+ }
+
+
+
+    /**
+     * do action to process instance: pause, stop, rerun, recover from pause, recover from failure
+     *
+     * @param loginUser login user
+     * @param projectName project name
+     * @param processInstanceId process instance id
+     * @param executeType execute type
+     * @return result map
+     */
+ public Map execute(User loginUser, String projectName, Integer processInstanceId, ExecuteType executeType) {
+ Map result = new HashMap<>(5);
+ Project project = projectMapper.queryByName(projectName);
+
+ Map checkResult = checkResultAndAuth(loginUser, projectName, project);
+ if (checkResult != null) {
+ return checkResult;
+ }
+
+ ProcessInstance processInstance = processDao.findProcessInstanceDetailById(processInstanceId);
+ if (processInstance == null) {
+ putMsg(result, Status.PROCESS_INSTANCE_NOT_EXIST, processInstanceId);
+ return result;
+ }
+
+ ProcessDefinition processDefinition = processDao.findProcessDefineById(processInstance.getProcessDefinitionId());
+ result = checkProcessDefinitionValid(processDefinition, processInstance.getProcessDefinitionId());
+ if (result.get(Constants.STATUS) != Status.SUCCESS) {
+ return result;
+ }
+
+ checkResult = checkExecuteType(processInstance, executeType);
+ Status status = (Status) checkResult.get(Constants.STATUS);
+ if (status != Status.SUCCESS) {
+ return checkResult;
+ }
+
+ switch (executeType) {
+ case REPEAT_RUNNING:
+ result = insertCommand(loginUser, processInstanceId, processDefinition.getId(), CommandType.REPEAT_RUNNING);
+ break;
+ case RECOVER_SUSPENDED_PROCESS:
+ result = insertCommand(loginUser, processInstanceId, processDefinition.getId(), CommandType.RECOVER_SUSPENDED_PROCESS);
+ break;
+ case START_FAILURE_TASK_PROCESS:
+ result = insertCommand(loginUser, processInstanceId, processDefinition.getId(), CommandType.START_FAILURE_TASK_PROCESS);
+ break;
+ case STOP:
+ if (processInstance.getState() == ExecutionStatus.READY_STOP) {
+ putMsg(result, Status.PROCESS_INSTANCE_ALREADY_CHANGED, processInstance.getName(), processInstance.getState());
+ } else {
+ processInstance.setCommandType(CommandType.STOP);
+ processInstance.addHistoryCmd(CommandType.STOP);
+ processDao.updateProcessInstance(processInstance);
+ result = updateProcessInstanceState(processInstanceId, ExecutionStatus.READY_STOP);
+ }
+ break;
+ case PAUSE:
+ if (processInstance.getState() == ExecutionStatus.READY_PAUSE) {
+ putMsg(result, Status.PROCESS_INSTANCE_ALREADY_CHANGED, processInstance.getName(), processInstance.getState());
+ } else {
+ processInstance.setCommandType(CommandType.PAUSE);
+ processInstance.addHistoryCmd(CommandType.PAUSE);
+ processDao.updateProcessInstance(processInstance);
+ result = updateProcessInstanceState(processInstanceId, ExecutionStatus.READY_PAUSE);
+ }
+ break;
+ default:
+ logger.error(String.format("unknown execute type : %s", executeType.toString()));
+ putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, "unknown execute type");
+
+ break;
+ }
+ return result;
+ }
+
+    /**
+     * check whether the process instance state matches the requested operation type
+     *
+     * @param processInstance process instance
+     * @param executeType execute type
+     * @return result map with SUCCESS when the operation is allowed
+     */
+ private Map checkExecuteType(ProcessInstance processInstance, ExecuteType executeType) {
+
+ Map result = new HashMap<>(5);
+ ExecutionStatus executionStatus = processInstance.getState();
+ boolean checkResult = false;
+ switch (executeType) {
+ case PAUSE:
+ case STOP:
+ if (executionStatus.typeIsRunning()) {
+ checkResult = true;
+ }
+ break;
+ case REPEAT_RUNNING:
+ if (executionStatus.typeIsFinished()) {
+ checkResult = true;
+ }
+ break;
+ case START_FAILURE_TASK_PROCESS:
+ if (executionStatus.typeIsFailure()) {
+ checkResult = true;
+ }
+ break;
+            case RECOVER_SUSPENDED_PROCESS:
+                if (executionStatus.typeIsPause()) {
+                    checkResult = true;
+                }
+                break;
+            default:
+                break;
+ }
+ if (!checkResult) {
+ putMsg(result,Status.PROCESS_INSTANCE_STATE_OPERATION_ERROR, processInstance.getName(), executionStatus.toString(), executeType.toString());
+ } else {
+ putMsg(result, Status.SUCCESS);
+ }
+ return result;
+ }
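+    // Summary of the state/operation matrix enforced above:
+    //   PAUSE, STOP                -> allowed while the instance is running
+    //   REPEAT_RUNNING             -> allowed once the instance has finished
+    //   START_FAILURE_TASK_PROCESS -> allowed when the instance has failed
+    //   RECOVER_SUSPENDED_PROCESS  -> allowed while the instance is paused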
+
+    /**
+     * update process instance state
+     *
+     * @param processInstanceId process instance id
+     * @param executionStatus target execution status
+     * @return result map
+     */
+ private Map updateProcessInstanceState(Integer processInstanceId, ExecutionStatus executionStatus) {
+ Map result = new HashMap<>(5);
+
+ int update = processDao.updateProcessInstanceState(processInstanceId, executionStatus);
+ if (update > 0) {
+ putMsg(result, Status.SUCCESS);
+ } else {
+ putMsg(result, Status.EXECUTE_PROCESS_INSTANCE_ERROR);
+ }
+
+ return result;
+ }
+
+    /**
+     * insert a command to rerun a process instance or recover it from pause/failure
+     *
+     * @param loginUser login user
+     * @param instanceId process instance id
+     * @param processDefinitionId process definition id
+     * @param commandType command type
+     * @return result map
+     */
+ private Map insertCommand(User loginUser, Integer instanceId, Integer processDefinitionId, CommandType commandType) {
+ Map result = new HashMap<>(5);
+ Command command = new Command();
+ command.setCommandType(commandType);
+ command.setProcessDefinitionId(processDefinitionId);
+ command.setCommandParam(String.format("{\"%s\":%d}",
+ CMDPARAM_RECOVER_PROCESS_ID_STRING, instanceId));
+ command.setExecutorId(loginUser.getId());
+
+ if(!processDao.verifyIsNeedCreateCommand(command)){
+ putMsg(result, Status.PROCESS_INSTANCE_EXECUTING_COMMAND,processDefinitionId);
+ return result;
+ }
+
+ int create = processDao.createCommand(command);
+
+ if (create > 0) {
+ putMsg(result, Status.SUCCESS);
+ } else {
+ putMsg(result, Status.EXECUTE_PROCESS_INSTANCE_ERROR);
+ }
+
+ return result;
+ }
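+    // Illustrative command param (sketch): for process instance id 42 the generated
+    // JSON is {"<key>":42}, where <key> is the value of
+    // CMDPARAM_RECOVER_PROCESS_ID_STRING.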
+
+    /**
+     * check that all subprocesses are online before starting a process definition
+     * @param processDefineId process definition id
+     * @return result map
+     */
+ public Map startCheckByProcessDefinedId(int processDefineId) {
+ Map result = new HashMap();
+
+        if (processDefineId == 0){
+            logger.error("process definition id is not valid");
+            putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, "process definition id");
+            // return early instead of continuing with an invalid id
+            return result;
+        }
+ List ids = new ArrayList<>();
+ processDao.recurseFindSubProcessId(processDefineId, ids);
+ if (ids.size() > 0){
+ List processDefinitionList = processDefinitionMapper.queryDefinitionListByIdList(ids);
+ if (processDefinitionList != null && processDefinitionList.size() > 0){
+ for (ProcessDefinition processDefinition : processDefinitionList){
+                    // if any subprocess is not online, return the error directly
+                    if (processDefinition.getReleaseState() != ReleaseState.ONLINE){
+ putMsg(result,Status.PROCESS_DEFINE_NOT_RELEASE, processDefinition.getName());
+ logger.info("not release process definition id: {} , name : {}",
+ processDefinition.getId(), processDefinition.getName());
+ return result;
+ }
+ }
+ }
+ }
+ putMsg(result, Status.SUCCESS);
+ return result;
+ }
+
+    /**
+     * query receivers and cc list by process definition id
+     *
+     * @param processDefineId process definition id
+     * @return result map containing receivers and cc
+     */
+ public Map getReceiverCc(int processDefineId) {
+ Map result = new HashMap<>();
+
+ ProcessDefinition processDefinition = processDefinitionMapper.queryByDefineId(processDefineId);
+ if (processDefinition == null){
+ throw new RuntimeException("processDefineId is not exists");
+ }
+ String receivers = processDefinition.getReceivers();
+ String receiversCc = processDefinition.getReceiversCc();
+ Map dataMap = new HashMap<>();
+ dataMap.put(Constants.RECEIVERS,receivers);
+ dataMap.put(Constants.RECEIVERS_CC,receiversCc);
+
+ result.put(Constants.DATA_LIST, dataMap);
+ putMsg(result, Status.SUCCESS);
+ return result;
+ }
+
+
+    /**
+     * create command
+     *
+     * @param commandType command type
+     * @param processDefineId process definition id
+     * @param nodeDep node dependency type
+     * @param failureStrategy failure strategy
+     * @param startNodeList start node list
+     * @param schedule schedule date range, "start,end"
+     * @param warningType warning type
+     * @param executorId executor user id
+     * @param warningGroupId warning group id
+     * @param runMode run mode (serial or parallel, for complement data)
+     * @param processInstancePriority process instance priority
+     * @return number of commands created
+     * @throws ParseException
+     */
+ private int createCommand(CommandType commandType, int processDefineId,
+ TaskDependType nodeDep, FailureStrategy failureStrategy,
+ String startNodeList, String schedule, WarningType warningType,
+                              int executorId, int warningGroupId,
+                              RunMode runMode, Priority processInstancePriority) throws ParseException {
+
+        // instantiate the command
+        Command command = new Command();
+
+ Map cmdParam = new HashMap<>();
+ if(commandType == null){
+ command.setCommandType(CommandType.START_PROCESS);
+ }else{
+ command.setCommandType(commandType);
+ }
+ command.setProcessDefinitionId(processDefineId);
+ if(nodeDep != null){
+ command.setTaskDependType(nodeDep);
+ }
+ if(failureStrategy != null){
+ command.setFailureStrategy(failureStrategy);
+ }
+
+ if(StringUtils.isNotEmpty(startNodeList)){
+ cmdParam.put(CMDPARAM_START_NODE_NAMES, startNodeList);
+ }
+ if(warningType != null){
+ command.setWarningType(warningType);
+ }
+ command.setCommandParam(JSONUtils.toJson(cmdParam));
+        command.setExecutorId(executorId);
+ command.setWarningGroupId(warningGroupId);
+ command.setProcessInstancePriority(processInstancePriority);
+
+ Date start = null;
+ Date end = null;
+ if(StringUtils.isNotEmpty(schedule)){
+ String[] interval = schedule.split(",");
+ if(interval.length == 2){
+ start = DateUtils.getScheduleDate(interval[0]);
+ end = DateUtils.getScheduleDate(interval[1]);
+ }
+ }
+
+ if(commandType == CommandType.COMPLEMENT_DATA){
+ runMode = (runMode == null) ? RunMode.RUN_MODE_SERIAL : runMode;
+ if(runMode == RunMode.RUN_MODE_SERIAL){
+ cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, DateUtils.dateToString(start));
+ cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, DateUtils.dateToString(end));
+ command.setCommandParam(JSONUtils.toJson(cmdParam));
+ return processDao.createCommand(command);
+            }else if (runMode == RunMode.RUN_MODE_PARALLEL){
+                // in parallel mode, create one command per day in the date range
+                int runCount = 0;
+                while(!start.after(end)){
+                    runCount += 1;
+                    cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, DateUtils.dateToString(start));
+                    cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, DateUtils.dateToString(start));
+                    command.setCommandParam(JSONUtils.toJson(cmdParam));
+                    processDao.createCommand(command);
+                    start = DateUtils.getSomeDay(start, 1);
+                }
+                return runCount;
+            }
+ }else{
+ command.setCommandParam(JSONUtils.toJson(cmdParam));
+ return processDao.createCommand(command);
+ }
+
+ return 0;
+ }
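+    // Illustrative behaviour (sketch, assuming the schedule strings parse as dates):
+    // for COMPLEMENT_DATA with schedule "2019-01-01 00:00:00,2019-01-03 00:00:00",
+    // serial mode creates one command covering the whole range, while parallel mode
+    // creates one command per day (three here) and returns that count.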
+
+    /**
+     * check project auth
+     *
+     * @param loginUser login user
+     * @param projectName project name
+     * @param project project
+     * @return null if the check passes, otherwise the failed check result
+     */
+ private Map checkResultAndAuth(User loginUser, String projectName, Project project) {
+ // check project auth
+ Map checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
+ Status status = (Status) checkResult.get(Constants.STATUS);
+ if (status != Status.SUCCESS) {
+ return checkResult;
+ }
+ return null;
+ }
+
+}
diff --git a/escheduler-api/src/main/java/cn/escheduler/api/service/LoggerService.java b/escheduler-api/src/main/java/cn/escheduler/api/service/LoggerService.java
new file mode 100644
index 0000000000..46a175ec75
--- /dev/null
+++ b/escheduler-api/src/main/java/cn/escheduler/api/service/LoggerService.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package cn.escheduler.api.service;
+
+import cn.escheduler.api.enums.Status;
+import cn.escheduler.api.log.LogClient;
+import cn.escheduler.api.utils.Result;
+import cn.escheduler.common.Constants;
+import cn.escheduler.dao.ProcessDao;
+import cn.escheduler.dao.model.TaskInstance;
+import org.apache.commons.lang3.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Service;
+
+/**
+ * log service
+ */
+@Service
+public class LoggerService {
+
+ private static final Logger logger = LoggerFactory.getLogger(LoggerService.class);
+
+ @Autowired
+ private ProcessDao processDao;
+
+    /**
+     * view log of a task instance
+     *
+     * @param taskInstId task instance id
+     * @param skipLineNum number of lines to skip
+     * @param limit maximum number of lines to return
+     * @return result containing the log content
+     */
+ public Result queryLog(int taskInstId, int skipLineNum, int limit) {
+
+ TaskInstance taskInstance = processDao.findTaskInstanceById(taskInstId);
+ String host = taskInstance.getHost();
+ if(StringUtils.isEmpty(host)){
+ return new Result(Status.TASK_INSTANCE_HOST_NOT_FOUND.getCode(), Status.TASK_INSTANCE_HOST_NOT_FOUND.getMsg());
+ }
+ logger.info("log host : {} , logPath : {} , logServer port : {}",host,taskInstance.getLogPath(),Constants.RPC_PORT);
+
+ Result result = new Result(Status.SUCCESS.getCode(), Status.SUCCESS.getMsg());
+
+        // host is guaranteed non-empty here; fetch the log from the remote log server
+        LogClient logClient = new LogClient(host, Constants.RPC_PORT);
+        String log = logClient.rollViewLog(taskInstance.getLogPath(), skipLineNum, limit);
+        result.setData(log);
+
+ return result;
+ }
+
+    /**
+     * get the whole log file content as bytes
+     *
+     * @param taskInstId task instance id
+     * @return log file bytes
+     */
+ public byte[] getLogBytes(int taskInstId) {
+ TaskInstance taskInstance = processDao.findTaskInstanceById(taskInstId);
+ if (taskInstance == null){
+ throw new RuntimeException("task instance is null");
+ }
+ String host = taskInstance.getHost();
+ LogClient logClient = new LogClient(host, Constants.RPC_PORT);
+ return logClient.getLogBytes(taskInstance.getLogPath());
+ }
+}
diff --git a/escheduler-api/src/main/java/cn/escheduler/api/service/ProcessDefinitionService.java b/escheduler-api/src/main/java/cn/escheduler/api/service/ProcessDefinitionService.java
new file mode 100644
index 0000000000..1b0d806967
--- /dev/null
+++ b/escheduler-api/src/main/java/cn/escheduler/api/service/ProcessDefinitionService.java
@@ -0,0 +1,730 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package cn.escheduler.api.service;
+
+import cn.escheduler.api.dto.treeview.Instance;
+import cn.escheduler.api.dto.treeview.TreeViewDto;
+import cn.escheduler.api.enums.Status;
+import cn.escheduler.api.utils.Constants;
+import cn.escheduler.api.utils.PageInfo;
+import cn.escheduler.common.enums.Flag;
+import cn.escheduler.common.enums.ReleaseState;
+import cn.escheduler.common.enums.TaskType;
+import cn.escheduler.common.graph.DAG;
+import cn.escheduler.common.model.TaskNode;
+import cn.escheduler.common.model.TaskNodeRelation;
+import cn.escheduler.common.process.ProcessDag;
+import cn.escheduler.common.process.Property;
+import cn.escheduler.common.thread.Stopper;
+import cn.escheduler.common.utils.CollectionUtils;
+import cn.escheduler.common.utils.DateUtils;
+import cn.escheduler.common.utils.JSONUtils;
+import cn.escheduler.dao.ProcessDao;
+import cn.escheduler.dao.mapper.*;
+import cn.escheduler.dao.model.*;
+import com.alibaba.fastjson.JSON;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Service;
+import org.springframework.transaction.annotation.Transactional;
+
+import java.util.*;
+import java.util.concurrent.ConcurrentHashMap;
+
+import static cn.escheduler.api.enums.Status.UPDATE_PROCESS_DEFINITION_ERROR;
+import static cn.escheduler.api.service.SchedulerService.deleteSchedule;
+import static cn.escheduler.api.utils.CheckUtils.checkOtherParams;
+import static cn.escheduler.api.utils.CheckUtils.checkTaskNodeParameters;
+import static cn.escheduler.common.Constants.CMDPARAM_SUB_PROCESS_DEFINE_ID;
+
+/**
+ * process definition service
+ */
+@Service
+public class ProcessDefinitionService extends BaseDAGService {
+
+ private static final Logger logger = LoggerFactory.getLogger(ProcessDefinitionService.class);
+
+
+ @Autowired
+ private ProjectMapper projectMapper;
+
+ @Autowired
+ private ProjectService projectService;
+
+ @Autowired
+ private ProcessDefinitionMapper processDefineMapper;
+
+ @Autowired
+ private ProcessInstanceMapper processInstanceMapper;
+
+
+ @Autowired
+ private TaskInstanceMapper taskInstanceMapper;
+
+ @Autowired
+ private ScheduleMapper scheduleMapper;
+
+ @Autowired
+ private ProcessDao processDao;
+
+    /**
+     * create process definition
+     *
+     * @param loginUser login user
+     * @param projectName project name
+     * @param name process definition name
+     * @param processDefinitionJson process definition JSON
+     * @param desc description
+     * @param locations node locations
+     * @param connects node connections
+     * @return result map
+     */
+ public Map createProcessDefinition(User loginUser, String projectName, String name,
+ String processDefinitionJson, String desc, String locations, String connects) throws JsonProcessingException {
+
+ Map result = new HashMap<>(5);
+ Project project = projectMapper.queryByName(projectName);
+ // check project auth
+ Map checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
+ Status resultStatus = (Status) checkResult.get(Constants.STATUS);
+ if (resultStatus != Status.SUCCESS) {
+ return checkResult;
+ }
+
+ ProcessDefinition processDefine = new ProcessDefinition();
+ Date now = new Date();
+
+        ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
+        Map checkProcessJson = checkProcessNodeList(processData, processDefinitionJson);
+        if (checkProcessJson.get(Constants.STATUS) != Status.SUCCESS) {
+            // return the failed check result rather than the empty result map
+            return checkProcessJson;
+        }
+
+ processDefine.setName(name);
+ processDefine.setReleaseState(ReleaseState.OFFLINE);
+ processDefine.setProjectId(project.getId());
+ processDefine.setUserId(loginUser.getId());
+ processDefine.setProcessDefinitionJson(processDefinitionJson);
+ processDefine.setDesc(desc);
+ processDefine.setLocations(locations);
+ processDefine.setConnects(connects);
+
+ //custom global params
+ List globalParamsList = processData.getGlobalParams();
+ if (globalParamsList != null && globalParamsList.size() > 0) {
+ Set globalParamsSet = new HashSet<>(globalParamsList);
+ globalParamsList = new ArrayList<>(globalParamsSet);
+ processDefine.setGlobalParamList(globalParamsList);
+ }
+ processDefine.setCreateTime(now);
+ processDefine.setUpdateTime(now);
+ processDefine.setFlag(Flag.YES);
+ processDefineMapper.insert(processDefine);
+ putMsg(result, Status.SUCCESS);
+
+ return result;
+ }
+
+
+ /**
+     * query process definition list
+ *
+ * @param loginUser
+ * @param projectName
+ * @return
+ */
+ public Map queryProccessDefinitionList(User loginUser, String projectName) {
+
+ HashMap result = new HashMap<>(5);
+ Project project = projectMapper.queryByName(projectName);
+
+ Map checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
+ Status resultStatus = (Status) checkResult.get(Constants.STATUS);
+ if (resultStatus != Status.SUCCESS) {
+ return checkResult;
+ }
+
+ List resourceList = processDefineMapper.queryAllDefinitionList(project.getId());
+ result.put(Constants.DATA_LIST, resourceList);
+ putMsg(result, Status.SUCCESS);
+
+ return result;
+ }
+
+
+ /**
+     * query process definition list paging
+ *
+ * @param loginUser
+ * @param projectName
+ * @param searchVal
+ * @param pageNo
+ * @param pageSize
+ * @param userId
+ * @return
+ */
+ public Map queryProcessDefinitionListPaging(User loginUser, String projectName, String searchVal, Integer pageNo, Integer pageSize, Integer userId) {
+
+ Map result = new HashMap<>(5);
+ Project project = projectMapper.queryByName(projectName);
+
+ Map checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
+ Status resultStatus = (Status) checkResult.get(Constants.STATUS);
+ if (resultStatus != Status.SUCCESS) {
+ return checkResult;
+ }
+
+ Integer count = processDefineMapper.countDefineNumber(project.getId(), userId, searchVal);
+
+ PageInfo pageInfo = new PageInfo(pageNo, pageSize);
+ List resourceList = processDefineMapper.queryDefineListPaging(project.getId(),
+ searchVal, userId, pageInfo.getStart(), pageSize);
+ pageInfo.setTotalCount(count);
+ pageInfo.setLists(resourceList);
+ result.put(Constants.DATA_LIST, pageInfo);
+ putMsg(result, Status.SUCCESS);
+
+ return result;
+ }
+
+ /**
+     * query detail of process definition
+ *
+ * @param loginUser
+ * @param projectName
+ * @param processId
+ * @return
+ */
+ public Map queryProccessDefinitionById(User loginUser, String projectName, Integer processId) {
+
+
+ Map result = new HashMap<>(5);
+ Project project = projectMapper.queryByName(projectName);
+
+ Map checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
+ Status resultStatus = (Status) checkResult.get(Constants.STATUS);
+ if (resultStatus != Status.SUCCESS) {
+ return checkResult;
+ }
+
+ ProcessDefinition processDefinition = processDefineMapper.queryByDefineId(processId);
+ if (processDefinition == null) {
+            // a process definition is being queried, so report PROCESS_DEFINE_NOT_EXIST
+            putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, processId);
+ } else {
+ result.put(Constants.DATA_LIST, processDefinition);
+ putMsg(result, Status.SUCCESS);
+ }
+ return result;
+ }
+
+ /**
+ * update process definition
+ *
+ * @param loginUser
+ * @param projectName
+ * @param id
+ * @param name
+ * @param processDefinitionJson
+ * @param desc
+ * @param locations
+ * @param connects
+ * @return
+ */
+ public Map updateProccessDefinition(User loginUser, String projectName, int id, String name,
+ String processDefinitionJson, String desc,
+ String locations, String connects) throws JsonProcessingException {
+ Map result = new HashMap<>(5);
+
+ Project project = projectMapper.queryByName(projectName);
+ Map checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
+ Status resultStatus = (Status) checkResult.get(Constants.STATUS);
+ if (resultStatus != Status.SUCCESS) {
+ return checkResult;
+ }
+
+        ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
+        Map checkProcessJson = checkProcessNodeList(processData, processDefinitionJson);
+        if (checkProcessJson.get(Constants.STATUS) != Status.SUCCESS) {
+            // return the failed check result rather than the empty result map
+            return checkProcessJson;
+        }
+ ProcessDefinition processDefinition = processDao.findProcessDefineById(id);
+ if (processDefinition == null) {
+ // check process definition exists
+ putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, id);
+ return result;
+ } else if (processDefinition.getReleaseState() == ReleaseState.ONLINE) {
+ // online can not permit edit
+ putMsg(result, Status.PROCESS_DEFINE_NOT_ALLOWED_EDIT, processDefinition.getName());
+ return result;
+ } else {
+ putMsg(result, Status.SUCCESS);
+ }
+
+        // reuse the definition fetched above instead of querying it again
+        ProcessDefinition processDefine = processDefinition;
+ Date now = new Date();
+
+ processDefine.setId(id);
+ processDefine.setName(name);
+ processDefine.setReleaseState(ReleaseState.OFFLINE);
+ processDefine.setProjectId(project.getId());
+ processDefine.setUserId(loginUser.getId());
+ processDefine.setProcessDefinitionJson(processDefinitionJson);
+ processDefine.setDesc(desc);
+ processDefine.setLocations(locations);
+ processDefine.setConnects(connects);
+
+ //custom global params
+ List globalParamsList = processData.getGlobalParams();
+ if (globalParamsList != null && globalParamsList.size() > 0) {
+ Set userDefParamsSet = new HashSet<>(globalParamsList);
+ globalParamsList = new ArrayList<>(userDefParamsSet);
+ processDefine.setGlobalParamList(globalParamsList);
+ }
+ processDefine.setUpdateTime(now);
+ processDefine.setFlag(Flag.YES);
+ if (processDefineMapper.update(processDefine) > 0) {
+ putMsg(result, Status.SUCCESS);
+
+ } else {
+ putMsg(result, UPDATE_PROCESS_DEFINITION_ERROR);
+ }
+ return result;
+ }
+
+ /**
+ * verify process definition name unique
+ *
+ * @param loginUser
+ * @param projectName
+ * @param name
+ * @return
+ */
+ public Map verifyProccessDefinitionName(User loginUser, String projectName, String name) {
+
+ Map result = new HashMap<>();
+ Project project = projectMapper.queryByName(projectName);
+
+ Map