
Merge pull request #19 from apache/dev

update code from apa
Branch: pull/3/MERGE
Author: BoYiZhang, 4 years ago, committed by GitHub
Commit: 622bce603a
Changed files (number of changed lines in parentheses):

  1. dolphinscheduler-api/pom.xml (6)
  2. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/TaskRecordController.java (4)
  3. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java (4)
  4. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java (3)
  5. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/TenantServiceImpl.java (26)
  6. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/utils/RegexUtils.java (47)
  7. dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionServiceTest.java (4)
  8. dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/utils/RegexUtilsTest.java (39)
  9. dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/ProcessDefinitionMapper.java (9)
  10. dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProcessDefinitionMapper.xml (6)
  11. dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProcessInstanceMapper.xml (3)
  12. dolphinscheduler-remote/src/main/java/org/apache/dolphinscheduler/remote/NettyRemotingClient.java (51)
  13. dolphinscheduler-remote/src/main/java/org/apache/dolphinscheduler/remote/NettyRemotingServer.java (12)
  14. dolphinscheduler-remote/src/main/java/org/apache/dolphinscheduler/remote/command/CommandType.java (106)
  15. dolphinscheduler-remote/src/main/java/org/apache/dolphinscheduler/remote/handler/NettyClientHandler.java (75)
  16. dolphinscheduler-remote/src/main/java/org/apache/dolphinscheduler/remote/handler/NettyServerHandler.java (67)
  17. dolphinscheduler-remote/src/main/java/org/apache/dolphinscheduler/remote/utils/Constants.java (5)
  18. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/zk/ZKMasterClient.java (604)
  19. dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java (167)
  20. dolphinscheduler-service/src/test/java/org/apache/dolphinscheduler/service/process/ProcessServiceTest.java (116)
  21. dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/config.js (7)
  22. dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/_source/dependentTimeout.vue (218)
  23. dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/formModel.vue (33)
  24. dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/_source/instanceConditions/common.js (3)
  25. dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/taskInstance/_source/list.vue (23)
  26. dolphinscheduler-ui/src/js/conf/home/store/dag/actions.js (12)
  27. dolphinscheduler-ui/src/js/module/i18n/locale/en_US.js (6)
  28. dolphinscheduler-ui/src/js/module/i18n/locale/zh_CN.js (6)
  29. pom.xml (1)
  30. sql/upgrade/1.3.3_schema/mysql/dolphinscheduler_ddl.sql (2)
  31. sql/upgrade/1.3.3_schema/postgresql/dolphinscheduler_ddl.sql (4)

6
dolphinscheduler-api/pom.xml

@@ -162,6 +162,12 @@
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
+           <exclusions>
+               <exclusion>
+                   <groupId>org.slf4j</groupId>
+                   <artifactId>slf4j-log4j12</artifactId>
+               </exclusion>
+           </exclusions>
        </dependency>
        <dependency>

4
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/TaskRecordController.java

@@ -22,11 +22,13 @@ import org.apache.dolphinscheduler.api.service.TaskRecordService;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.dao.entity.User;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.HttpStatus;
import org.springframework.web.bind.annotation.*;
import springfox.documentation.annotations.ApiIgnore;
import java.util.Map;
@@ -59,7 +61,7 @@ public class TaskRecordController extends BaseController {
     * @param taskDate task date
     * @param startTime start time
     * @param endTime end time
-    * @param pageNo page numbere
+    * @param pageNo page number
     * @param pageSize page size
     * @return task record list
     */

4
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java

@@ -193,7 +193,9 @@ public enum Status {
    BATCH_COPY_PROCESS_DEFINITION_ERROR(10159, "batch copy process definition error", "复制工作流错误"),
    BATCH_MOVE_PROCESS_DEFINITION_ERROR(10160, "batch move process definition error", "移动工作流错误"),
    QUERY_WORKFLOW_LINEAGE_ERROR(10161, "query workflow lineage error", "查询血缘失败"),
    DELETE_PROCESS_DEFINITION_BY_ID_FAIL(10162, "delete process definition by id fail, for there are {0} process instances in executing using it", "删除工作流定义失败,有[{0}]个运行中的工作流实例正在使用"),
+   CHECK_TENANT_CODE_ERROR(10163, "Please enter the English tenant code", "请输入英文租户编码"),
    UDF_FUNCTION_NOT_EXIST(20001, "UDF function not found", "UDF函数不存在"),
    UDF_FUNCTION_EXISTS(20002, "UDF function already exists", "UDF函数已存在"),

3
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java

@@ -434,7 +434,7 @@ public class ProcessDefinitionServiceImpl extends BaseService implements
        if (resultEnum != Status.SUCCESS) {
            return checkResult;
        }
-       ProcessDefinition processDefinition = processDefineMapper.queryByDefineName(project.getId(), name);
+       ProcessDefinition processDefinition = processDefineMapper.verifyByDefineName(project.getId(), name);
        if (processDefinition == null) {
            putMsg(result, Status.SUCCESS);
        } else {
@@ -683,6 +683,7 @@ public class ProcessDefinitionServiceImpl extends BaseService implements
        exportProcessMeta.setProjectName(processDefinition.getProjectName());
        exportProcessMeta.setProcessDefinitionName(processDefinition.getName());
        exportProcessMeta.setProcessDefinitionJson(processDefinition.getProcessDefinitionJson());
+       exportProcessMeta.setProcessDefinitionDescription(processDefinition.getDescription());
        exportProcessMeta.setProcessDefinitionLocations(processDefinition.getLocations());
        exportProcessMeta.setProcessDefinitionConnects(processDefinition.getConnects());

26
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/TenantServiceImpl.java

@@ -21,6 +21,7 @@ import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.service.BaseService;
import org.apache.dolphinscheduler.api.service.TenantService;
import org.apache.dolphinscheduler.api.utils.PageInfo;
+import org.apache.dolphinscheduler.api.utils.RegexUtils;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
@@ -73,11 +74,11 @@ public class TenantServiceImpl extends BaseService implements TenantService {
    /**
     * create tenant
     *
     * @param loginUser login user
     * @param tenantCode tenant code
     * @param tenantName tenant name
     * @param queueId queue id
     * @param desc description
     * @return create result code
     * @throws Exception exception
     */
@@ -94,6 +95,11 @@ public class TenantServiceImpl extends BaseService implements TenantService {
            return result;
        }
+       if (RegexUtils.isNumeric(tenantCode)) {
+           putMsg(result, Status.CHECK_TENANT_CODE_ERROR);
+           return result;
+       }
        if (checkTenantExists(tenantCode)) {
            putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, tenantCode);
            return result;
@@ -131,8 +137,8 @@ public class TenantServiceImpl extends BaseService implements TenantService {
     *
     * @param loginUser login user
     * @param searchVal search value
     * @param pageNo page number
     * @param pageSize page size
     * @return tenant list page
     */
    public Map<String, Object> queryTenantList(User loginUser, String searchVal, Integer pageNo, Integer pageSize) {
@@ -157,12 +163,12 @@ public class TenantServiceImpl extends BaseService implements TenantService {
    /**
     * updateProcessInstance tenant
     *
     * @param loginUser login user
     * @param id tenant id
     * @param tenantCode tenant code
     * @param tenantName tenant name
     * @param queueId queue id
     * @param desc description
     * @return update result code
     * @throws Exception exception
     */
@@ -229,7 +235,7 @@ public class TenantServiceImpl extends BaseService implements TenantService {
     * delete tenant
     *
     * @param loginUser login user
     * @param id tenant id
     * @return delete result code
     * @throws Exception exception
     */

47
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/utils/RegexUtils.java

@@ -0,0 +1,47 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.utils;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* Regular expression utility class.
*/
public class RegexUtils {
/**
* check number regex expression
*/
private static final String CHECK_NUMBER = "^-?\\d+(\\.\\d+)?$";
private RegexUtils() {
}
/**
* check if the input is number
*
* @param str input
* @return true if the input string is a number
*/
public static boolean isNumeric(String str) {
Pattern pattern = Pattern.compile(CHECK_NUMBER);
Matcher isNum = pattern.matcher(str);
return isNum.matches();
}
}
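
For reference, a small sketch of how the new helper is meant to be used by the tenant-code check in TenantServiceImpl above. Only RegexUtils.isNumeric is from this change; the wrapper class and method names here are illustrative:

    import org.apache.dolphinscheduler.api.utils.RegexUtils;

    public class TenantCodeCheckSketch {

        // illustrative stand-in for the check added to TenantServiceImpl.createTenant
        static boolean isValidTenantCode(String tenantCode) {
            // purely numeric codes are rejected with Status.CHECK_TENANT_CODE_ERROR
            return !RegexUtils.isNumeric(tenantCode);
        }

        public static void main(String[] args) {
            System.out.println(isValidTenantCode("123467854678")); // false, matches ^-?\d+(\.\d+)?$
            System.out.println(isValidTenantCode("0.0.01"));       // true, not a plain number
            System.out.println(isValidTenantCode("tenant_a"));     // true
        }
    }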

4
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionServiceTest.java

@@ -618,13 +618,13 @@ public class ProcessDefinitionServiceTest {
        //project check auth success, process not exist
        putMsg(result, Status.SUCCESS, projectName);
-       Mockito.when(processDefineMapper.queryByDefineName(project.getId(), "test_pdf")).thenReturn(null);
+       Mockito.when(processDefineMapper.verifyByDefineName(project.getId(), "test_pdf")).thenReturn(null);
        Map<String, Object> processNotExistRes = processDefinitionService.verifyProcessDefinitionName(loginUser,
                "project_test1", "test_pdf");
        Assert.assertEquals(Status.SUCCESS, processNotExistRes.get(Constants.STATUS));

        //process exist
-       Mockito.when(processDefineMapper.queryByDefineName(project.getId(), "test_pdf")).thenReturn(getProcessDefinition());
+       Mockito.when(processDefineMapper.verifyByDefineName(project.getId(), "test_pdf")).thenReturn(getProcessDefinition());
        Map<String, Object> processExistRes = processDefinitionService.verifyProcessDefinitionName(loginUser,
                "project_test1", "test_pdf");
        Assert.assertEquals(Status.PROCESS_INSTANCE_EXIST, processExistRes.get(Constants.STATUS));

39
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/utils/RegexUtilsTest.java

@@ -0,0 +1,39 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.utils;
import org.junit.Assert;
import org.junit.Test;
/**
* RegexUtils test case
*/
public class RegexUtilsTest {
@Test
public void testIsNumeric() {
String num1 = "123467854678";
boolean numeric = RegexUtils.isNumeric(num1);
Assert.assertTrue(numeric);
String num2 = "0.0.01";
boolean numeric2 = RegexUtils.isNumeric(num2);
Assert.assertFalse(numeric2);
}
}

9
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/ProcessDefinitionMapper.java

@@ -34,6 +34,15 @@ import com.baomidou.mybatisplus.core.metadata.IPage;
 */
public interface ProcessDefinitionMapper extends BaseMapper<ProcessDefinition> {

+    /**
+     * verify process definition by name
+     *
+     * @param projectId projectId
+     * @param name name
+     * @return process definition
+     */
+    ProcessDefinition verifyByDefineName(@Param("projectId") int projectId,
+                                         @Param("processDefinitionName") String name);

    /**
     * query process definition by name

6
dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProcessDefinitionMapper.xml

@@ -18,6 +18,12 @@
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper">
+    <select id="verifyByDefineName" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition">
+        select pd.*
+        from t_ds_process_definition pd
+        WHERE pd.project_id = #{projectId}
+        and pd.name = #{processDefinitionName}
+    </select>
    <select id="queryByDefineName" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition">
        select pd.*,u.user_name,p.name as project_name,t.tenant_code,t.tenant_name,q.queue,q.queue_name
        from t_ds_process_definition pd

3
dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProcessInstanceMapper.xml

@@ -167,7 +167,7 @@
    <select id="queryLastRunningProcess" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
        select *
        from t_ds_process_instance
-       where 1=1
+       where process_definition_id=#{processDefinitionId}
        <if test="states !=null and states.length != 0">
            and state in
            <foreach collection="states" item="i" index="index" open="(" separator="," close=")">
@@ -175,7 +175,6 @@
            </foreach>
        </if>
        <if test="startTime!=null and endTime != null ">
-           and process_definition_id=#{processDefinitionId}
            and (schedule_time <![CDATA[ >= ]]> #{startTime} and schedule_time <![CDATA[ <= ]]> #{endTime}
            or start_time <![CDATA[ >= ]]> #{startTime} and start_time <![CDATA[ <= ]]> #{endTime})
        </if>

51
dolphinscheduler-remote/src/main/java/org/apache/dolphinscheduler/remote/NettyRemotingClient.java

@@ -17,17 +17,6 @@
package org.apache.dolphinscheduler.remote;

-import io.netty.bootstrap.Bootstrap;
-import io.netty.channel.Channel;
-import io.netty.channel.ChannelFuture;
-import io.netty.channel.ChannelFutureListener;
-import io.netty.channel.ChannelInitializer;
-import io.netty.channel.ChannelOption;
-import io.netty.channel.EventLoopGroup;
-import io.netty.channel.epoll.EpollEventLoopGroup;
-import io.netty.channel.nio.NioEventLoopGroup;
-import io.netty.channel.socket.SocketChannel;
import org.apache.dolphinscheduler.remote.codec.NettyDecoder;
import org.apache.dolphinscheduler.remote.codec.NettyEncoder;
import org.apache.dolphinscheduler.remote.command.Command;
@@ -41,19 +30,40 @@ import org.apache.dolphinscheduler.remote.future.ReleaseSemaphore;
import org.apache.dolphinscheduler.remote.future.ResponseFuture;
import org.apache.dolphinscheduler.remote.handler.NettyClientHandler;
import org.apache.dolphinscheduler.remote.processor.NettyRequestProcessor;
-import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.remote.utils.CallerThreadExecutePolicy;
+import org.apache.dolphinscheduler.remote.utils.Constants;
+import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.remote.utils.NamedThreadFactory;
import org.apache.dolphinscheduler.remote.utils.NettyUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;

import java.net.InetSocketAddress;
-import java.util.concurrent.*;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.netty.bootstrap.Bootstrap;
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelFuture;
+import io.netty.channel.ChannelFutureListener;
+import io.netty.channel.ChannelInitializer;
+import io.netty.channel.ChannelOption;
+import io.netty.channel.EventLoopGroup;
+import io.netty.channel.epoll.EpollEventLoopGroup;
+import io.netty.channel.nio.NioEventLoopGroup;
+import io.netty.channel.socket.SocketChannel;
+import io.netty.handler.timeout.IdleStateHandler;

/**
 * remoting netty client
 */
@@ -162,11 +172,10 @@ public class NettyRemotingClient {
                .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, clientConfig.getConnectTimeoutMillis())
                .handler(new ChannelInitializer<SocketChannel>() {
                    @Override
-                   public void initChannel(SocketChannel ch) throws Exception {
-                       ch.pipeline().addLast(
-                           new NettyDecoder(),
-                           clientHandler,
-                           encoder);
+                   public void initChannel(SocketChannel ch) {
+                       ch.pipeline()
+                           .addLast("client-idle-handler", new IdleStateHandler(Constants.NETTY_CLIENT_HEART_BEAT_TIME, 0, 0, TimeUnit.MILLISECONDS))
+                           .addLast(new NettyDecoder(), clientHandler, encoder);
                    }
                });
        this.responseFutureExecutor.scheduleAtFixedRate(new Runnable() {

12
dolphinscheduler-remote/src/main/java/org/apache/dolphinscheduler/remote/NettyRemotingServer.java

@@ -29,6 +29,7 @@ import org.apache.dolphinscheduler.remote.utils.NettyUtils;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
@@ -39,11 +40,11 @@ import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
-import io.netty.channel.ChannelPipeline;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.epoll.EpollEventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
+import io.netty.handler.timeout.IdleStateHandler;

/**
 * remoting netty server
@@ -183,10 +184,11 @@ public class NettyRemotingServer {
     * @param ch socket channel
     */
    private void initNettyChannel(SocketChannel ch) {
-       ChannelPipeline pipeline = ch.pipeline();
-       pipeline.addLast("encoder", encoder);
-       pipeline.addLast("decoder", new NettyDecoder());
-       pipeline.addLast("handler", serverHandler);
+       ch.pipeline()
+           .addLast("encoder", encoder)
+           .addLast("decoder", new NettyDecoder())
+           .addLast("server-idle-handle", new IdleStateHandler(0, 0, Constants.NETTY_SERVER_HEART_BEAT_TIME, TimeUnit.MILLISECONDS))
+           .addLast("handler", serverHandler);
    }

    /**

106
dolphinscheduler-remote/src/main/java/org/apache/dolphinscheduler/remote/command/CommandType.java

@@ -1 +1,105 @@
-/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.remote.command; public enum CommandType { /** * remove task log request, */ REMOVE_TAK_LOG_REQUEST, /** * remove task log response */ REMOVE_TAK_LOG_RESPONSE, /** * roll view log request */ ROLL_VIEW_LOG_REQUEST, /** * roll view log response */ ROLL_VIEW_LOG_RESPONSE, /** * view whole log request */ VIEW_WHOLE_LOG_REQUEST, /** * view whole log response */ VIEW_WHOLE_LOG_RESPONSE, /** * get log bytes request */ GET_LOG_BYTES_REQUEST, /** * get log bytes response */ GET_LOG_BYTES_RESPONSE, WORKER_REQUEST, MASTER_RESPONSE, /** * execute task request */ TASK_EXECUTE_REQUEST, /** * execute task ack */ TASK_EXECUTE_ACK, /** * execute task response */ TASK_EXECUTE_RESPONSE, /** * kill task */ TASK_KILL_REQUEST, /** * kill task response */ TASK_KILL_RESPONSE, /** * ping */ PING, /** * pong */ PONG; }
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.remote.command;
public enum CommandType {
/**
* remove task log request,
*/
REMOVE_TAK_LOG_REQUEST,
/**
* remove task log response
*/
REMOVE_TAK_LOG_RESPONSE,
/**
* roll view log request
*/
ROLL_VIEW_LOG_REQUEST,
/**
* roll view log response
*/
ROLL_VIEW_LOG_RESPONSE,
/**
* view whole log request
*/
VIEW_WHOLE_LOG_REQUEST,
/**
* view whole log response
*/
VIEW_WHOLE_LOG_RESPONSE,
/**
* get log bytes request
*/
GET_LOG_BYTES_REQUEST,
/**
* get log bytes response
*/
GET_LOG_BYTES_RESPONSE,
WORKER_REQUEST,
MASTER_RESPONSE,
/**
* execute task request
*/
TASK_EXECUTE_REQUEST,
/**
* execute task ack
*/
TASK_EXECUTE_ACK,
/**
* execute task response
*/
TASK_EXECUTE_RESPONSE,
/**
* kill task
*/
TASK_KILL_REQUEST,
/**
* kill task response
*/
TASK_KILL_RESPONSE,
/**
* HEART_BEAT
*/
HEART_BEAT,
/**
* ping
*/
PING,
/**
* pong
*/
PONG;
}

75
dolphinscheduler-remote/src/main/java/org/apache/dolphinscheduler/remote/handler/NettyClientHandler.java

@@ -14,9 +14,9 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.remote.handler;

-import io.netty.channel.*;
import org.apache.dolphinscheduler.remote.NettyRemotingClient;
import org.apache.dolphinscheduler.remote.command.Command;
import org.apache.dolphinscheduler.remote.command.CommandType;
@@ -25,16 +25,24 @@ import org.apache.dolphinscheduler.remote.processor.NettyRequestProcessor;
import org.apache.dolphinscheduler.remote.utils.ChannelUtils;
import org.apache.dolphinscheduler.remote.utils.Constants;
import org.apache.dolphinscheduler.remote.utils.Pair;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;

+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelFutureListener;
+import io.netty.channel.ChannelHandler;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelInboundHandlerAdapter;
+import io.netty.handler.timeout.IdleStateEvent;

/**
 * netty client request handler
 */
@ChannelHandler.Sharable
public class NettyClientHandler extends ChannelInboundHandlerAdapter {
@@ -42,12 +50,14 @@ public class NettyClientHandler extends ChannelInboundHandlerAdapter {
    private final Logger logger = LoggerFactory.getLogger(NettyClientHandler.class);

    /**
     * netty client
     */
    private final NettyRemotingClient nettyRemotingClient;

+    private static byte[] heartBeatData = "heart_beat".getBytes();

    /**
     * callback thread executor
     */
    private final ExecutorService callbackExecutor;
@@ -57,19 +67,19 @@ public class NettyClientHandler extends ChannelInboundHandlerAdapter {
    private final ConcurrentHashMap<CommandType, Pair<NettyRequestProcessor, ExecutorService>> processors;

    /**
     * default executor
     */
    private final ExecutorService defaultExecutor = Executors.newFixedThreadPool(Constants.CPUS);

    public NettyClientHandler(NettyRemotingClient nettyRemotingClient, ExecutorService callbackExecutor) {
        this.nettyRemotingClient = nettyRemotingClient;
        this.callbackExecutor = callbackExecutor;
        this.processors = new ConcurrentHashMap();
    }

    /**
     * When the current channel is not active,
     * the current channel has reached the end of its life cycle
     *
     * @param ctx channel handler context
     * @throws Exception
@@ -81,7 +91,7 @@ public class NettyClientHandler extends ChannelInboundHandlerAdapter {
    }

    /**
     * The current channel reads data from the remote
     *
     * @param ctx channel handler context
     * @param msg message
@@ -89,55 +99,55 @@
     */
    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
        processReceived(ctx.channel(), (Command) msg);
    }

    /**
     * register processor
     *
     * @param commandType command type
     * @param processor processor
     */
    public void registerProcessor(final CommandType commandType, final NettyRequestProcessor processor) {
        this.registerProcessor(commandType, processor, null);
    }

    /**
     * register processor
     *
     * @param commandType command type
     * @param processor processor
     * @param executor thread executor
     */
    public void registerProcessor(final CommandType commandType, final NettyRequestProcessor processor, final ExecutorService executor) {
        ExecutorService executorRef = executor;
        if (executorRef == null) {
            executorRef = defaultExecutor;
        }
        this.processors.putIfAbsent(commandType, new Pair<>(processor, executorRef));
    }

    /**
     * process received logic
     *
     * @param command command
     */
    private void processReceived(final Channel channel, final Command command) {
        ResponseFuture future = ResponseFuture.getFuture(command.getOpaque());
        if (future != null) {
            future.setResponseCommand(command);
            future.release();
            if (future.getInvokeCallback() != null) {
                this.callbackExecutor.submit(new Runnable() {
                    @Override
                    public void run() {
                        future.executeInvokeCallback();
                    }
                });
            } else {
                future.putResponse(command);
            }
        } else {
            processByCommandType(channel, command);
        }
    }
@@ -163,9 +173,10 @@ public class NettyClientHandler extends ChannelInboundHandlerAdapter {
    }

    /**
     * caught exception
     *
     * @param ctx channel handler context
     * @param cause cause
     * @throws Exception
     */
    @Override
@@ -175,4 +186,18 @@ public class NettyClientHandler extends ChannelInboundHandlerAdapter {
        ctx.channel().close();
    }

+    @Override
+    public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
+        if (evt instanceof IdleStateEvent) {
+            Command heartBeat = new Command();
+            heartBeat.setType(CommandType.HEART_BEAT);
+            heartBeat.setBody(heartBeatData);
+            ctx.writeAndFlush(heartBeat)
+                    .addListener(ChannelFutureListener.CLOSE_ON_FAILURE);
+        } else {
+            super.userEventTriggered(ctx, evt);
+        }
+    }
}

67
dolphinscheduler-remote/src/main/java/org/apache/dolphinscheduler/remote/handler/NettyServerHandler.java

@@ -17,22 +17,30 @@
package org.apache.dolphinscheduler.remote.handler;

-import io.netty.channel.*;
import org.apache.dolphinscheduler.remote.NettyRemotingServer;
import org.apache.dolphinscheduler.remote.command.Command;
import org.apache.dolphinscheduler.remote.command.CommandType;
import org.apache.dolphinscheduler.remote.processor.NettyRequestProcessor;
import org.apache.dolphinscheduler.remote.utils.ChannelUtils;
import org.apache.dolphinscheduler.remote.utils.Pair;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.RejectedExecutionException;

+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelConfig;
+import io.netty.channel.ChannelHandler;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelInboundHandlerAdapter;
+import io.netty.handler.timeout.IdleStateEvent;

/**
 * netty server request handler
 */
@ChannelHandler.Sharable
public class NettyServerHandler extends ChannelInboundHandlerAdapter {
@@ -40,22 +48,23 @@ public class NettyServerHandler extends ChannelInboundHandlerAdapter {
    private final Logger logger = LoggerFactory.getLogger(NettyServerHandler.class);

    /**
     * netty remote server
     */
    private final NettyRemotingServer nettyRemotingServer;

    /**
     * server processors queue
     */
    private final ConcurrentHashMap<CommandType, Pair<NettyRequestProcessor, ExecutorService>> processors = new ConcurrentHashMap();

    public NettyServerHandler(NettyRemotingServer nettyRemotingServer) {
        this.nettyRemotingServer = nettyRemotingServer;
    }

    /**
     * When the current channel is not active,
     * the current channel has reached the end of its life cycle
     *
     * @param ctx channel handler context
     * @throws Exception
     */
@@ -73,38 +82,39 @@
     */
    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
        processReceived(ctx.channel(), (Command) msg);
    }

    /**
     * register processor
     *
     * @param commandType command type
     * @param processor processor
     */
    public void registerProcessor(final CommandType commandType, final NettyRequestProcessor processor) {
        this.registerProcessor(commandType, processor, null);
    }

    /**
     * register processor
     *
     * @param commandType command type
     * @param processor processor
     * @param executor thread executor
     */
    public void registerProcessor(final CommandType commandType, final NettyRequestProcessor processor, final ExecutorService executor) {
        ExecutorService executorRef = executor;
        if (executorRef == null) {
            executorRef = nettyRemotingServer.getDefaultExecutor();
        }
        this.processors.putIfAbsent(commandType, new Pair<>(processor, executorRef));
    }

    /**
     * process received logic
     *
     * @param channel channel
     * @param msg message
     */
    private void processReceived(final Channel channel, final Command msg) {
        final CommandType commandType = msg.getType();
@@ -132,22 +142,22 @@
    }

    /**
     * caught exception
     *
     * @param ctx channel handler context
     * @param cause cause
     * @throws Exception
     */
    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
        logger.error("exceptionCaught : {}", cause.getMessage(), cause);
        ctx.channel().close();
    }

    /**
     * channel write changed
     *
     * @param ctx channel handler context
     * @throws Exception
     */
    @Override
@@ -158,16 +168,25 @@
        if (!ch.isWritable()) {
            if (logger.isWarnEnabled()) {
                logger.warn("{} is not writable, over high water level : {}",
                        ch, config.getWriteBufferHighWaterMark());
            }
            config.setAutoRead(false);
        } else {
            if (logger.isWarnEnabled()) {
                logger.warn("{} is writable, to low water : {}",
                        ch, config.getWriteBufferLowWaterMark());
            }
            config.setAutoRead(true);
        }
    }

+    @Override
+    public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
+        if (evt instanceof IdleStateEvent) {
+            ctx.channel().close();
+        } else {
+            super.userEventTriggered(ctx, evt);
+        }
+    }
}
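
Taken together, the remote-module changes implement a keep-alive: the client pipeline's IdleStateHandler fires after NETTY_CLIENT_HEART_BEAT_TIME (60s) of read idleness and the client handler sends a HEART_BEAT command, while the server pipeline's IdleStateHandler fires after NETTY_SERVER_HEART_BEAT_TIME (3m1s) of total idleness and closes the channel, so a healthy client always refreshes the server's timer before it expires. Below is a minimal standalone Netty sketch of the same idle/heartbeat pattern, not DolphinScheduler code: timeouts are shortened, a raw byte payload stands in for the Command codec, and all names are illustrative.

    import io.netty.bootstrap.Bootstrap;
    import io.netty.bootstrap.ServerBootstrap;
    import io.netty.buffer.Unpooled;
    import io.netty.channel.Channel;
    import io.netty.channel.ChannelFutureListener;
    import io.netty.channel.ChannelHandlerContext;
    import io.netty.channel.ChannelInboundHandlerAdapter;
    import io.netty.channel.ChannelInitializer;
    import io.netty.channel.EventLoopGroup;
    import io.netty.channel.nio.NioEventLoopGroup;
    import io.netty.channel.socket.SocketChannel;
    import io.netty.channel.socket.nio.NioServerSocketChannel;
    import io.netty.channel.socket.nio.NioSocketChannel;
    import io.netty.handler.timeout.IdleStateEvent;
    import io.netty.handler.timeout.IdleStateHandler;
    import io.netty.util.ReferenceCountUtil;
    import java.util.concurrent.TimeUnit;

    public class IdleHeartBeatSketch {

        public static void main(String[] args) throws Exception {
            EventLoopGroup group = new NioEventLoopGroup();
            try {
                // Server side: close the channel after 5 seconds without any traffic
                // (the PR uses 3 minutes + 1 second via Constants.NETTY_SERVER_HEART_BEAT_TIME).
                ServerBootstrap server = new ServerBootstrap()
                        .group(group)
                        .channel(NioServerSocketChannel.class)
                        .childHandler(new ChannelInitializer<SocketChannel>() {
                            @Override
                            protected void initChannel(SocketChannel ch) {
                                ch.pipeline()
                                        .addLast(new IdleStateHandler(0, 0, 5, TimeUnit.SECONDS))
                                        .addLast(new ChannelInboundHandlerAdapter() {
                                            @Override
                                            public void channelRead(ChannelHandlerContext ctx, Object msg) {
                                                ReferenceCountUtil.release(msg); // any inbound data resets the idle timer
                                            }

                                            @Override
                                            public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
                                                if (evt instanceof IdleStateEvent) {
                                                    ctx.channel().close(); // same policy as NettyServerHandler
                                                } else {
                                                    super.userEventTriggered(ctx, evt);
                                                }
                                            }
                                        });
                            }
                        });
                server.bind(12345).sync();

                // Client side: after 2 seconds of read idleness, push a small heartbeat payload
                // (the PR sends a Command of type HEART_BEAT every 60 seconds).
                Bootstrap client = new Bootstrap()
                        .group(group)
                        .channel(NioSocketChannel.class)
                        .handler(new ChannelInitializer<SocketChannel>() {
                            @Override
                            protected void initChannel(SocketChannel ch) {
                                ch.pipeline()
                                        .addLast(new IdleStateHandler(2, 0, 0, TimeUnit.SECONDS))
                                        .addLast(new ChannelInboundHandlerAdapter() {
                                            @Override
                                            public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
                                                if (evt instanceof IdleStateEvent) {
                                                    ctx.writeAndFlush(Unpooled.copiedBuffer("heart_beat".getBytes()))
                                                            .addListener(ChannelFutureListener.CLOSE_ON_FAILURE);
                                                } else {
                                                    super.userEventTriggered(ctx, evt);
                                                }
                                            }
                                        });
                            }
                        });
                Channel ch = client.connect("127.0.0.1", 12345).sync().channel();

                // Run for a while: heartbeats every ~2s keep the server's 5s idle timer from firing.
                Thread.sleep(12_000);
                ch.close().sync();
            } finally {
                group.shutdownGracefully();
            }
        }
    }

Because the client heartbeat interval is deliberately shorter than the server idle timeout, only a client that has really stopped (crashed or partitioned) lets the server-side timer expire and the dead channel get closed.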

5
dolphinscheduler-remote/src/main/java/org/apache/dolphinscheduler/remote/utils/Constants.java

@@ -20,7 +20,6 @@ package org.apache.dolphinscheduler.remote.utils;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;

/**
 * constant
 */
@@ -30,6 +29,10 @@ public class Constants {
    public static final String SLASH = "/";

+    public static final int NETTY_SERVER_HEART_BEAT_TIME = 1000 * 60 * 3 + 1000;
+
+    public static final int NETTY_CLIENT_HEART_BEAT_TIME = 1000 * 60;

    /**
     * charset
     */

604
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/zk/ZKMasterClient.java

@@ -20,6 +20,7 @@ import org.apache.commons.lang.StringUtils;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.cache.TreeCacheEvent;
import org.apache.curator.framework.recipes.locks.InterProcessMutex;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.ZKNodeType;
@@ -33,6 +34,7 @@ import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.utils.ProcessUtils;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.zk.AbstractZKClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
@ -45,309 +47,309 @@ import static org.apache.dolphinscheduler.common.Constants.SLEEP_TIME_MILLIS;
/** /**
* zookeeper master client * zookeeper master client
* * <p>
* single instance * single instance
*/ */
@Component @Component
public class ZKMasterClient extends AbstractZKClient { public class ZKMasterClient extends AbstractZKClient {
/** /**
* logger * logger
*/ */
private static final Logger logger = LoggerFactory.getLogger(ZKMasterClient.class); private static final Logger logger = LoggerFactory.getLogger(ZKMasterClient.class);
/** /**
* process service * process service
*/ */
@Autowired @Autowired
private ProcessService processService; private ProcessService processService;
public void start() { public void start() {
InterProcessMutex mutex = null; InterProcessMutex mutex = null;
try { try {
// create distributed lock with the root node path of the lock space as /dolphinscheduler/lock/failover/master // create distributed lock with the root node path of the lock space as /dolphinscheduler/lock/failover/master
String znodeLock = getMasterStartUpLockPath(); String znodeLock = getMasterStartUpLockPath();
mutex = new InterProcessMutex(getZkClient(), znodeLock); mutex = new InterProcessMutex(getZkClient(), znodeLock);
mutex.acquire(); mutex.acquire();
// init system znode // init system znode
this.initSystemZNode(); this.initSystemZNode();
while (!checkZKNodeExists(NetUtils.getHost(), ZKNodeType.MASTER)){ while (!checkZKNodeExists(NetUtils.getHost(), ZKNodeType.MASTER)) {
ThreadUtils.sleep(SLEEP_TIME_MILLIS); ThreadUtils.sleep(SLEEP_TIME_MILLIS);
} }
// self tolerant
// self tolerant if (getActiveMasterNum() == 1) {
if (getActiveMasterNum() == 1) { failoverWorker(null, true);
failoverWorker(null, true); failoverMaster(null);
failoverMaster(null); }
}
} catch (Exception e) {
}catch (Exception e){ logger.error("master start up exception", e);
logger.error("master start up exception",e); } finally {
}finally { releaseMutex(mutex);
releaseMutex(mutex); }
} }
}
@Override
@Override public void close() {
public void close(){ super.close();
super.close(); }
}
/**
/** * handle path events that this class cares about
* handle path events that this class cares about *
* @param client zkClient * @param client zkClient
* @param event path event * @param event path event
* @param path zk path * @param path zk path
*/ */
@Override @Override
protected void dataChanged(CuratorFramework client, TreeCacheEvent event, String path) { protected void dataChanged(CuratorFramework client, TreeCacheEvent event, String path) {
//monitor master //monitor master
if(path.startsWith(getZNodeParentPath(ZKNodeType.MASTER)+Constants.SINGLE_SLASH)){ if (path.startsWith(getZNodeParentPath(ZKNodeType.MASTER) + Constants.SINGLE_SLASH)) {
handleMasterEvent(event,path); handleMasterEvent(event, path);
}else if(path.startsWith(getZNodeParentPath(ZKNodeType.WORKER)+Constants.SINGLE_SLASH)){ } else if (path.startsWith(getZNodeParentPath(ZKNodeType.WORKER) + Constants.SINGLE_SLASH)) {
//monitor worker //monitor worker
handleWorkerEvent(event,path); handleWorkerEvent(event, path);
} }
} }
/** /**
* remove zookeeper node path * remove zookeeper node path
* *
* @param path zookeeper node path * @param path zookeeper node path
* @param zkNodeType zookeeper node type * @param zkNodeType zookeeper node type
* @param failover is failover * @param failover is failover
*/ */
private void removeZKNodePath(String path, ZKNodeType zkNodeType, boolean failover) { private void removeZKNodePath(String path, ZKNodeType zkNodeType, boolean failover) {
logger.info("{} node deleted : {}", zkNodeType.toString(), path); logger.info("{} node deleted : {}", zkNodeType.toString(), path);
InterProcessMutex mutex = null; InterProcessMutex mutex = null;
try { try {
String failoverPath = getFailoverLockPath(zkNodeType); String failoverPath = getFailoverLockPath(zkNodeType);
// create a distributed lock // create a distributed lock
mutex = new InterProcessMutex(getZkClient(), failoverPath); mutex = new InterProcessMutex(getZkClient(), failoverPath);
mutex.acquire(); mutex.acquire();
String serverHost = getHostByEventDataPath(path); String serverHost = getHostByEventDataPath(path);
// handle dead server // handle dead server
handleDeadServer(path, zkNodeType, Constants.ADD_ZK_OP); handleDeadServer(path, zkNodeType, Constants.ADD_ZK_OP);
//failover server //failover server
if(failover){ if (failover) {
failoverServerWhenDown(serverHost, zkNodeType); failoverServerWhenDown(serverHost, zkNodeType);
} }
}catch (Exception e){ } catch (Exception e) {
logger.error("{} server failover failed.", zkNodeType.toString()); logger.error("{} server failover failed.", zkNodeType.toString());
logger.error("failover exception ",e); logger.error("failover exception ", e);
} } finally {
finally { releaseMutex(mutex);
releaseMutex(mutex); }
} }
}
/**
/** * failover server when server down
* failover server when server down *
* * @param serverHost server host
* @param serverHost server host * @param zkNodeType zookeeper node type
* @param zkNodeType zookeeper node type * @throws Exception exception
* @throws Exception exception */
*/ private void failoverServerWhenDown(String serverHost, ZKNodeType zkNodeType) throws Exception {
private void failoverServerWhenDown(String serverHost, ZKNodeType zkNodeType) throws Exception { if (StringUtils.isEmpty(serverHost)) {
if(StringUtils.isEmpty(serverHost) || serverHost.startsWith(NetUtils.getHost())){ return;
return ; }
} switch (zkNodeType) {
switch (zkNodeType){ case MASTER:
case MASTER: failoverMaster(serverHost);
failoverMaster(serverHost); break;
break; case WORKER:
case WORKER: failoverWorker(serverHost, true);
failoverWorker(serverHost, true); break;
default: default:
break; break;
} }
} }
/** /**
* get failover lock path * get failover lock path
* *
* @param zkNodeType zookeeper node type * @param zkNodeType zookeeper node type
* @return fail over lock path * @return fail over lock path
*/ */
private String getFailoverLockPath(ZKNodeType zkNodeType){ private String getFailoverLockPath(ZKNodeType zkNodeType) {
switch (zkNodeType){ switch (zkNodeType) {
case MASTER: case MASTER:
return getMasterFailoverLockPath(); return getMasterFailoverLockPath();
case WORKER: case WORKER:
return getWorkerFailoverLockPath(); return getWorkerFailoverLockPath();
default: default:
return ""; return "";
} }
} }
    /**
     * monitor master
     *
     * @param event event
     * @param path path
     */
    public void handleMasterEvent(TreeCacheEvent event, String path) {
        switch (event.getType()) {
            case NODE_ADDED:
                logger.info("master node added : {}", path);
                break;
            case NODE_REMOVED:
                removeZKNodePath(path, ZKNodeType.MASTER, true);
                break;
            default:
                break;
        }
    }

    /**
     * monitor worker
     *
     * @param event event
     * @param path path
     */
    public void handleWorkerEvent(TreeCacheEvent event, String path) {
        switch (event.getType()) {
            case NODE_ADDED:
                logger.info("worker node added : {}", path);
                break;
            case NODE_REMOVED:
                logger.info("worker node deleted : {}", path);
                removeZKNodePath(path, ZKNodeType.WORKER, true);
                break;
            default:
                break;
        }
    }

    /**
     * task needs failover if task start before worker starts
     *
     * @param taskInstance task instance
     * @return true if task instance need fail over
     */
    private boolean checkTaskInstanceNeedFailover(TaskInstance taskInstance) throws Exception {
        boolean taskNeedFailover = true;

        //now no host will execute this task instance,so no need to failover the task
        if (taskInstance.getHost() == null) {
            return false;
        }

        // if the worker node exists in zookeeper, we must check the task starts after the worker
        if (checkZKNodeExists(taskInstance.getHost(), ZKNodeType.WORKER)) {
            //if task start after worker starts, there is no need to failover the task.
            if (checkTaskAfterWorkerStart(taskInstance)) {
                taskNeedFailover = false;
            }
        }
        return taskNeedFailover;
    }

    /**
     * check task start after the worker server starts.
     *
     * @param taskInstance task instance
     * @return true if task instance start time after worker server start date
     */
    private boolean checkTaskAfterWorkerStart(TaskInstance taskInstance) {
        if (StringUtils.isEmpty(taskInstance.getHost())) {
            return false;
        }
        Date workerServerStartDate = null;
        List<Server> workerServers = getServersList(ZKNodeType.WORKER);
        for (Server workerServer : workerServers) {
            if (taskInstance.getHost().equals(workerServer.getHost() + Constants.COLON + workerServer.getPort())) {
                workerServerStartDate = workerServer.getCreateTime();
                break;
            }
        }

        if (workerServerStartDate != null) {
            return taskInstance.getStartTime().after(workerServerStartDate);
        }
        return false;
    }
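A minimal standalone sketch of the rule above (not part of this patch; the class name and timestamps are invented for illustration, DateUtils is the project's own utility): a task whose start time falls before the worker's registration time is treated as lost and needs failover.

import java.util.Date;
import org.apache.dolphinscheduler.common.utils.DateUtils;

public class FailoverRuleSketch {
    public static void main(String[] args) {
        // hypothetical timestamps: the worker (re)registered at 10:00, the task started at 09:55
        Date workerServerStartDate = DateUtils.stringToDate("2020-08-01 10:00:00");
        Date taskStartTime = DateUtils.stringToDate("2020-08-01 09:55:00");

        // the same comparison checkTaskAfterWorkerStart performs: started after the worker -> no failover needed
        boolean startedAfterWorker = taskStartTime.after(workerServerStartDate);
        System.out.println("task needs failover: " + !startedAfterWorker); // prints: task needs failover: true
    }
}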
    /**
     * failover worker tasks
     *
     * 1. kill yarn job if there are yarn jobs in tasks.
     * 2. change task state from running to need failover.
     * 3. failover all tasks when workerHost is null
     * @param workerHost worker host
     */
    /**
     * failover worker tasks
     * <p>
     * 1. kill yarn job if there are yarn jobs in tasks.
     * 2. change task state from running to need failover.
     * 3. failover all tasks when workerHost is null
     *
     * @param workerHost worker host
     * @param needCheckWorkerAlive need check worker alive
     * @throws Exception exception
     */
    private void failoverWorker(String workerHost, boolean needCheckWorkerAlive) throws Exception {
        logger.info("start worker[{}] failover ...", workerHost);

        List<TaskInstance> needFailoverTaskInstanceList = processService.queryNeedFailoverTaskInstances(workerHost);
        for (TaskInstance taskInstance : needFailoverTaskInstanceList) {
            if (needCheckWorkerAlive) {
                if (!checkTaskInstanceNeedFailover(taskInstance)) {
                    continue;
                }
            }

            ProcessInstance processInstance = processService.findProcessInstanceDetailById(taskInstance.getProcessInstanceId());
            if (processInstance != null) {
                taskInstance.setProcessInstance(processInstance);
            }

            TaskExecutionContext taskExecutionContext = TaskExecutionContextBuilder.get()
                    .buildTaskInstanceRelatedInfo(taskInstance)
                    .buildProcessInstanceRelatedInfo(processInstance)
                    .create();
            // only kill yarn job if exists , the local thread has exited
            ProcessUtils.killYarnJob(taskExecutionContext);

            taskInstance.setState(ExecutionStatus.NEED_FAULT_TOLERANCE);
            processService.saveTaskInstance(taskInstance);
        }
        logger.info("end worker[{}] failover ...", workerHost);
    }

    /**
     * failover master tasks
     *
     * @param masterHost master host
     */
    private void failoverMaster(String masterHost) {
        logger.info("start master failover ...");

        List<ProcessInstance> needFailoverProcessInstanceList = processService.queryNeedFailoverProcessInstances(masterHost);

        //updateProcessInstance host is null and insert into command
        for (ProcessInstance processInstance : needFailoverProcessInstanceList) {
            if (Constants.NULL.equals(processInstance.getHost())) {
                continue;
            }
            processService.processNeedFailoverProcessInstances(processInstance);
        }

        logger.info("master failover end");
    }

    public InterProcessMutex blockAcquireMutex() throws Exception {
        InterProcessMutex mutex = new InterProcessMutex(getZkClient(), getMasterLockPath());
        mutex.acquire();
        return mutex;
    }

}
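A hedged caller-side sketch (not shown in this diff; the wrapper method name and the work done while holding the lock are assumptions) of how the mutex returned by blockAcquireMutex is typically paired with a release, using Curator's standard InterProcessMutex API:

import org.apache.curator.framework.recipes.locks.InterProcessMutex;

// Sketch only: the caller must release the lock, otherwise other masters stay blocked on the failover lock.
void runUnderFailoverLock(ZKMasterClient zkMasterClient) throws Exception {
    InterProcessMutex mutex = null;
    try {
        mutex = zkMasterClient.blockAcquireMutex(); // blocks until the ZooKeeper lock is granted
        // ... failoverMaster()/failoverWorker() style work goes here while the lock is held ...
    } finally {
        if (mutex != null && mutex.isAcquiredInThisProcess()) {
            mutex.release();
        }
    }
}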

167
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java

@@ -896,7 +896,7 @@ public class ProcessService {
            return task;
        }
        if(!task.getState().typeIsFinished()){
            createSubWorkProcess(processInstance, task);
        }
        logger.info("end submit task to db successfully:{} state:{} complete, instance id:{} state: {} ",

@@ -906,20 +906,22 @@ public class ProcessService {
    /**
     * set work process instance map
     * @param parentInstance parentInstance
     * @param parentTask parentTask
     * @return process instance map
     */
    private ProcessInstanceMap setProcessInstanceMap(ProcessInstance parentInstance, TaskInstance parentTask){
        ProcessInstanceMap processMap = findWorkProcessMapByParent(parentInstance.getId(), parentTask.getId());
        if (processMap != null) {
            return processMap;
        }
        if (parentInstance.getCommandType() == CommandType.REPEAT_RUNNING) {
            // update current task id to map
            // repeat running does not generate new sub process instance
            processMap = findPreviousTaskProcessMap(parentInstance, parentTask);
            if (processMap != null) {
                processMap.setParentTaskInstanceId(parentTask.getId());
                updateWorkProcessInstanceMap(processMap);
                return processMap;

@@ -944,11 +946,11 @@ public class ProcessService {
        Integer preTaskId = 0;
        List<TaskInstance> preTaskList = this.findPreviousTaskListByWorkProcessId(parentProcessInstance.getId());
        for (TaskInstance task : preTaskList) {
            if (task.getName().equals(parentTask.getName())) {
                preTaskId = task.getId();
                ProcessInstanceMap map = findWorkProcessMapByParent(parentProcessInstance.getId(), preTaskId);
                if (map != null) {
                    return map;
                }
            }
@@ -960,66 +962,111 @@ public class ProcessService {
    /**
     * create sub work process command
     *
     * @param parentProcessInstance parentProcessInstance
     * @param task task
     */
    public void createSubWorkProcess(ProcessInstance parentProcessInstance, TaskInstance task) {
        if (!task.isSubProcess()) {
            return;
        }
        //check create sub work flow firstly
        ProcessInstanceMap instanceMap = findWorkProcessMapByParent(parentProcessInstance.getId(), task.getId());
        if (null != instanceMap && CommandType.RECOVER_TOLERANCE_FAULT_PROCESS == parentProcessInstance.getCommandType()) {
            // recover failover tolerance would not create a new command when the sub command already have been created
            return;
        }
        instanceMap = setProcessInstanceMap(parentProcessInstance, task);
        ProcessInstance childInstance = null;
        if (instanceMap.getProcessInstanceId() != 0) {
            childInstance = findProcessInstanceById(instanceMap.getProcessInstanceId());
        }
        Command subProcessCommand = createSubProcessCommand(parentProcessInstance, childInstance, instanceMap, task);
        updateSubProcessDefinitionByParent(parentProcessInstance, subProcessCommand.getProcessDefinitionId());
        initSubInstanceState(childInstance);
        createCommand(subProcessCommand);
        logger.info("sub process command created: {} ", subProcessCommand);
    }

    /**
     * complement data needs transform parent parameter to child.
     * @param instanceMap
     * @param parentProcessInstance
     * @return
     */
    private String getSubWorkFlowParam(ProcessInstanceMap instanceMap, ProcessInstance parentProcessInstance) {
        // set sub work process command
        String processMapStr = JSONUtils.toJsonString(instanceMap);
        Map<String, String> cmdParam = JSONUtils.toMap(processMapStr);
        if (parentProcessInstance.isComplementData()) {
            Map<String, String> parentParam = JSONUtils.toMap(parentProcessInstance.getCommandParam());
            String endTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE);
            String startTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE);
            cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, endTime);
            cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, startTime);
            processMapStr = JSONUtils.toJsonString(cmdParam);
        }
        return processMapStr;
    }

    /**
     * create sub work process command
     * @param parentProcessInstance
     * @param childInstance
     * @param instanceMap
     * @param task
     */
    public Command createSubProcessCommand(ProcessInstance parentProcessInstance,
                                           ProcessInstance childInstance,
                                           ProcessInstanceMap instanceMap,
                                           TaskInstance task) {
        CommandType commandType = getSubCommandType(parentProcessInstance, childInstance);
        TaskNode taskNode = JSONUtils.parseObject(task.getTaskJson(), TaskNode.class);
        Map<String, String> subProcessParam = JSONUtils.toMap(taskNode.getParams());
        Integer childDefineId = Integer.parseInt(subProcessParam.get(Constants.CMDPARAM_SUB_PROCESS_DEFINE_ID));
        String processParam = getSubWorkFlowParam(instanceMap, parentProcessInstance);

        return new Command(
                commandType,
                TaskDependType.TASK_POST,
                parentProcessInstance.getFailureStrategy(),
                parentProcessInstance.getExecutorId(),
                childDefineId,
                processParam,
                parentProcessInstance.getWarningType(),
                parentProcessInstance.getWarningGroupId(),
                parentProcessInstance.getScheduleTime(),
                parentProcessInstance.getProcessInstancePriority()
        );
    }

    /**
     * initialize sub work flow state
     * child instance state would be initialized when 'recovery from pause/stop/failure'
     * @param childInstance
     */
    private void initSubInstanceState(ProcessInstance childInstance) {
        if (childInstance != null) {
            childInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
            updateProcessInstance(childInstance);
        }
    }

    /**
     * get sub work flow command type
     * child instance exist: child command = fatherCommand
     * child instance not exists: child command = fatherCommand[0]
     *
     * @param parentProcessInstance
     * @return
     */
    private CommandType getSubCommandType(ProcessInstance parentProcessInstance, ProcessInstance childInstance) {
        CommandType commandType = parentProcessInstance.getCommandType();
        if (childInstance == null) {
            String fatherHistoryCommand = parentProcessInstance.getHistoryCmd();
            commandType = CommandType.valueOf(fatherHistoryCommand.split(Constants.COMMA)[0]);
        }
        return commandType;
    }
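A small worked example (assumed values; it mirrors the cases asserted in ProcessServiceTest below) of the rule getSubCommandType applies when the child instance does not exist yet: the sub process command type is the first entry recorded in the parent's historyCmd.

// hypothetical parent history; Constants.COMMA is the "," separator used above
String fatherHistoryCommand = "SCHEDULER,START_FAILURE_TASK_PROCESS";
CommandType commandType = CommandType.valueOf(fatherHistoryCommand.split(",")[0]);
// commandType == CommandType.SCHEDULER, so the sub workflow is dispatched as a scheduled run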
@@ -1497,20 +1544,6 @@ public class ProcessService {
        return result;
    }

-   /**
-    * update pid and app links field by task instance id
-    * @param taskInstId taskInstId
-    * @param pid pid
-    * @param appLinks appLinks
-    */
-   public void updatePidByTaskInstId(int taskInstId, int pid,String appLinks) {
-       TaskInstance taskInstance = taskInstanceMapper.selectById(taskInstId);
-       taskInstance.setPid(pid);
-       taskInstance.setAppLink(appLinks);
-       saveTaskInstance(taskInstance);
-   }

    /**
     * query schedule by id
     * @param id id

116
dolphinscheduler-service/src/test/java/org/apache/dolphinscheduler/service/process/ProcessServiceTest.java

@@ -0,0 +1,116 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.service.process;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.WarningType;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.dao.entity.Command;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.ProcessInstanceMap;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import org.junit.Assert;
import org.junit.Test;
import com.fasterxml.jackson.databind.JsonNode;
/**
* process service test
*/
public class ProcessServiceTest {
@Test
public void testCreateSubCommand() {
ProcessService processService = new ProcessService();
ProcessInstance parentInstance = new ProcessInstance();
parentInstance.setProcessDefinitionId(1);
parentInstance.setWarningType(WarningType.SUCCESS);
parentInstance.setWarningGroupId(0);
TaskInstance task = new TaskInstance();
task.setTaskJson("{\"params\":{\"processDefinitionId\":100}}");
task.setId(10);
ProcessInstance childInstance = null;
ProcessInstanceMap instanceMap = new ProcessInstanceMap();
instanceMap.setParentProcessInstanceId(1);
instanceMap.setParentTaskInstanceId(10);
Command command = null;
//father history: start; child null == command type: start
parentInstance.setHistoryCmd("START_PROCESS");
parentInstance.setCommandType(CommandType.START_PROCESS);
command = processService.createSubProcessCommand(
parentInstance, childInstance, instanceMap, task
);
Assert.assertEquals(CommandType.START_PROCESS, command.getCommandType());
//father history: start,start failure; child null == command type: start
parentInstance.setCommandType(CommandType.START_FAILURE_TASK_PROCESS);
parentInstance.setHistoryCmd("START_PROCESS,START_FAILURE_TASK_PROCESS");
command = processService.createSubProcessCommand(
parentInstance, childInstance, instanceMap, task
);
Assert.assertEquals(CommandType.START_PROCESS, command.getCommandType());
//father history: scheduler,start failure; child null == command type: scheduler
parentInstance.setCommandType(CommandType.START_FAILURE_TASK_PROCESS);
parentInstance.setHistoryCmd("SCHEDULER,START_FAILURE_TASK_PROCESS");
command = processService.createSubProcessCommand(
parentInstance, childInstance, instanceMap, task
);
Assert.assertEquals(CommandType.SCHEDULER, command.getCommandType());
//father history: complement,start failure; child null == command type: complement
String startString = "2020-01-01 00:00:00";
String endString = "2020-01-10 00:00:00";
parentInstance.setCommandType(CommandType.START_FAILURE_TASK_PROCESS);
parentInstance.setHistoryCmd("COMPLEMENT_DATA,START_FAILURE_TASK_PROCESS");
Map<String,String> complementMap = new HashMap<>();
complementMap.put(Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE, startString);
complementMap.put(Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE, endString);
parentInstance.setCommandParam(JSONUtils.toJsonString(complementMap));
command = processService.createSubProcessCommand(
parentInstance, childInstance, instanceMap, task
);
Assert.assertEquals(CommandType.COMPLEMENT_DATA, command.getCommandType());
JsonNode complementDate = JSONUtils.parseObject(command.getCommandParam());
Date start = DateUtils.stringToDate(complementDate.get(Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE).asText());
Date end = DateUtils.stringToDate(complementDate.get(Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE).asText());
Assert.assertEquals(startString, DateUtils.dateToString(start));
Assert.assertEquals(endString, DateUtils.dateToString(end));
//father history: start,failure,start failure; child not null == command type: start failure
childInstance = new ProcessInstance();
parentInstance.setCommandType(CommandType.START_FAILURE_TASK_PROCESS);
parentInstance.setHistoryCmd("START_PROCESS,START_FAILURE_TASK_PROCESS");
command = processService.createSubProcessCommand(
parentInstance, childInstance, instanceMap, task
);
Assert.assertEquals(CommandType.START_FAILURE_TASK_PROCESS, command.getCommandType());
}
}

7
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/config.js

@@ -236,6 +236,13 @@ const tasksState = {
    color: '#5102ce',
    icoUnicode: 'ans-icon-coin',
    isSpin: false
  },
  FORCED_SUCCESS: {
    id: 13,
    desc: `${i18n.$t('Forced success')}`,
    color: '#5102ce',
    icoUnicode: 'ans-icon-success-solid',
    isSpin: false
  }
}

218
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/_source/dependentTimeout.vue

@@ -0,0 +1,218 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<div class="timeout-alarm-model">
<div class="clearfix list">
<div class="text-box">
<span>{{$t('Timeout alarm')}}</span>
</div>
<div class="cont-box">
<label class="label-box">
<div style="padding-top: 5px;">
<x-switch v-model="enable" @on-click="_onSwitch(0, $event)" :disabled="isDetails"></x-switch>
</div>
</label>
</div>
</div>
<div class="clearfix list" v-if="enable">
<div class="text-box">
<span>{{$t('Waiting Dependent start')}}</span>
</div>
<div class="cont-box">
<label class="label-box">
<div style="padding: 5px 0;">
<x-switch v-model="waitStartTimeout.enable" @on-click="_onSwitch(1, $event)" :disabled="isDetails"></x-switch>
</div>
</label>
</div>
</div>
<div class="clearfix list" v-if="enable && waitStartTimeout.enable">
<div class="cont-box">
<label class="label-box">
<span class="text-box">
<span>{{$t('Timeout period')}}</span>
</span>
<x-input v-model="waitStartTimeout.interval" style="width: 100px;" :disabled="isDetails" maxlength="9">
<span slot="append">{{$t('Minute')}}</span>
</x-input>
<span class="text-box">
<span>{{$t('Check interval')}}</span>
</span>
<x-input v-model="waitStartTimeout.checkInterval" style="width: 100px;" :disabled="isDetails" maxlength="9">
<span slot="append">{{$t('Minute')}}</span>
</x-input>
<span class="text-box">
<span>{{$t('Timeout strategy')}}</span>
</span>
<div style="padding-top: 6px;">
<x-checkbox-group v-model="waitStartTimeout.strategy">
<x-checkbox label="FAILED" :disabled="true">{{$t('Timeout failure')}}</x-checkbox>
</x-checkbox-group>
</div>
</label>
</div>
</div>
<div class="clearfix list" v-if="enable">
<div class="text-box">
<span>{{$t('Waiting Dependent complete')}}</span>
</div>
<div class="cont-box">
<label class="label-box">
<div style="padding: 5px 0;">
<x-switch v-model="waitCompleteTimeout.enable" @on-click="_onSwitch(2, $event)" :disabled="isDetails"></x-switch>
</div>
</label>
</div>
</div>
<div class="clearfix list" v-if="enable && waitCompleteTimeout.enable">
<div class="cont-box">
<label class="label-box">
<span class="text-box">
<span>{{$t('Timeout period')}}</span>
</span>
<x-input v-model="waitCompleteTimeout.interval" style="width: 100px;" :disabled="isDetails" maxlength="9">
<span slot="append">{{$t('Minute')}}</span>
</x-input>
<span class="text-box">
<span>{{$t('Timeout strategy')}}</span>
</span>
<div style="padding-top: 6px;">
<x-checkbox-group v-model="waitCompleteTimeout.strategy">
<x-checkbox label="WARN" :disabled="isDetails">{{$t('Timeout alarm')}}</x-checkbox>
<x-checkbox label="FAILED" :disabled="isDetails">{{$t('Timeout failure')}}</x-checkbox>
</x-checkbox-group>
</div>
</label>
</div>
</div>
</div>
</template>
<script>
import _ from 'lodash'
import disabledState from '@/module/mixin/disabledState'
export default {
name: 'form-dependent-timeout',
data () {
return {
// Timeout display hiding
enable: false,
waitStartTimeout: {
enable: false,
// Timeout strategy
strategy: ['FAILED'],
// Timeout period
interval: null,
checkInterval: null
},
waitCompleteTimeout: {
enable: false,
// Timeout strategy
strategy: [],
// Timeout period
interval: null
}
}
},
mixins: [disabledState],
props: {
backfillItem: Object
},
methods: {
_onSwitch (p, is) {
// reset timeout setting when switch timeout on/off.
// p = 0 for timeout switch; p = 1 for wait start timeout switch; p = 2 for wait complete timeout switch.
if (p === 1 || p === 0) {
this.waitStartTimeout.interval = is ? 30 : null
this.waitStartTimeout.checkInterval = is ? 1 : null
}
if (p === 2 || p === 0) {
this.waitCompleteTimeout.strategy = is ? ['WARN'] : []
this.waitCompleteTimeout.interval = is ? 30 : null
}
},
_verification () {
// Verification timeout policy
      if (this.enable
        && ((this.waitCompleteTimeout.enable && !this.waitCompleteTimeout.strategy.length)
        || (this.waitStartTimeout.enable && !this.waitStartTimeout.strategy.length))) {
this.$message.warning(`${this.$t('Timeout strategy must be selected')}`)
return false
}
// Verify timeout duration Non 0 positive integer
const reg = /^[1-9]\d*$/
      if (this.enable
        && ((this.waitCompleteTimeout.enable && !reg.test(this.waitCompleteTimeout.interval))
        || (this.waitStartTimeout.enable && (!reg.test(this.waitStartTimeout.interval) || !reg.test(this.waitStartTimeout.checkInterval))))) {
this.$message.warning(`${this.$t('Timeout must be a positive integer')}`)
return false
}
// Verify timeout duration longer than check interval
if (this.enable && this.waitStartTimeout.enable && this.waitStartTimeout.checkInterval >= this.waitStartTimeout.interval) {
this.$message.warning(`${this.$t('Timeout must be longer than check interval')}`)
return false
}
this.$emit('on-timeout', {
waitStartTimeout: {
strategy: 'FAILED',
interval: parseInt(this.waitStartTimeout.interval),
checkInterval: parseInt(this.waitStartTimeout.checkInterval),
enable: this.waitStartTimeout.enable
},
waitCompleteTimeout: {
strategy: (() => {
// Handling checkout sequence
let strategy = this.waitCompleteTimeout.strategy
if (strategy.length === 2 && strategy[0] === 'FAILED') {
return [strategy[1], strategy[0]].join(',')
} else {
return strategy.join(',')
}
})(),
interval: parseInt(this.waitCompleteTimeout.interval),
enable: this.waitCompleteTimeout.enable
}
})
return true
}
},
watch: {
},
created () {
let o = this.backfillItem
// Non-null objects represent backfill
if (!_.isEmpty(o)) {
if (o.timeout) {
this.enable = true
this.waitCompleteTimeout.enable = o.timeout.enable || false
this.waitCompleteTimeout.strategy = _.split(o.timeout.strategy, ',') || ['WARN']
this.waitCompleteTimeout.interval = o.timeout.interval || null
}
if (o.waitStartTimeout) {
this.enable = true
this.waitStartTimeout.enable = o.waitStartTimeout.enable || false
this.waitStartTimeout.strategy = ['FAILED']
this.waitStartTimeout.interval = o.waitStartTimeout.interval || null
this.waitStartTimeout.checkInterval = o.waitStartTimeout.checkInterval || null
}
}
},
mounted () {
},
components: {}
}
</script>
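The two numeric constraints _verification() enforces above (a non-zero positive integer, and a timeout strictly longer than the check interval) can be illustrated outside the component with a short Java sketch (hypothetical values, not part of the patch):

// mirrors the /^[1-9]\d*$/ check used in _verification(): non-zero positive integers only
boolean intervalValid = "30".matches("^[1-9]\\d*$");      // true
boolean checkIntervalValid = "0".matches("^[1-9]\\d*$");  // false, zero is rejected
boolean orderingValid = 30 > 1;                           // the timeout must be longer than the check interval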

33
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/formModel.vue

@@ -162,10 +162,18 @@
      <!-- Task timeout alarm -->
      <m-timeout-alarm
        v-if="taskType !== 'DEPENDENT'"
        ref="timeout"
        :backfill-item="backfillItem"
        @on-timeout="_onTimeout">
      </m-timeout-alarm>
      <!-- Dependent timeout alarm -->
      <m-dependent-timeout
        v-if="taskType === 'DEPENDENT'"
        ref="dependentTimeout"
        :backfill-item="backfillItem"
        @on-timeout="_onDependentTimeout">
      </m-dependent-timeout>
      <!-- shell node -->
      <m-shell

@@ -315,6 +323,7 @@
  import mSubProcess from './tasks/sub_process'
  import mSelectInput from './_source/selectInput'
  import mTimeoutAlarm from './_source/timeoutAlarm'
  import mDependentTimeout from './_source/dependentTimeout'
  import mWorkerGroups from './_source/workerGroups'
  import mPreTasks from './tasks/pre_tasks'
  import clickoutside from '@/module/util/clickoutside'

@@ -363,6 +372,8 @@
        delayTime: '0',
        // Task timeout alarm
        timeout: {},
        // (For Dependent nodes) Wait start timeout alarm
        waitStartTimeout: {},
        // Task priority
        taskInstancePriority: 'MEDIUM',
        // worker group id

@@ -424,6 +435,13 @@
      _onTimeout (o) {
        this.timeout = Object.assign(this.timeout, {}, o)
      },
      /**
       * Dependent timeout alarm
       */
      _onDependentTimeout (o) {
        this.timeout = Object.assign(this.timeout, {}, o.waitCompleteTimeout)
        this.waitStartTimeout = Object.assign(this.waitStartTimeout, {}, o.waitStartTimeout)
      },
      /**
       * Click external to close the current component
       */

@@ -502,6 +520,7 @@
          retryInterval: this.retryInterval,
          delayTime: this.delayTime,
          timeout: this.timeout,
          waitStartTimeout: this.waitStartTimeout,
          taskInstancePriority: this.taskInstancePriority,
          workerGroup: this.workerGroup,
          status: this.status,

@@ -555,9 +574,16 @@
          return
        }
        // Verify task alarm parameters
        if (this.taskType === 'DEPENDENT') {
          if (!this.$refs['dependentTimeout']._verification()) {
            return
          }
        } else {
          if (!this.$refs['timeout']._verification()) {
            return
          }
        }
        // Verify node parameters
        if (!this.$refs[this.taskType]._verification()) {
          return

@@ -618,6 +644,7 @@
          retryInterval: this.retryInterval,
          delayTime: this.delayTime,
          timeout: this.timeout,
          waitStartTimeout: this.waitStartTimeout,
          taskInstancePriority: this.taskInstancePriority,
          workerGroup: this.workerGroup,
          status: this.status,

@@ -785,6 +812,7 @@
          retryInterval: this.retryInterval,
          delayTime: this.delayTime,
          timeout: this.timeout,
          waitStartTimeout: this.waitStartTimeout,
          taskInstancePriority: this.taskInstancePriority,
          workerGroup: this.workerGroup,
          successBranch: this.successBranch,

@@ -810,6 +838,7 @@
      mConditions,
      mSelectInput,
      mTimeoutAlarm,
      mDependentTimeout,
      mPriority,
      mWorkerGroups,
      mPreTasks,
3
dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/_source/instanceConditions/common.js

@@ -63,6 +63,9 @@ const stateType = [
  }, {
    code: 'DELAY_EXECUTION',
    label: `${i18n.$t('Delay execution')}`
  }, {
    code: 'FORCED_SUCCESS',
    label: `${i18n.$t('Forced success')}`
  }
]

23
dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/taskInstance/_source/list.vue

@@ -91,6 +91,16 @@
        <td><span>{{item.duration}}</span></td>
        <td><span>{{item.retryTimes}}</span></td>
        <td>
          <x-button
            v-if="item.state === 'FAILURE' || item.state === 'NEED_FAULT_TOLERANCE' || item.state === 'KILL'"
            type="error"
            shape="circle"
            size="xsmall"
            data-toggle="tooltip"
            :title="$t('Force success')"
            icon="ans-icon-success-solid"
            @click="_forceSuccess(item)">
          </x-button>
          <x-button
            type="info"
            shape="circle"

@@ -110,6 +120,7 @@
  import Permissions from '@/module/permissions'
  import mLog from '@/conf/home/pages/dag/_source/formModel/log'
  import { tasksState } from '@/conf/home/pages/dag/_source/config'
  import { mapActions } from 'vuex'

  export default {
    name: 'list',

@@ -126,6 +137,7 @@
      pageSize: Number
    },
    methods: {
      ...mapActions('dag', ['forceTaskSuccess']),
      _rtState (code) {
        let o = tasksState[code]
        return `<em class="${o.icoUnicode} ${o.isSpin ? 'as as-spin' : ''}" style="color:${o.color}" data-toggle="tooltip" data-container="body" title="${o.desc}"></em>`

@@ -156,6 +168,17 @@
          }
        })
      },
      _forceSuccess (item) {
        this.forceTaskSuccess({taskInstanceId: item.id}).then(res => {
          if (res.code === 0) {
            this.$message.success(res.msg)
          } else {
            this.$message.error(res.msg)
          }
        }).catch(e => {
          this.$message.error(e.msg)
        })
      },
      _go (item) {
        this.$router.push({ path: `/projects/instance/list/${item.processInstanceId}` })
      },

12
dolphinscheduler-ui/src/js/conf/home/store/dag/actions.js

@@ -734,6 +734,18 @@ export default {
        })
      })
    },
    /**
     * Force fail/kill/need_fault_tolerance task success
     */
    forceTaskSuccess ({ state }, payload) {
      return new Promise((resolve, reject) => {
        io.post(`projects/${state.projectName}/task-instance/force-success`, payload, res => {
          resolve(res)
        }).catch(e => {
          reject(e)
        })
      })
    },
    /**
     * Query task record list
     */

6
dolphinscheduler-ui/src/js/module/i18n/locale/en_US.js

@@ -32,6 +32,7 @@ export default {
  'Current node settings': 'Current node settings',
  'View history': 'View history',
  'View log': 'View log',
  'Force success': 'Force success',
  'Enter this child node': 'Enter this child node',
  'Node name': 'Node name',
  'Run flag': 'Run flag',

@@ -426,8 +427,13 @@ export default {
  'Timeout alarm': 'Timeout alarm',
  'Timeout failure': 'Timeout failure',
  'Timeout period': 'Timeout period',
  'Waiting Dependent complete': 'Waiting Dependent complete',
  'Waiting Dependent start': 'Waiting Dependent start',
  'Check interval': 'Check interval',
  'Timeout must be longer than check interval': 'Timeout must be longer than check interval',
  'Timeout strategy must be selected': 'Timeout strategy must be selected',
  'Timeout must be a positive integer': 'Timeout must be a positive integer',
  'Forced success': 'Forced success',
  'Add dependency': 'Add dependency',
  and: 'and',
  or: 'or',

6
dolphinscheduler-ui/src/js/module/i18n/locale/zh_CN.js

@@ -32,6 +32,7 @@ export default {
  'Current node settings': '当前节点设置',
  'View history': '查看历史',
  'View log': '查看日志',
  'Force success': '强制成功',
  'Enter this child node': '进入该子节点',
  'Node name': '节点名称',
  'Please enter name (required)': '请输入名称(必填)',

@@ -420,8 +421,12 @@ export default {
  'Timeout alarm': '超时告警',
  'Timeout failure': '超时失败',
  'Timeout period': '超时时长',
  'Waiting Dependent complete': '等待依赖完成',
  'Waiting Dependent start': '等待依赖启动',
  'Check interval': '检查间隔',
  'Timeout strategy must be selected': '超时策略必须选一个',
  'Timeout must be a positive integer': '超时时长必须为正整数',
  'Timeout must be longer than check interval': '超时时间必须比检查间隔长',
  'Add dependency': '添加依赖',
  and: '且',
  or: '或',

@@ -432,6 +437,7 @@ export default {
  Running: '正在运行',
  'Waiting for dependency to complete': '等待依赖完成',
  'Delay execution': '延时执行',
  'Forced success': '强制成功',
  Selected: '已选',
  CurrentHour: '当前小时',
  Last1Hour: '前1小时',

1
pom.xml

@@ -853,6 +853,7 @@
                        <include>**/server/worker/EnvFileTest.java</include>
                        <include>**/server/worker/runner/TaskExecuteThreadTest.java</include>
                        <include>**/service/quartz/cron/CronUtilsTest.java</include>
                        <include>**/service/process/ProcessServiceTest.java</include>
                        <include>**/service/zk/DefaultEnsembleProviderTest.java</include>
                        <include>**/service/zk/ZKServerTest.java</include>
                        <include>**/service/zk/CuratorZookeeperClientTest.java</include>

2
sql/upgrade/1.3.3_schema/mysql/dolphinscheduler_ddl.sql

@@ -101,7 +101,7 @@ drop PROCEDURE if EXISTS ct_dolphin_T_t_ds_process_definition_version;
delimiter d//
CREATE PROCEDURE ct_dolphin_T_t_ds_process_definition_version()
BEGIN
CREATE TABLE IF NOT EXISTS `t_ds_process_definition_version` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`process_definition_id` int(11) NOT NULL COMMENT 'process definition id',
`version` int(11) DEFAULT NULL COMMENT 'process definition version',

4
sql/upgrade/1.3.3_schema/postgresql/dolphinscheduler_ddl.sql

@@ -91,7 +91,7 @@ DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_task_instance_A_var_pool();
delimiter d//
CREATE OR REPLACE FUNCTION ct_dolphin_T_t_ds_process_definition_version() RETURNS void AS $$
BEGIN
CREATE TABLE IF NOT EXISTS t_ds_process_definition_version (
id int NOT NULL ,
process_definition_id int NOT NULL ,
version int DEFAULT NULL ,

@@ -140,5 +140,3 @@ DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_resources_un();
