authorizedUdfs = udfFuncMapper.listAuthorizedUdfFunc(userId, needChecks).stream().map(UdfFunc::getId).collect(toSet());
originResSet.removeAll(authorizedUdfs);
break;
default:
@@ -2007,9 +1983,6 @@ public class ProcessService {
/**
* format task app id in task instance
- *
- * @param taskInstance
- * @return
*/
public String formatTaskAppId(TaskInstance taskInstance) {
ProcessDefinition definition = this.findProcessDefineById(taskInstance.getProcessDefinitionId());
@@ -2019,9 +1992,9 @@ public class ProcessService {
return "";
}
return String.format("%s_%s_%s",
- definition.getId(),
- processInstanceById.getId(),
- taskInstance.getId());
+ definition.getId(),
+ processInstanceById.getId(),
+ taskInstance.getId());
}
}
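
The hunk above only re-indents the arguments of formatTaskAppId; the app id itself is still the three ids joined with underscores. A minimal standalone sketch of that join convention, using plain int parameters and an illustrative class name rather than the project's entity types:

public final class TaskAppIdFormat {

    private TaskAppIdFormat() {
    }

    // Joins the ids the same way as formatTaskAppId: "definitionId_processInstanceId_taskInstanceId".
    public static String format(int definitionId, int processInstanceId, int taskInstanceId) {
        return String.format("%s_%s_%s", definitionId, processInstanceId, taskInstanceId);
    }

    public static void main(String[] args) {
        System.out.println(format(3, 17, 42)); // prints 3_17_42
    }
}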
diff --git a/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/quartz/ProcessScheduleJob.java b/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/quartz/ProcessScheduleJob.java
index 6ac847b8db..2921ce2bba 100644
--- a/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/quartz/ProcessScheduleJob.java
+++ b/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/quartz/ProcessScheduleJob.java
@@ -14,8 +14,8 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.dolphinscheduler.service.quartz;
+package org.apache.dolphinscheduler.service.quartz;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.CommandType;
@@ -25,6 +25,9 @@ import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.apache.dolphinscheduler.service.process.ProcessService;
+
+import java.util.Date;
+
import org.quartz.Job;
import org.quartz.JobDataMap;
import org.quartz.JobExecutionContext;
@@ -34,8 +37,6 @@ import org.slf4j.LoggerFactory;
import org.springframework.util.Assert;
import org.springframework.util.StringUtils;
-import java.util.Date;
-
/**
* process schedule job
*/
@@ -46,7 +47,7 @@ public class ProcessScheduleJob implements Job {
*/
private static final Logger logger = LoggerFactory.getLogger(ProcessScheduleJob.class);
- public ProcessService getProcessService(){
+ public ProcessService getProcessService() {
return SpringApplicationContext.getBean(ProcessService.class);
}
@@ -66,10 +67,8 @@ public class ProcessScheduleJob implements Job {
int projectId = dataMap.getInt(Constants.PROJECT_ID);
int scheduleId = dataMap.getInt(Constants.SCHEDULE_ID);
-
Date scheduledFireTime = context.getScheduledFireTime();
-
Date fireTime = context.getFireTime();
logger.info("scheduled fire time :{}, fire time :{}, process id :{}", scheduledFireTime, fireTime, scheduleId);
@@ -82,11 +81,10 @@ public class ProcessScheduleJob implements Job {
return;
}
-
ProcessDefinition processDefinition = getProcessService().findProcessDefineById(schedule.getProcessDefinitionId());
// release state : online/offline
ReleaseState releaseState = processDefinition.getReleaseState();
- if (processDefinition == null || releaseState == ReleaseState.OFFLINE) {
+ if (releaseState == ReleaseState.OFFLINE) {
logger.warn("process definition does not exist in db or offline,need not to create command, projectId:{}, processId:{}", projectId, scheduleId);
return;
}
@@ -107,7 +105,6 @@ public class ProcessScheduleJob implements Job {
getProcessService().createCommand(command);
}
-
/**
* delete job
*/
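
For context on ProcessScheduleJob above: a Quartz job receives its parameters through the JobDataMap attached when the job was scheduled, and the execution context exposes both the scheduled and the actual fire time. The sketch below shows only that pattern; the key names and the isOffline() check are placeholders, not the project's API.

import java.util.Date;

import org.quartz.Job;
import org.quartz.JobDataMap;
import org.quartz.JobExecutionContext;

public class ExampleScheduleJob implements Job {

    @Override
    public void execute(JobExecutionContext context) {
        // Parameters travel with the job, like PROJECT_ID / SCHEDULE_ID above.
        JobDataMap dataMap = context.getJobDetail().getJobDataMap();
        int projectId = dataMap.getInt("projectId");   // placeholder key
        int scheduleId = dataMap.getInt("scheduleId"); // placeholder key

        Date scheduledFireTime = context.getScheduledFireTime();
        Date fireTime = context.getFireTime();
        System.out.printf("scheduled=%s fired=%s project=%d schedule=%d%n",
                scheduledFireTime, fireTime, projectId, scheduleId);

        // Mirrors the early return above: an offline definition means no command is created.
        if (isOffline(scheduleId)) {
            return;
        }
        // ... build and submit the command here ...
    }

    // Placeholder for the release-state lookup done through ProcessService in the real job.
    private boolean isOffline(int scheduleId) {
        return false;
    }
}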
diff --git a/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/quartz/QuartzExecutors.java b/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/quartz/QuartzExecutors.java
index 3b15810e05..fd91e4076d 100644
--- a/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/quartz/QuartzExecutors.java
+++ b/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/quartz/QuartzExecutors.java
@@ -14,15 +14,76 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
package org.apache.dolphinscheduler.service.quartz;
+import static org.apache.dolphinscheduler.common.Constants.ORG_POSTGRESQL_DRIVER;
+import static org.apache.dolphinscheduler.common.Constants.ORG_QUARTZ_DATASOURCE_MYDS_CONNECTIONPROVIDER_CLASS;
+import static org.apache.dolphinscheduler.common.Constants.ORG_QUARTZ_JOBSTORE_ACQUIRETRIGGERSWITHINLOCK;
+import static org.apache.dolphinscheduler.common.Constants.ORG_QUARTZ_JOBSTORE_CLASS;
+import static org.apache.dolphinscheduler.common.Constants.ORG_QUARTZ_JOBSTORE_CLUSTERCHECKININTERVAL;
+import static org.apache.dolphinscheduler.common.Constants.ORG_QUARTZ_JOBSTORE_DATASOURCE;
+import static org.apache.dolphinscheduler.common.Constants.ORG_QUARTZ_JOBSTORE_DRIVERDELEGATECLASS;
+import static org.apache.dolphinscheduler.common.Constants.ORG_QUARTZ_JOBSTORE_ISCLUSTERED;
+import static org.apache.dolphinscheduler.common.Constants.ORG_QUARTZ_JOBSTORE_MISFIRETHRESHOLD;
+import static org.apache.dolphinscheduler.common.Constants.ORG_QUARTZ_JOBSTORE_TABLEPREFIX;
+import static org.apache.dolphinscheduler.common.Constants.ORG_QUARTZ_JOBSTORE_USEPROPERTIES;
+import static org.apache.dolphinscheduler.common.Constants.ORG_QUARTZ_SCHEDULER_INSTANCEID;
+import static org.apache.dolphinscheduler.common.Constants.ORG_QUARTZ_SCHEDULER_INSTANCENAME;
+import static org.apache.dolphinscheduler.common.Constants.ORG_QUARTZ_SCHEDULER_MAKESCHEDULERTHREADDAEMON;
+import static org.apache.dolphinscheduler.common.Constants.ORG_QUARTZ_THREADPOOL_CLASS;
+import static org.apache.dolphinscheduler.common.Constants.ORG_QUARTZ_THREADPOOL_MAKETHREADSDAEMONS;
+import static org.apache.dolphinscheduler.common.Constants.ORG_QUARTZ_THREADPOOL_THREADCOUNT;
+import static org.apache.dolphinscheduler.common.Constants.ORG_QUARTZ_THREADPOOL_THREADPRIORITY;
+import static org.apache.dolphinscheduler.common.Constants.PROJECT_ID;
+import static org.apache.dolphinscheduler.common.Constants.QUARTZ_ACQUIRETRIGGERSWITHINLOCK;
+import static org.apache.dolphinscheduler.common.Constants.QUARTZ_CLUSTERCHECKININTERVAL;
+import static org.apache.dolphinscheduler.common.Constants.QUARTZ_DATASOURCE;
+import static org.apache.dolphinscheduler.common.Constants.QUARTZ_INSTANCEID;
+import static org.apache.dolphinscheduler.common.Constants.QUARTZ_INSTANCENAME;
+import static org.apache.dolphinscheduler.common.Constants.QUARTZ_JOB_GROUP_PRIFIX;
+import static org.apache.dolphinscheduler.common.Constants.QUARTZ_JOB_PRIFIX;
+import static org.apache.dolphinscheduler.common.Constants.QUARTZ_MISFIRETHRESHOLD;
+import static org.apache.dolphinscheduler.common.Constants.QUARTZ_PROPERTIES_PATH;
+import static org.apache.dolphinscheduler.common.Constants.QUARTZ_TABLE_PREFIX;
+import static org.apache.dolphinscheduler.common.Constants.QUARTZ_THREADCOUNT;
+import static org.apache.dolphinscheduler.common.Constants.QUARTZ_THREADPRIORITY;
+import static org.apache.dolphinscheduler.common.Constants.SCHEDULE;
+import static org.apache.dolphinscheduler.common.Constants.SCHEDULE_ID;
+import static org.apache.dolphinscheduler.common.Constants.SPRING_DATASOURCE_DRIVER_CLASS_NAME;
+import static org.apache.dolphinscheduler.common.Constants.STRING_FALSE;
+import static org.apache.dolphinscheduler.common.Constants.STRING_TRUE;
+import static org.apache.dolphinscheduler.common.Constants.UNDERLINE;
+
+import static org.quartz.CronScheduleBuilder.cronSchedule;
+import static org.quartz.JobBuilder.newJob;
+import static org.quartz.TriggerBuilder.newTrigger;
+
+import org.apache.dolphinscheduler.common.utils.JSONUtils;
+import org.apache.dolphinscheduler.common.utils.StringUtils;
+import org.apache.dolphinscheduler.dao.entity.Schedule;
+import org.apache.dolphinscheduler.service.exceptions.ServiceException;
+
import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;
-import org.apache.commons.lang.StringUtils;
-import org.apache.dolphinscheduler.common.utils.*;
-import org.apache.dolphinscheduler.dao.entity.Schedule;
-import org.quartz.*;
+
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.quartz.CronTrigger;
+import org.quartz.Job;
+import org.quartz.JobDetail;
+import org.quartz.JobKey;
+import org.quartz.Scheduler;
+import org.quartz.SchedulerException;
+import org.quartz.TriggerKey;
import org.quartz.impl.StdSchedulerFactory;
import org.quartz.impl.jdbcjobstore.JobStoreTX;
import org.quartz.impl.jdbcjobstore.PostgreSQLDelegate;
@@ -32,300 +93,289 @@ import org.quartz.simpl.SimpleThreadPool;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.util.*;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import static org.apache.dolphinscheduler.common.Constants.*;
-import static org.quartz.CronScheduleBuilder.cronSchedule;
-import static org.quartz.JobBuilder.newJob;
-import static org.quartz.TriggerBuilder.newTrigger;
-
/**
* single Quartz executors instance
*/
public class QuartzExecutors {
- /**
- * logger of QuartzExecutors
- */
- private static final Logger logger = LoggerFactory.getLogger(QuartzExecutors.class);
-
- /**
- * read write lock
- */
- private final ReadWriteLock lock = new ReentrantReadWriteLock();
-
- /**
- * A Scheduler maintains a registry of org.quartz.JobDetail and Trigger.
- */
- private static Scheduler scheduler;
-
- /**
- * load conf
- */
- private static Configuration conf;
-
- private static final class Holder {
- private static final QuartzExecutors instance = new QuartzExecutors();
- }
-
-
- private QuartzExecutors() {
- try {
- conf = new PropertiesConfiguration(QUARTZ_PROPERTIES_PATH);
- init();
- }catch (ConfigurationException e){
- logger.warn("not loaded quartz configuration file, will used default value",e);
+ /**
+ * logger of QuartzExecutors
+ */
+ private static final Logger logger = LoggerFactory.getLogger(QuartzExecutors.class);
+
+ /**
+ * read write lock
+ */
+ private final ReadWriteLock lock = new ReentrantReadWriteLock();
+
+ /**
+ * A Scheduler maintains a registry of org.quartz.JobDetail and Trigger.
+ */
+ private static Scheduler scheduler;
+
+ /**
+ * load conf
+ */
+ private static Configuration conf;
+
+ private static final class Holder {
+ private static final QuartzExecutors instance = new QuartzExecutors();
}
- }
-
- /**
- * thread safe and performance promote
- * @return instance of Quartz Executors
- */
- public static QuartzExecutors getInstance() {
- return Holder.instance;
- }
-
-
- /**
- * init
- *
- * Returns a client-usable handle to a Scheduler.
- */
- private void init() {
- try {
- StdSchedulerFactory schedulerFactory = new StdSchedulerFactory();
- Properties properties = new Properties();
-
- String dataSourceDriverClass = org.apache.dolphinscheduler.dao.utils.PropertyUtils.getString(SPRING_DATASOURCE_DRIVER_CLASS_NAME);
- if (dataSourceDriverClass.equals(ORG_POSTGRESQL_DRIVER)){
- properties.setProperty(ORG_QUARTZ_JOBSTORE_DRIVERDELEGATECLASS,conf.getString(ORG_QUARTZ_JOBSTORE_DRIVERDELEGATECLASS, PostgreSQLDelegate.class.getName()));
- } else {
- properties.setProperty(ORG_QUARTZ_JOBSTORE_DRIVERDELEGATECLASS,conf.getString(ORG_QUARTZ_JOBSTORE_DRIVERDELEGATECLASS, StdJDBCDelegate.class.getName()));
- }
- properties.setProperty(ORG_QUARTZ_SCHEDULER_INSTANCENAME, conf.getString(ORG_QUARTZ_SCHEDULER_INSTANCENAME, QUARTZ_INSTANCENAME));
- properties.setProperty(ORG_QUARTZ_SCHEDULER_INSTANCEID, conf.getString(ORG_QUARTZ_SCHEDULER_INSTANCEID, QUARTZ_INSTANCEID));
- properties.setProperty(ORG_QUARTZ_SCHEDULER_MAKESCHEDULERTHREADDAEMON,conf.getString(ORG_QUARTZ_SCHEDULER_MAKESCHEDULERTHREADDAEMON,STRING_TRUE));
- properties.setProperty(ORG_QUARTZ_JOBSTORE_USEPROPERTIES,conf.getString(ORG_QUARTZ_JOBSTORE_USEPROPERTIES,STRING_FALSE));
- properties.setProperty(ORG_QUARTZ_THREADPOOL_CLASS,conf.getString(ORG_QUARTZ_THREADPOOL_CLASS, SimpleThreadPool.class.getName()));
- properties.setProperty(ORG_QUARTZ_THREADPOOL_MAKETHREADSDAEMONS,conf.getString(ORG_QUARTZ_THREADPOOL_MAKETHREADSDAEMONS,STRING_TRUE));
- properties.setProperty(ORG_QUARTZ_THREADPOOL_THREADCOUNT,conf.getString(ORG_QUARTZ_THREADPOOL_THREADCOUNT, QUARTZ_THREADCOUNT));
- properties.setProperty(ORG_QUARTZ_THREADPOOL_THREADPRIORITY,conf.getString(ORG_QUARTZ_THREADPOOL_THREADPRIORITY, QUARTZ_THREADPRIORITY));
- properties.setProperty(ORG_QUARTZ_JOBSTORE_CLASS,conf.getString(ORG_QUARTZ_JOBSTORE_CLASS, JobStoreTX.class.getName()));
- properties.setProperty(ORG_QUARTZ_JOBSTORE_TABLEPREFIX,conf.getString(ORG_QUARTZ_JOBSTORE_TABLEPREFIX, QUARTZ_TABLE_PREFIX));
- properties.setProperty(ORG_QUARTZ_JOBSTORE_ISCLUSTERED,conf.getString(ORG_QUARTZ_JOBSTORE_ISCLUSTERED,STRING_TRUE));
- properties.setProperty(ORG_QUARTZ_JOBSTORE_MISFIRETHRESHOLD,conf.getString(ORG_QUARTZ_JOBSTORE_MISFIRETHRESHOLD, QUARTZ_MISFIRETHRESHOLD));
- properties.setProperty(ORG_QUARTZ_JOBSTORE_CLUSTERCHECKININTERVAL,conf.getString(ORG_QUARTZ_JOBSTORE_CLUSTERCHECKININTERVAL, QUARTZ_CLUSTERCHECKININTERVAL));
- properties.setProperty(ORG_QUARTZ_JOBSTORE_ACQUIRETRIGGERSWITHINLOCK,conf.getString(ORG_QUARTZ_JOBSTORE_ACQUIRETRIGGERSWITHINLOCK, QUARTZ_ACQUIRETRIGGERSWITHINLOCK));
- properties.setProperty(ORG_QUARTZ_JOBSTORE_DATASOURCE,conf.getString(ORG_QUARTZ_JOBSTORE_DATASOURCE, QUARTZ_DATASOURCE));
- properties.setProperty(ORG_QUARTZ_DATASOURCE_MYDS_CONNECTIONPROVIDER_CLASS,conf.getString(ORG_QUARTZ_DATASOURCE_MYDS_CONNECTIONPROVIDER_CLASS,DruidConnectionProvider.class.getName()));
-
- schedulerFactory.initialize(properties);
- scheduler = schedulerFactory.getScheduler();
-
- } catch (SchedulerException e) {
- logger.error(e.getMessage(),e);
- System.exit(1);
+
+ private QuartzExecutors() {
+ try {
+ conf = new PropertiesConfiguration(QUARTZ_PROPERTIES_PATH);
+ init();
+ } catch (ConfigurationException e) {
+ logger.warn("not loaded quartz configuration file, will used default value", e);
+ }
}
- }
-
- /**
- * Whether the scheduler has been started.
- *
- * @throws SchedulerException scheduler exception
- */
- public void start() throws SchedulerException {
- if (!scheduler.isStarted()){
- scheduler.start();
- logger.info("Quartz service started" );
+ /**
+ * thread safe and performance promote
+ *
+ * @return instance of Quartz Executors
+ */
+ public static QuartzExecutors getInstance() {
+ return Holder.instance;
}
- }
-
- /**
- * stop all scheduled tasks
- *
- * Halts the Scheduler's firing of Triggers,
- * and cleans up all resources associated with the Scheduler.
- *
- * The scheduler cannot be re-started.
- * @throws SchedulerException scheduler exception
- */
- public void shutdown() throws SchedulerException {
- if (!scheduler.isShutdown()) {
- // don't wait for the task to complete
- scheduler.shutdown();
- logger.info("Quartz service stopped, and halt all tasks");
+
+ /**
+ * init
+ *
+ * Returns a client-usable handle to a Scheduler.
+ */
+ private void init() {
+ try {
+ StdSchedulerFactory schedulerFactory = new StdSchedulerFactory();
+ Properties properties = new Properties();
+
+ String dataSourceDriverClass = org.apache.dolphinscheduler.dao.utils.PropertyUtils.getString(SPRING_DATASOURCE_DRIVER_CLASS_NAME);
+ if (dataSourceDriverClass.equals(ORG_POSTGRESQL_DRIVER)) {
+ properties.setProperty(ORG_QUARTZ_JOBSTORE_DRIVERDELEGATECLASS, conf.getString(ORG_QUARTZ_JOBSTORE_DRIVERDELEGATECLASS, PostgreSQLDelegate.class.getName()));
+ } else {
+ properties.setProperty(ORG_QUARTZ_JOBSTORE_DRIVERDELEGATECLASS, conf.getString(ORG_QUARTZ_JOBSTORE_DRIVERDELEGATECLASS, StdJDBCDelegate.class.getName()));
+ }
+ properties.setProperty(ORG_QUARTZ_SCHEDULER_INSTANCENAME, conf.getString(ORG_QUARTZ_SCHEDULER_INSTANCENAME, QUARTZ_INSTANCENAME));
+ properties.setProperty(ORG_QUARTZ_SCHEDULER_INSTANCEID, conf.getString(ORG_QUARTZ_SCHEDULER_INSTANCEID, QUARTZ_INSTANCEID));
+ properties.setProperty(ORG_QUARTZ_SCHEDULER_MAKESCHEDULERTHREADDAEMON, conf.getString(ORG_QUARTZ_SCHEDULER_MAKESCHEDULERTHREADDAEMON, STRING_TRUE));
+ properties.setProperty(ORG_QUARTZ_JOBSTORE_USEPROPERTIES, conf.getString(ORG_QUARTZ_JOBSTORE_USEPROPERTIES, STRING_FALSE));
+ properties.setProperty(ORG_QUARTZ_THREADPOOL_CLASS, conf.getString(ORG_QUARTZ_THREADPOOL_CLASS, SimpleThreadPool.class.getName()));
+ properties.setProperty(ORG_QUARTZ_THREADPOOL_MAKETHREADSDAEMONS, conf.getString(ORG_QUARTZ_THREADPOOL_MAKETHREADSDAEMONS, STRING_TRUE));
+ properties.setProperty(ORG_QUARTZ_THREADPOOL_THREADCOUNT, conf.getString(ORG_QUARTZ_THREADPOOL_THREADCOUNT, QUARTZ_THREADCOUNT));
+ properties.setProperty(ORG_QUARTZ_THREADPOOL_THREADPRIORITY, conf.getString(ORG_QUARTZ_THREADPOOL_THREADPRIORITY, QUARTZ_THREADPRIORITY));
+ properties.setProperty(ORG_QUARTZ_JOBSTORE_CLASS, conf.getString(ORG_QUARTZ_JOBSTORE_CLASS, JobStoreTX.class.getName()));
+ properties.setProperty(ORG_QUARTZ_JOBSTORE_TABLEPREFIX, conf.getString(ORG_QUARTZ_JOBSTORE_TABLEPREFIX, QUARTZ_TABLE_PREFIX));
+ properties.setProperty(ORG_QUARTZ_JOBSTORE_ISCLUSTERED, conf.getString(ORG_QUARTZ_JOBSTORE_ISCLUSTERED, STRING_TRUE));
+ properties.setProperty(ORG_QUARTZ_JOBSTORE_MISFIRETHRESHOLD, conf.getString(ORG_QUARTZ_JOBSTORE_MISFIRETHRESHOLD, QUARTZ_MISFIRETHRESHOLD));
+ properties.setProperty(ORG_QUARTZ_JOBSTORE_CLUSTERCHECKININTERVAL, conf.getString(ORG_QUARTZ_JOBSTORE_CLUSTERCHECKININTERVAL, QUARTZ_CLUSTERCHECKININTERVAL));
+ properties.setProperty(ORG_QUARTZ_JOBSTORE_ACQUIRETRIGGERSWITHINLOCK, conf.getString(ORG_QUARTZ_JOBSTORE_ACQUIRETRIGGERSWITHINLOCK, QUARTZ_ACQUIRETRIGGERSWITHINLOCK));
+ properties.setProperty(ORG_QUARTZ_JOBSTORE_DATASOURCE, conf.getString(ORG_QUARTZ_JOBSTORE_DATASOURCE, QUARTZ_DATASOURCE));
+ properties.setProperty(ORG_QUARTZ_DATASOURCE_MYDS_CONNECTIONPROVIDER_CLASS, conf.getString(ORG_QUARTZ_DATASOURCE_MYDS_CONNECTIONPROVIDER_CLASS, DruidConnectionProvider.class.getName()));
+
+ schedulerFactory.initialize(properties);
+ scheduler = schedulerFactory.getScheduler();
+
+ } catch (SchedulerException e) {
+ logger.error(e.getMessage(), e);
+ System.exit(1);
+ }
+
}
- }
-
-
- /**
- * add task trigger , if this task already exists, return this task with updated trigger
- *
- * @param clazz job class name
- * @param jobName job name
- * @param jobGroupName job group name
- * @param startDate job start date
- * @param endDate job end date
- * @param cronExpression cron expression
- * @param jobDataMap job parameters data map
- */
- public void addJob(Class<? extends Job> clazz,String jobName,String jobGroupName,Date startDate, Date endDate,
- String cronExpression,
- Map<String, Object> jobDataMap) {
- lock.writeLock().lock();
- try {
-
- JobKey jobKey = new JobKey(jobName, jobGroupName);
- JobDetail jobDetail;
- //add a task (if this task already exists, return this task directly)
- if (scheduler.checkExists(jobKey)) {
-
- jobDetail = scheduler.getJobDetail(jobKey);
- if (jobDataMap != null) {
- jobDetail.getJobDataMap().putAll(jobDataMap);
+
+ /**
+ * Whether the scheduler has been started.
+ *
+ * @throws SchedulerException scheduler exception
+ */
+ public void start() throws SchedulerException {
+ if (!scheduler.isStarted()) {
+ scheduler.start();
+ logger.info("Quartz service started");
}
- } else {
- jobDetail = newJob(clazz).withIdentity(jobKey).build();
+ }
- if (jobDataMap != null) {
- jobDetail.getJobDataMap().putAll(jobDataMap);
+ /**
+ * stop all scheduled tasks
+ *
+ * Halts the Scheduler's firing of Triggers,
+ * and cleans up all resources associated with the Scheduler.
+ *
+ * The scheduler cannot be re-started.
+ *
+ * @throws SchedulerException scheduler exception
+ */
+ public void shutdown() throws SchedulerException {
+ if (!scheduler.isShutdown()) {
+ // don't wait for the task to complete
+ scheduler.shutdown();
+ logger.info("Quartz service stopped, and halt all tasks");
}
+ }
- scheduler.addJob(jobDetail, false, true);
-
- logger.info("Add job, job name: {}, group name: {}",
- jobName, jobGroupName);
- }
-
- TriggerKey triggerKey = new TriggerKey(jobName, jobGroupName);
- /**
- * Instructs the Scheduler that upon a mis-fire
- * situation, the CronTrigger wants to have it's
- * next-fire-time updated to the next time in the schedule after the
- * current time (taking into account any associated Calendar),
- * but it does not want to be fired now.
- */
- CronTrigger cronTrigger = newTrigger().withIdentity(triggerKey).startAt(startDate).endAt(endDate)
- .withSchedule(cronSchedule(cronExpression).withMisfireHandlingInstructionDoNothing())
- .forJob(jobDetail).build();
-
- if (scheduler.checkExists(triggerKey)) {
- // updateProcessInstance scheduler trigger when scheduler cycle changes
- CronTrigger oldCronTrigger = (CronTrigger) scheduler.getTrigger(triggerKey);
- String oldCronExpression = oldCronTrigger.getCronExpression();
-
- if (!StringUtils.equalsIgnoreCase(cronExpression,oldCronExpression)) {
- // reschedule job trigger
- scheduler.rescheduleJob(triggerKey, cronTrigger);
- logger.info("reschedule job trigger, triggerName: {}, triggerGroupName: {}, cronExpression: {}, startDate: {}, endDate: {}",
- jobName, jobGroupName, cronExpression, startDate, endDate);
- }
- } else {
- scheduler.scheduleJob(cronTrigger);
- logger.info("schedule job trigger, triggerName: {}, triggerGroupName: {}, cronExpression: {}, startDate: {}, endDate: {}",
- jobName, jobGroupName, cronExpression, startDate, endDate);
- }
-
- } catch (Exception e) {
- logger.error("add job failed", e);
- throw new RuntimeException("add job failed", e);
- } finally {
- lock.writeLock().unlock();
+ /**
+ * add task trigger , if this task already exists, return this task with updated trigger
+ *
+ * @param clazz job class name
+ * @param jobName job name
+ * @param jobGroupName job group name
+ * @param startDate job start date
+ * @param endDate job end date
+ * @param cronExpression cron expression
+ * @param jobDataMap job parameters data map
+ */
+ public void addJob(Class<? extends Job> clazz, String jobName, String jobGroupName, Date startDate, Date endDate,
+ String cronExpression,
+ Map<String, Object> jobDataMap) {
+ lock.writeLock().lock();
+ try {
+
+ JobKey jobKey = new JobKey(jobName, jobGroupName);
+ JobDetail jobDetail;
+ //add a task (if this task already exists, return this task directly)
+ if (scheduler.checkExists(jobKey)) {
+
+ jobDetail = scheduler.getJobDetail(jobKey);
+ if (jobDataMap != null) {
+ jobDetail.getJobDataMap().putAll(jobDataMap);
+ }
+ } else {
+ jobDetail = newJob(clazz).withIdentity(jobKey).build();
+
+ if (jobDataMap != null) {
+ jobDetail.getJobDataMap().putAll(jobDataMap);
+ }
+
+ scheduler.addJob(jobDetail, false, true);
+
+ logger.info("Add job, job name: {}, group name: {}",
+ jobName, jobGroupName);
+ }
+
+ TriggerKey triggerKey = new TriggerKey(jobName, jobGroupName);
+ /**
+ * Instructs the Scheduler that upon a mis-fire
+ * situation, the CronTrigger wants to have it's
+ * next-fire-time updated to the next time in the schedule after the
+ * current time (taking into account any associated Calendar),
+ * but it does not want to be fired now.
+ */
+ CronTrigger cronTrigger = newTrigger().withIdentity(triggerKey).startAt(startDate).endAt(endDate)
+ .withSchedule(cronSchedule(cronExpression).withMisfireHandlingInstructionDoNothing())
+ .forJob(jobDetail).build();
+
+ if (scheduler.checkExists(triggerKey)) {
+ // updateProcessInstance scheduler trigger when scheduler cycle changes
+ CronTrigger oldCronTrigger = (CronTrigger) scheduler.getTrigger(triggerKey);
+ String oldCronExpression = oldCronTrigger.getCronExpression();
+
+ if (!StringUtils.equalsIgnoreCase(cronExpression, oldCronExpression)) {
+ // reschedule job trigger
+ scheduler.rescheduleJob(triggerKey, cronTrigger);
+ logger.info("reschedule job trigger, triggerName: {}, triggerGroupName: {}, cronExpression: {}, startDate: {}, endDate: {}",
+ jobName, jobGroupName, cronExpression, startDate, endDate);
+ }
+ } else {
+ scheduler.scheduleJob(cronTrigger);
+ logger.info("schedule job trigger, triggerName: {}, triggerGroupName: {}, cronExpression: {}, startDate: {}, endDate: {}",
+ jobName, jobGroupName, cronExpression, startDate, endDate);
+ }
+
+ } catch (Exception e) {
+ throw new ServiceException("add job failed", e);
+ } finally {
+ lock.writeLock().unlock();
+ }
}
- }
-
-
- /**
- * delete job
- *
- * @param jobName job name
- * @param jobGroupName job group name
- * @return true if the Job was found and deleted.
- */
- public boolean deleteJob(String jobName, String jobGroupName) {
- lock.writeLock().lock();
- try {
- JobKey jobKey = new JobKey(jobName,jobGroupName);
- if(scheduler.checkExists(jobKey)){
- logger.info("try to delete job, job name: {}, job group name: {},", jobName, jobGroupName);
- return scheduler.deleteJob(jobKey);
- }else {
- return true;
- }
-
- } catch (SchedulerException e) {
- logger.error("delete job : {} failed",jobName, e);
- } finally {
- lock.writeLock().unlock();
+
+ /**
+ * delete job
+ *
+ * @param jobName job name
+ * @param jobGroupName job group name
+ * @return true if the Job was found and deleted.
+ */
+ public boolean deleteJob(String jobName, String jobGroupName) {
+ lock.writeLock().lock();
+ try {
+ JobKey jobKey = new JobKey(jobName, jobGroupName);
+ if (scheduler.checkExists(jobKey)) {
+ logger.info("try to delete job, job name: {}, job group name: {},", jobName, jobGroupName);
+ return scheduler.deleteJob(jobKey);
+ } else {
+ return true;
+ }
+
+ } catch (SchedulerException e) {
+ logger.error("delete job : {} failed", jobName, e);
+ } finally {
+ lock.writeLock().unlock();
+ }
+ return false;
+ }
+
+ /**
+ * delete all jobs in job group
+ *
+ * @param jobGroupName job group name
+ * @return true if all of the Jobs were found and deleted, false if
+ * one or more were not deleted.
+ */
+ public boolean deleteAllJobs(String jobGroupName) {
+ lock.writeLock().lock();
+ try {
+ logger.info("try to delete all jobs in job group: {}", jobGroupName);
+ List<JobKey> jobKeys = new ArrayList<>();
+ jobKeys.addAll(scheduler.getJobKeys(GroupMatcher.groupEndsWith(jobGroupName)));
+
+ return scheduler.deleteJobs(jobKeys);
+ } catch (SchedulerException e) {
+ logger.error("delete all jobs in job group: {} failed", jobGroupName, e);
+ } finally {
+ lock.writeLock().unlock();
+ }
+ return false;
}
- return false;
- }
-
- /**
- * delete all jobs in job group
- *
- * @param jobGroupName job group name
- *
- * @return true if all of the Jobs were found and deleted, false if
- * one or more were not deleted.
- */
- public boolean deleteAllJobs(String jobGroupName) {
- lock.writeLock().lock();
- try {
- logger.info("try to delete all jobs in job group: {}", jobGroupName);
- List<JobKey> jobKeys = new ArrayList<>();
- jobKeys.addAll(scheduler.getJobKeys(GroupMatcher.groupEndsWith(jobGroupName)));
-
- return scheduler.deleteJobs(jobKeys);
- } catch (SchedulerException e) {
- logger.error("delete all jobs in job group: {} failed",jobGroupName, e);
- } finally {
- lock.writeLock().unlock();
+
+ /**
+ * build job name
+ *
+ * @param processId process id
+ * @return job name
+ */
+ public static String buildJobName(int processId) {
+ StringBuilder sb = new StringBuilder(30);
+ sb.append(QUARTZ_JOB_PRIFIX).append(UNDERLINE).append(processId);
+ return sb.toString();
+ }
+
+ /**
+ * build job group name
+ *
+ * @param projectId project id
+ * @return job group name
+ */
+ public static String buildJobGroupName(int projectId) {
+ StringBuilder sb = new StringBuilder(30);
+ sb.append(QUARTZ_JOB_GROUP_PRIFIX).append(UNDERLINE).append(projectId);
+ return sb.toString();
+ }
+
+ /**
+ * add params to map
+ *
+ * @param projectId project id
+ * @param scheduleId schedule id
+ * @param schedule schedule
+ * @return data map
+ */
+ public static Map<String, Object> buildDataMap(int projectId, int scheduleId, Schedule schedule) {
+ Map<String, Object> dataMap = new HashMap<>(3);
+ dataMap.put(PROJECT_ID, projectId);
+ dataMap.put(SCHEDULE_ID, scheduleId);
+ dataMap.put(SCHEDULE, JSONUtils.toJsonString(schedule));
+
+ return dataMap;
}
- return false;
- }
-
- /**
- * build job name
- * @param processId process id
- * @return job name
- */
- public static String buildJobName(int processId) {
- StringBuilder sb = new StringBuilder(30);
- sb.append(QUARTZ_JOB_PRIFIX).append(UNDERLINE).append(processId);
- return sb.toString();
- }
-
- /**
- * build job group name
- * @param projectId project id
- * @return job group name
- */
- public static String buildJobGroupName(int projectId) {
- StringBuilder sb = new StringBuilder(30);
- sb.append(QUARTZ_JOB_GROUP_PRIFIX).append(UNDERLINE).append(projectId);
- return sb.toString();
- }
-
- /**
- * add params to map
- *
- * @param projectId project id
- * @param scheduleId schedule id
- * @param schedule schedule
- * @return data map
- */
- public static Map<String, Object> buildDataMap(int projectId, int scheduleId, Schedule schedule) {
- Map<String, Object> dataMap = new HashMap<>(3);
- dataMap.put(PROJECT_ID, projectId);
- dataMap.put(SCHEDULE_ID, scheduleId);
- dataMap.put(SCHEDULE, JSONUtils.toJsonString(schedule));
-
- return dataMap;
- }
}
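
The heart of QuartzExecutors.addJob above is an add-or-reschedule pattern: register the JobDetail if it is not stored yet, build a cron trigger with the do-nothing misfire policy, then schedule it or reschedule the existing trigger when the cron expression changed. A condensed sketch of that pattern with plain Quartz API (the class name, the no-op job, and the simplified scheduler setup are illustrative, not the project's code):

import static org.quartz.CronScheduleBuilder.cronSchedule;
import static org.quartz.JobBuilder.newJob;
import static org.quartz.TriggerBuilder.newTrigger;

import org.quartz.CronTrigger;
import org.quartz.Job;
import org.quartz.JobDetail;
import org.quartz.JobExecutionContext;
import org.quartz.JobKey;
import org.quartz.Scheduler;
import org.quartz.SchedulerException;
import org.quartz.TriggerKey;
import org.quartz.impl.StdSchedulerFactory;

public final class CronJobRegistrar {

    public static void register(Scheduler scheduler, Class<? extends Job> clazz,
                                String name, String group, String cron) throws SchedulerException {
        JobKey jobKey = new JobKey(name, group);
        JobDetail jobDetail;
        if (scheduler.checkExists(jobKey)) {
            jobDetail = scheduler.getJobDetail(jobKey);
        } else {
            jobDetail = newJob(clazz).withIdentity(jobKey).build();
            // replace=false, storeNonDurableWhileAwaitingScheduling=true, as in addJob above
            scheduler.addJob(jobDetail, false, true);
        }

        TriggerKey triggerKey = new TriggerKey(name, group);
        CronTrigger trigger = newTrigger().withIdentity(triggerKey)
                .withSchedule(cronSchedule(cron).withMisfireHandlingInstructionDoNothing())
                .forJob(jobDetail)
                .build();

        if (scheduler.checkExists(triggerKey)) {
            scheduler.rescheduleJob(triggerKey, trigger); // cron expression may have changed
        } else {
            scheduler.scheduleJob(trigger);
        }
    }

    // Trivial job so the sketch compiles on its own.
    public static class NoopJob implements Job {
        @Override
        public void execute(JobExecutionContext context) {
            // no-op
        }
    }

    public static void main(String[] args) throws SchedulerException {
        Scheduler scheduler = StdSchedulerFactory.getDefaultScheduler();
        register(scheduler, NoopJob.class, "job_1", "jobgroup_1", "0 0/5 * * * ?");
        scheduler.start();
    }
}

The do-nothing misfire instruction matters for a clustered job store: a missed fire is skipped and the trigger simply waits for its next slot instead of firing a backlog of runs immediately.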
diff --git a/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/quartz/cron/AbstractCycle.java b/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/quartz/cron/AbstractCycle.java
index 0a2e31b610..60c862340b 100644
--- a/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/quartz/cron/AbstractCycle.java
+++ b/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/quartz/cron/AbstractCycle.java
@@ -14,159 +14,177 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
package org.apache.dolphinscheduler.service.quartz.cron;
+import org.apache.dolphinscheduler.common.enums.CycleEnum;
+
import com.cronutils.model.Cron;
import com.cronutils.model.field.CronField;
import com.cronutils.model.field.CronFieldName;
-import com.cronutils.model.field.expression.*;
-import org.apache.dolphinscheduler.common.enums.CycleEnum;
+import com.cronutils.model.field.expression.Always;
+import com.cronutils.model.field.expression.And;
+import com.cronutils.model.field.expression.Between;
+import com.cronutils.model.field.expression.Every;
+import com.cronutils.model.field.expression.FieldExpression;
+import com.cronutils.model.field.expression.On;
/**
* Cycle
*/
public abstract class AbstractCycle {
- protected Cron cron;
-
- protected CronField minField;
- protected CronField hourField;
- protected CronField dayOfMonthField;
- protected CronField dayOfWeekField;
- protected CronField monthField;
- protected CronField yearField;
-
- public CycleLinks addCycle(AbstractCycle cycle) {
- return new CycleLinks(this.cron).addCycle(this).addCycle(cycle);
- }
-
- /**
- * cycle constructor
- * @param cron cron
- */
- public AbstractCycle(Cron cron) {
- if (cron == null) {
- throw new IllegalArgumentException("cron must not be null!");
+ protected Cron cron;
+
+ protected CronField minField;
+ protected CronField hourField;
+ protected CronField dayOfMonthField;
+ protected CronField dayOfWeekField;
+ protected CronField monthField;
+ protected CronField yearField;
+
+ public CycleLinks addCycle(AbstractCycle cycle) {
+ return new CycleLinks(this.cron).addCycle(this).addCycle(cycle);
+ }
+
+ /**
+ * cycle constructor
+ *
+ * @param cron cron
+ */
+ protected AbstractCycle(Cron cron) {
+ if (cron == null) {
+ throw new IllegalArgumentException("cron must not be null!");
+ }
+
+ this.cron = cron;
+ this.minField = cron.retrieve(CronFieldName.MINUTE);
+ this.hourField = cron.retrieve(CronFieldName.HOUR);
+ this.dayOfMonthField = cron.retrieve(CronFieldName.DAY_OF_MONTH);
+ this.dayOfWeekField = cron.retrieve(CronFieldName.DAY_OF_WEEK);
+ this.monthField = cron.retrieve(CronFieldName.MONTH);
+ this.yearField = cron.retrieve(CronFieldName.YEAR);
+ }
+
+ /**
+ * whether the minute field has a value
+ *
+ * @return if minute field has a value return true,else return false
+ */
+ protected boolean minFiledIsSetAll() {
+ FieldExpression minFieldExpression = minField.getExpression();
+ return (minFieldExpression instanceof Every || minFieldExpression instanceof Always
+ || minFieldExpression instanceof Between || minFieldExpression instanceof And
+ || minFieldExpression instanceof On);
+ }
+
+ /**
+ * whether the minute field has a value of every or always
+ *
+ * @return if minute field has a value of every or always return true,else return false
+ */
+ protected boolean minFiledIsEvery() {
+ FieldExpression minFieldExpression = minField.getExpression();
+ return (minFieldExpression instanceof Every || minFieldExpression instanceof Always);
+ }
+
+ /**
+ * whether the hour field has a value
+ *
+ * @return if hour field has a value return true,else return false
+ */
+ protected boolean hourFiledIsSetAll() {
+ FieldExpression hourFieldExpression = hourField.getExpression();
+ return (hourFieldExpression instanceof Every || hourFieldExpression instanceof Always
+ || hourFieldExpression instanceof Between || hourFieldExpression instanceof And
+ || hourFieldExpression instanceof On);
+ }
+
+ /**
+ * whether the hour field has a value of every or always
+ *
+ * @return if hour field has a value of every or always return true,else return false
+ */
+ protected boolean hourFiledIsEvery() {
+ FieldExpression hourFieldExpression = hourField.getExpression();
+ return (hourFieldExpression instanceof Every || hourFieldExpression instanceof Always);
+ }
+
+ /**
+ * whether the day Of month field has a value
+ *
+ * @return if day Of month field has a value return true,else return false
+ */
+ protected boolean dayOfMonthFieldIsSetAll() {
+ return (dayOfMonthField.getExpression() instanceof Every || dayOfMonthField.getExpression() instanceof Always
+ || dayOfMonthField.getExpression() instanceof Between || dayOfMonthField.getExpression() instanceof And
+ || dayOfMonthField.getExpression() instanceof On);
+ }
+
+ /**
+ * whether the day Of Month field has a value of every or always
+ *
+ * @return if day Of Month field has a value of every or always return true,else return false
+ */
+ protected boolean dayOfMonthFieldIsEvery() {
+ return (dayOfMonthField.getExpression() instanceof Every || dayOfMonthField.getExpression() instanceof Always);
+ }
+
+ /**
+ * whether month field has a value
+ *
+ * @return if month field has a value return true,else return false
+ */
+ protected boolean monthFieldIsSetAll() {
+ FieldExpression monthFieldExpression = monthField.getExpression();
+ return (monthFieldExpression instanceof Every || monthFieldExpression instanceof Always
+ || monthFieldExpression instanceof Between || monthFieldExpression instanceof And
+ || monthFieldExpression instanceof On);
+ }
+
+ /**
+ * whether the month field has a value of every or always
+ *
+ * @return if month field has a value of every or always return true,else return false
+ */
+ protected boolean monthFieldIsEvery() {
+ FieldExpression monthFieldExpression = monthField.getExpression();
+ return (monthFieldExpression instanceof Every || monthFieldExpression instanceof Always);
+ }
+
+ /**
+ * whether the day Of week field has a value
+ *
+ * @return if day Of week field has a value return true,else return false
+ */
+ protected boolean dayofWeekFieldIsSetAll() {
+ FieldExpression dayOfWeekFieldExpression = dayOfWeekField.getExpression();
+ return (dayOfWeekFieldExpression instanceof Every || dayOfWeekFieldExpression instanceof Always
+ || dayOfWeekFieldExpression instanceof Between || dayOfWeekFieldExpression instanceof And
+ || dayOfWeekFieldExpression instanceof On);
+ }
+
+ /**
+ * whether the day Of week field has a value of every or always
+ *
+ * @return if day Of week field has a value of every or always return true,else return false
+ */
+ protected boolean dayofWeekFieldIsEvery() {
+ FieldExpression dayOfWeekFieldExpression = dayOfWeekField.getExpression();
+ return (dayOfWeekFieldExpression instanceof Every || dayOfWeekFieldExpression instanceof Always);
}
- this.cron = cron;
- this.minField = cron.retrieve(CronFieldName.MINUTE);
- this.hourField = cron.retrieve(CronFieldName.HOUR);
- this.dayOfMonthField = cron.retrieve(CronFieldName.DAY_OF_MONTH);
- this.dayOfWeekField = cron.retrieve(CronFieldName.DAY_OF_WEEK);
- this.monthField = cron.retrieve(CronFieldName.MONTH);
- this.yearField = cron.retrieve(CronFieldName.YEAR);
- }
-
- /**
- * whether the minute field has a value
- * @return if minute field has a value return true,else return false
- */
- protected boolean minFiledIsSetAll(){
- FieldExpression minFieldExpression = minField.getExpression();
- return (minFieldExpression instanceof Every || minFieldExpression instanceof Always
- || minFieldExpression instanceof Between || minFieldExpression instanceof And
- || minFieldExpression instanceof On);
- }
-
-
- /**
- * whether the minute field has a value of every or always
- * @return if minute field has a value of every or always return true,else return false
- */
- protected boolean minFiledIsEvery(){
- FieldExpression minFieldExpression = minField.getExpression();
- return (minFieldExpression instanceof Every || minFieldExpression instanceof Always);
- }
-
- /**
- * whether the hour field has a value
- * @return if hour field has a value return true,else return false
- */
- protected boolean hourFiledIsSetAll(){
- FieldExpression hourFieldExpression = hourField.getExpression();
- return (hourFieldExpression instanceof Every || hourFieldExpression instanceof Always
- || hourFieldExpression instanceof Between || hourFieldExpression instanceof And
- || hourFieldExpression instanceof On);
- }
-
- /**
- * whether the hour field has a value of every or always
- * @return if hour field has a value of every or always return true,else return false
- */
- protected boolean hourFiledIsEvery(){
- FieldExpression hourFieldExpression = hourField.getExpression();
- return (hourFieldExpression instanceof Every || hourFieldExpression instanceof Always);
- }
-
- /**
- * whether the day Of month field has a value
- * @return if day Of month field has a value return true,else return false
- */
- protected boolean dayOfMonthFieldIsSetAll(){
- return (dayOfMonthField.getExpression() instanceof Every || dayOfMonthField.getExpression() instanceof Always
- || dayOfMonthField.getExpression() instanceof Between || dayOfMonthField.getExpression() instanceof And
- || dayOfMonthField.getExpression() instanceof On);
- }
-
-
- /**
- * whether the day Of Month field has a value of every or always
- * @return if day Of Month field has a value of every or always return true,else return false
- */
- protected boolean dayOfMonthFieldIsEvery(){
- return (dayOfMonthField.getExpression() instanceof Every || dayOfMonthField.getExpression() instanceof Always);
- }
-
- /**
- * whether month field has a value
- * @return if month field has a value return true,else return false
- */
- protected boolean monthFieldIsSetAll(){
- FieldExpression monthFieldExpression = monthField.getExpression();
- return (monthFieldExpression instanceof Every || monthFieldExpression instanceof Always
- || monthFieldExpression instanceof Between || monthFieldExpression instanceof And
- || monthFieldExpression instanceof On);
- }
-
- /**
- * whether the month field has a value of every or always
- * @return if month field has a value of every or always return true,else return false
- */
- protected boolean monthFieldIsEvery(){
- FieldExpression monthFieldExpression = monthField.getExpression();
- return (monthFieldExpression instanceof Every || monthFieldExpression instanceof Always);
- }
-
- /**
- * whether the day Of week field has a value
- * @return if day Of week field has a value return true,else return false
- */
- protected boolean dayofWeekFieldIsSetAll(){
- FieldExpression dayOfWeekFieldExpression = dayOfWeekField.getExpression();
- return (dayOfWeekFieldExpression instanceof Every || dayOfWeekFieldExpression instanceof Always
- || dayOfWeekFieldExpression instanceof Between || dayOfWeekFieldExpression instanceof And
- || dayOfWeekFieldExpression instanceof On);
- }
-
- /**
- * whether the day Of week field has a value of every or always
- * @return if day Of week field has a value of every or always return true,else return false
- */
- protected boolean dayofWeekFieldIsEvery(){
- FieldExpression dayOfWeekFieldExpression = dayOfWeekField.getExpression();
- return (dayOfWeekFieldExpression instanceof Every || dayOfWeekFieldExpression instanceof Always);
- }
-
- /**
- * get cycle enum
- * @return CycleEnum
- */
- protected abstract CycleEnum getCycle();
-
- /**
- * get mini level cycle enum
- * @return CycleEnum
- */
- protected abstract CycleEnum getMiniCycle();
+ /**
+ * get cycle enum
+ *
+ * @return CycleEnum
+ */
+ protected abstract CycleEnum getCycle();
+
+ /**
+ * get mini level cycle enum
+ *
+ * @return CycleEnum
+ */
+ protected abstract CycleEnum getMiniCycle();
}
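
AbstractCycle above classifies a cron expression by inspecting cron-utils field expression types (Every, Always, Between, And, On). The sketch below shows that inspection in isolation: parse a Quartz-style cron with cron-utils and test whether the minute field is effectively "every minute". The class name and sample expressions are illustrative.

import com.cronutils.model.Cron;
import com.cronutils.model.CronType;
import com.cronutils.model.definition.CronDefinitionBuilder;
import com.cronutils.model.field.CronField;
import com.cronutils.model.field.CronFieldName;
import com.cronutils.model.field.expression.Always;
import com.cronutils.model.field.expression.Every;
import com.cronutils.model.field.expression.FieldExpression;
import com.cronutils.parser.CronParser;

public final class CronFieldInspector {

    private CronFieldInspector() {
    }

    // Same check as minFiledIsEvery() above: the minute field is an Every/Always expression.
    public static boolean minuteFieldIsEvery(Cron cron) {
        CronField minute = cron.retrieve(CronFieldName.MINUTE);
        FieldExpression expression = minute.getExpression();
        return expression instanceof Every || expression instanceof Always;
    }

    public static void main(String[] args) {
        CronParser parser = new CronParser(CronDefinitionBuilder.instanceDefinitionFor(CronType.QUARTZ));
        System.out.println(minuteFieldIsEvery(parser.parse("0 0/5 * * * ? *"))); // true: 0/5 parses as Every
        System.out.println(minuteFieldIsEvery(parser.parse("0 0 12 * * ? *")));  // false: fixed minute parses as On
    }
}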
diff --git a/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/AbstractZKClient.java b/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/AbstractZKClient.java
index 8a7d891c2e..37d8f10c93 100644
--- a/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/AbstractZKClient.java
+++ b/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/AbstractZKClient.java
@@ -14,322 +14,329 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
package org.apache.dolphinscheduler.service.zk;
-import org.apache.curator.framework.recipes.locks.InterProcessMutex;
+import static org.apache.dolphinscheduler.common.Constants.ADD_ZK_OP;
+import static org.apache.dolphinscheduler.common.Constants.COLON;
+import static org.apache.dolphinscheduler.common.Constants.DELETE_ZK_OP;
+import static org.apache.dolphinscheduler.common.Constants.DIVISION_STRING;
+import static org.apache.dolphinscheduler.common.Constants.MASTER_PREFIX;
+import static org.apache.dolphinscheduler.common.Constants.SINGLE_SLASH;
+import static org.apache.dolphinscheduler.common.Constants.UNDERLINE;
+import static org.apache.dolphinscheduler.common.Constants.WORKER_PREFIX;
+
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ZKNodeType;
import org.apache.dolphinscheduler.common.model.Server;
import org.apache.dolphinscheduler.common.utils.ResInfo;
import org.apache.dolphinscheduler.common.utils.StringUtils;
+
+import org.apache.curator.framework.recipes.locks.InterProcessMutex;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
-import java.util.*;
-
-import static org.apache.dolphinscheduler.common.Constants.*;
-
/**
* abstract zookeeper client
*/
@Component
public abstract class AbstractZKClient extends ZookeeperCachedOperator {
- private static final Logger logger = LoggerFactory.getLogger(AbstractZKClient.class);
-
-
- /**
- * remove dead server by host
- * @param host host
- * @param serverType serverType
- * @throws Exception
- */
- public void removeDeadServerByHost(String host, String serverType) throws Exception {
- List<String> deadServers = super.getChildrenKeys(getDeadZNodeParentPath());
- for(String serverPath : deadServers){
- if(serverPath.startsWith(serverType+UNDERLINE+host)){
- String server = getDeadZNodeParentPath() + SINGLE_SLASH + serverPath;
- super.remove(server);
- logger.info("{} server {} deleted from zk dead server path success" , serverType , host);
- }
- }
- }
-
-
- /**
- * opType(add): if find dead server , then add to zk deadServerPath
- * opType(delete): delete path from zk
- *
- * @param zNode node path
- * @param zkNodeType master or worker
- * @param opType delete or add
- * @throws Exception errors
- */
- public void handleDeadServer(String zNode, ZKNodeType zkNodeType, String opType) throws Exception {
- String host = getHostByEventDataPath(zNode);
- String type = (zkNodeType == ZKNodeType.MASTER) ? MASTER_PREFIX : WORKER_PREFIX;
-
- //check server restart, if restart , dead server path in zk should be delete
- if(opType.equals(DELETE_ZK_OP)){
- removeDeadServerByHost(host, type);
-
- }else if(opType.equals(ADD_ZK_OP)){
- String deadServerPath = getDeadZNodeParentPath() + SINGLE_SLASH + type + UNDERLINE + host;
- if(!super.isExisted(deadServerPath)){
- //add dead server info to zk dead server path : /dead-servers/
-
- super.persist(deadServerPath,(type + UNDERLINE + host));
-
- logger.info("{} server dead , and {} added to zk dead server path success" ,
- zkNodeType.toString(), zNode);
- }
- }
-
- }
-
- /**
- * get active master num
- * @return active master number
- */
- public int getActiveMasterNum(){
- List<String> childrenList = new ArrayList<>();
- try {
- // read master node parent path from conf
- if(super.isExisted(getZNodeParentPath(ZKNodeType.MASTER))){
- childrenList = super.getChildrenKeys(getZNodeParentPath(ZKNodeType.MASTER));
- }
- } catch (Exception e) {
- logger.error("getActiveMasterNum error",e);
- }
- return childrenList.size();
- }
-
- /**
- *
- * @return zookeeper quorum
- */
- public String getZookeeperQuorum(){
- return getZookeeperConfig().getServerList();
- }
-
- /**
- * get server list.
- * @param zkNodeType zookeeper node type
- * @return server list
- */
- public List<Server> getServersList(ZKNodeType zkNodeType){
- Map<String, String> masterMap = getServerMaps(zkNodeType);
- String parentPath = getZNodeParentPath(zkNodeType);
-
- List<Server> masterServers = new ArrayList<>();
- for (Map.Entry<String, String> entry : masterMap.entrySet()) {
- Server masterServer = ResInfo.parseHeartbeatForZKInfo(entry.getValue());
- if(masterServer == null){
- continue;
- }
- String key = entry.getKey();
- masterServer.setZkDirectory(parentPath + "/"+ key);
- //set host and port
- String[] hostAndPort=key.split(COLON);
- String[] hosts=hostAndPort[0].split(DIVISION_STRING);
- // fetch the last one
- masterServer.setHost(hosts[hosts.length-1]);
- masterServer.setPort(Integer.parseInt(hostAndPort[1]));
- masterServers.add(masterServer);
- }
- return masterServers;
- }
-
- /**
- * get master server list map.
- * @param zkNodeType zookeeper node type
- * @return result : {host : resource info}
- */
- public Map<String, String> getServerMaps(ZKNodeType zkNodeType){
-
- Map<String, String> masterMap = new HashMap<>();
- try {
- String path = getZNodeParentPath(zkNodeType);
- List<String> serverList = super.getChildrenKeys(path);
- if(zkNodeType == ZKNodeType.WORKER){
- List<String> workerList = new ArrayList<>();
- for(String group : serverList){
- List<String> groupServers = super.getChildrenKeys(path + Constants.SLASH + group);
- for(String groupServer : groupServers){
- workerList.add(group + Constants.SLASH + groupServer);
- }
- }
- serverList = workerList;
- }
- for(String server : serverList){
- masterMap.putIfAbsent(server, super.get(path + Constants.SLASH + server));
- }
- } catch (Exception e) {
- logger.error("get server list failed", e);
- }
-
- return masterMap;
- }
-
- /**
- * check the zookeeper node already exists
- * @param host host
- * @param zkNodeType zookeeper node type
- * @return true if exists
- */
- public boolean checkZKNodeExists(String host, ZKNodeType zkNodeType) {
- String path = getZNodeParentPath(zkNodeType);
- if(StringUtils.isEmpty(path)){
- logger.error("check zk node exists error, host:{}, zk node type:{}",
- host, zkNodeType.toString());
- return false;
- }
- Map<String, String> serverMaps = getServerMaps(zkNodeType);
- for(String hostKey : serverMaps.keySet()){
- if(hostKey.contains(host)){
- return true;
- }
- }
- return false;
- }
-
- /**
- *
- * @return get worker node parent path
- */
- protected String getWorkerZNodeParentPath(){
- return getZookeeperConfig().getDsRoot() + Constants.ZOOKEEPER_DOLPHINSCHEDULER_WORKERS;
- }
-
- /**
- *
- * @return get master node parent path
- */
- protected String getMasterZNodeParentPath(){
- return getZookeeperConfig().getDsRoot() + Constants.ZOOKEEPER_DOLPHINSCHEDULER_MASTERS;
- }
-
- /**
- *
- * @return get master lock path
- */
- public String getMasterLockPath(){
- return getZookeeperConfig().getDsRoot() + Constants.ZOOKEEPER_DOLPHINSCHEDULER_LOCK_MASTERS;
- }
-
- /**
- *
- * @param zkNodeType zookeeper node type
- * @return get zookeeper node parent path
- */
- public String getZNodeParentPath(ZKNodeType zkNodeType) {
- String path = "";
- switch (zkNodeType){
- case MASTER:
- return getMasterZNodeParentPath();
- case WORKER:
- return getWorkerZNodeParentPath();
- case DEAD_SERVER:
- return getDeadZNodeParentPath();
- default:
- break;
- }
- return path;
- }
-
- /**
- *
- * @return get dead server node parent path
- */
- protected String getDeadZNodeParentPath(){
- return getZookeeperConfig().getDsRoot() + Constants.ZOOKEEPER_DOLPHINSCHEDULER_DEAD_SERVERS;
- }
-
- /**
- *
- * @return get master start up lock path
- */
- public String getMasterStartUpLockPath(){
- return getZookeeperConfig().getDsRoot() + Constants.ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_STARTUP_MASTERS;
- }
-
- /**
- *
- * @return get master failover lock path
- */
- public String getMasterFailoverLockPath(){
- return getZookeeperConfig().getDsRoot() + Constants.ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_MASTERS;
- }
-
- /**
- *
- * @return get worker failover lock path
- */
- public String getWorkerFailoverLockPath(){
- return getZookeeperConfig().getDsRoot() + Constants.ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_WORKERS;
- }
-
- /**
- * release mutex
- * @param mutex mutex
- */
- public void releaseMutex(InterProcessMutex mutex) {
- if (mutex != null){
- try {
- mutex.release();
- } catch (Exception e) {
- if("instance must be started before calling this method".equals(e.getMessage())){
- logger.warn("lock release");
- }else{
- logger.error("lock release failed",e);
- }
-
- }
- }
- }
-
- /**
- * init system znode
- */
- protected void initSystemZNode(){
- try {
- persist(getMasterZNodeParentPath(), "");
- persist(getWorkerZNodeParentPath(), "");
- persist(getDeadZNodeParentPath(), "");
-
- logger.info("initialize server nodes success.");
- } catch (Exception e) {
- logger.error("init system znode failed",e);
- }
- }
-
- /**
- * get host ip, string format: masterParentPath/ip
- * @param path path
- * @return host ip, string format: masterParentPath/ip
- */
- protected String getHostByEventDataPath(String path) {
- if(StringUtils.isEmpty(path)){
- logger.error("empty path!");
- return "";
- }
- String[] pathArray = path.split(SINGLE_SLASH);
- if(pathArray.length < 1){
- logger.error("parse ip error: {}", path);
- return "";
- }
- return pathArray[pathArray.length - 1];
-
- }
-
- @Override
- public String toString() {
- return "AbstractZKClient{" +
- "zkClient=" + getZkClient() +
- ", deadServerZNodeParentPath='" + getZNodeParentPath(ZKNodeType.DEAD_SERVER) + '\'' +
- ", masterZNodeParentPath='" + getZNodeParentPath(ZKNodeType.MASTER) + '\'' +
- ", workerZNodeParentPath='" + getZNodeParentPath(ZKNodeType.WORKER) + '\'' +
- '}';
- }
-}
\ No newline at end of file
+ private static final Logger logger = LoggerFactory.getLogger(AbstractZKClient.class);
+
+ /**
+ * remove dead server by host
+ *
+ * @param host host
+ * @param serverType serverType
+ */
+ public void removeDeadServerByHost(String host, String serverType) {
+ List<String> deadServers = super.getChildrenKeys(getDeadZNodeParentPath());
+ for (String serverPath : deadServers) {
+ if (serverPath.startsWith(serverType + UNDERLINE + host)) {
+ String server = getDeadZNodeParentPath() + SINGLE_SLASH + serverPath;
+ super.remove(server);
+ logger.info("{} server {} deleted from zk dead server path success", serverType, host);
+ }
+ }
+ }
+
+ /**
+ * opType(add): if find dead server , then add to zk deadServerPath
+ * opType(delete): delete path from zk
+ *
+ * @param zNode node path
+ * @param zkNodeType master or worker
+ * @param opType delete or add
+ */
+ public void handleDeadServer(String zNode, ZKNodeType zkNodeType, String opType) {
+ String host = getHostByEventDataPath(zNode);
+ String type = (zkNodeType == ZKNodeType.MASTER) ? MASTER_PREFIX : WORKER_PREFIX;
+
+ //check server restart, if restart , dead server path in zk should be delete
+ if (opType.equals(DELETE_ZK_OP)) {
+ removeDeadServerByHost(host, type);
+
+ } else if (opType.equals(ADD_ZK_OP)) {
+ String deadServerPath = getDeadZNodeParentPath() + SINGLE_SLASH + type + UNDERLINE + host;
+ if (!super.isExisted(deadServerPath)) {
+ //add dead server info to zk dead server path : /dead-servers/
+
+ super.persist(deadServerPath, (type + UNDERLINE + host));
+
+ logger.info("{} server dead , and {} added to zk dead server path success",
+ zkNodeType, zNode);
+ }
+ }
+
+ }
+
+ /**
+ * get active master num
+ *
+ * @return active master number
+ */
+ public int getActiveMasterNum() {
+ List<String> childrenList = new ArrayList<>();
+ try {
+ // read master node parent path from conf
+ if (super.isExisted(getZNodeParentPath(ZKNodeType.MASTER))) {
+ childrenList = super.getChildrenKeys(getZNodeParentPath(ZKNodeType.MASTER));
+ }
+ } catch (Exception e) {
+ logger.error("getActiveMasterNum error", e);
+ }
+ return childrenList.size();
+ }
+
+ /**
+ * @return zookeeper quorum
+ */
+ public String getZookeeperQuorum() {
+ return getZookeeperConfig().getServerList();
+ }
+
+ /**
+ * get server list.
+ *
+ * @param zkNodeType zookeeper node type
+ * @return server list
+ */
+ public List<Server> getServersList(ZKNodeType zkNodeType) {
+ Map<String, String> masterMap = getServerMaps(zkNodeType);
+ String parentPath = getZNodeParentPath(zkNodeType);
+
+ List<Server> masterServers = new ArrayList<>();
+ for (Map.Entry<String, String> entry : masterMap.entrySet()) {
+ Server masterServer = ResInfo.parseHeartbeatForZKInfo(entry.getValue());
+ if (masterServer == null) {
+ continue;
+ }
+ String key = entry.getKey();
+ masterServer.setZkDirectory(parentPath + "/" + key);
+ //set host and port
+ String[] hostAndPort = key.split(COLON);
+ String[] hosts = hostAndPort[0].split(DIVISION_STRING);
+ // fetch the last one
+ masterServer.setHost(hosts[hosts.length - 1]);
+ masterServer.setPort(Integer.parseInt(hostAndPort[1]));
+ masterServers.add(masterServer);
+ }
+ return masterServers;
+ }
+
+ /**
+ * get master server list map.
+ *
+ * @param zkNodeType zookeeper node type
+ * @return result : {host : resource info}
+ */
+ public Map<String, String> getServerMaps(ZKNodeType zkNodeType) {
+
+ Map<String, String> masterMap = new HashMap<>();
+ try {
+ String path = getZNodeParentPath(zkNodeType);
+ List<String> serverList = super.getChildrenKeys(path);
+ if (zkNodeType == ZKNodeType.WORKER) {
+ List<String> workerList = new ArrayList<>();
+ for (String group : serverList) {
+ List<String> groupServers = super.getChildrenKeys(path + Constants.SLASH + group);
+ for (String groupServer : groupServers) {
+ workerList.add(group + Constants.SLASH + groupServer);
+ }
+ }
+ serverList = workerList;
+ }
+ for (String server : serverList) {
+ masterMap.putIfAbsent(server, super.get(path + Constants.SLASH + server));
+ }
+ } catch (Exception e) {
+ logger.error("get server list failed", e);
+ }
+
+ return masterMap;
+ }
+
+ /**
+ * check the zookeeper node already exists
+ *
+ * @param host host
+ * @param zkNodeType zookeeper node type
+ * @return true if exists
+ */
+ public boolean checkZKNodeExists(String host, ZKNodeType zkNodeType) {
+ String path = getZNodeParentPath(zkNodeType);
+ if (StringUtils.isEmpty(path)) {
+ logger.error("check zk node exists error, host:{}, zk node type:{}",
+ host, zkNodeType);
+ return false;
+ }
+ Map<String, String> serverMaps = getServerMaps(zkNodeType);
+ for (String hostKey : serverMaps.keySet()) {
+ if (hostKey.contains(host)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * @return get worker node parent path
+ */
+ protected String getWorkerZNodeParentPath() {
+ return getZookeeperConfig().getDsRoot() + Constants.ZOOKEEPER_DOLPHINSCHEDULER_WORKERS;
+ }
+
+ /**
+ * @return get master node parent path
+ */
+ protected String getMasterZNodeParentPath() {
+ return getZookeeperConfig().getDsRoot() + Constants.ZOOKEEPER_DOLPHINSCHEDULER_MASTERS;
+ }
+
+ /**
+ * @return get master lock path
+ */
+ public String getMasterLockPath() {
+ return getZookeeperConfig().getDsRoot() + Constants.ZOOKEEPER_DOLPHINSCHEDULER_LOCK_MASTERS;
+ }
+
+ /**
+ * @param zkNodeType zookeeper node type
+ * @return get zookeeper node parent path
+ */
+ public String getZNodeParentPath(ZKNodeType zkNodeType) {
+ String path = "";
+ switch (zkNodeType) {
+ case MASTER:
+ return getMasterZNodeParentPath();
+ case WORKER:
+ return getWorkerZNodeParentPath();
+ case DEAD_SERVER:
+ return getDeadZNodeParentPath();
+ default:
+ break;
+ }
+ return path;
+ }
+
+ /**
+ * @return get dead server node parent path
+ */
+ protected String getDeadZNodeParentPath() {
+ return getZookeeperConfig().getDsRoot() + Constants.ZOOKEEPER_DOLPHINSCHEDULER_DEAD_SERVERS;
+ }
+
+ /**
+ * @return get master start up lock path
+ */
+ public String getMasterStartUpLockPath() {
+ return getZookeeperConfig().getDsRoot() + Constants.ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_STARTUP_MASTERS;
+ }
+
+ /**
+ * @return get master failover lock path
+ */
+ public String getMasterFailoverLockPath() {
+ return getZookeeperConfig().getDsRoot() + Constants.ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_MASTERS;
+ }
+
+ /**
+ * @return get worker failover lock path
+ */
+ public String getWorkerFailoverLockPath() {
+ return getZookeeperConfig().getDsRoot() + Constants.ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_WORKERS;
+ }
+
+ /**
+ * release mutex
+ *
+ * @param mutex mutex
+ */
+ public void releaseMutex(InterProcessMutex mutex) {
+ if (mutex != null) {
+ try {
+ mutex.release();
+ } catch (Exception e) {
+ if ("instance must be started before calling this method".equals(e.getMessage())) {
+ logger.warn("lock release");
+ } else {
+ logger.error("lock release failed", e);
+ }
+
+ }
+ }
+ }
+
+ /**
+ * init system znode
+ */
+ protected void initSystemZNode() {
+ try {
+ persist(getMasterZNodeParentPath(), "");
+ persist(getWorkerZNodeParentPath(), "");
+ persist(getDeadZNodeParentPath(), "");
+
+ logger.info("initialize server nodes success.");
+ } catch (Exception e) {
+ logger.error("init system znode failed", e);
+ }
+ }
+
+ /**
+ * get host ip, string format: masterParentPath/ip
+ *
+ * @param path path
+ * @return host ip, string format: masterParentPath/ip
+ */
+ protected String getHostByEventDataPath(String path) {
+ if (StringUtils.isEmpty(path)) {
+ logger.error("empty path!");
+ return "";
+ }
+ String[] pathArray = path.split(SINGLE_SLASH);
+ if (pathArray.length < 1) {
+ logger.error("parse ip error: {}", path);
+ return "";
+ }
+ return pathArray[pathArray.length - 1];
+
+ }
+
+ @Override
+ public String toString() {
+ return "AbstractZKClient{"
+ + "zkClient=" + getZkClient()
+ + ", deadServerZNodeParentPath='" + getZNodeParentPath(ZKNodeType.DEAD_SERVER) + '\''
+ + ", masterZNodeParentPath='" + getZNodeParentPath(ZKNodeType.MASTER) + '\''
+ + ", workerZNodeParentPath='" + getZNodeParentPath(ZKNodeType.WORKER) + '\''
+ + '}';
+ }
+}
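A minimal usage sketch (not part of the patch) of the AbstractZKClient helpers added above; `zkClient` stands in for a concrete subclass wired by Spring, and the worker host is purely illustrative:

    // list registered masters and check whether a given worker is still registered
    List<Server> masters = zkClient.getServersList(ZKNodeType.MASTER);
    boolean workerAlive = zkClient.checkZKNodeExists("192.168.1.10", ZKNodeType.WORKER);
    logger.info("active masters: {}, worker alive: {}", masters.size(), workerAlive);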
diff --git a/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/CuratorZookeeperClient.java b/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/CuratorZookeeperClient.java
index 5a04c5a23b..e25a22f031 100644
--- a/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/CuratorZookeeperClient.java
+++ b/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/CuratorZookeeperClient.java
@@ -14,9 +14,14 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
package org.apache.dolphinscheduler.service.zk;
-import org.apache.commons.lang.StringUtils;
+import static org.apache.dolphinscheduler.common.utils.Preconditions.checkNotNull;
+
+import org.apache.dolphinscheduler.common.utils.StringUtils;
+import org.apache.dolphinscheduler.service.exceptions.ServiceException;
+
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.api.ACLProvider;
@@ -25,18 +30,16 @@ import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.data.ACL;
+import java.nio.charset.StandardCharsets;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
-import java.nio.charset.StandardCharsets;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.dolphinscheduler.common.utils.Preconditions.checkNotNull;
-
/**
* Shared Curator zookeeper client
*/
@@ -49,7 +52,6 @@ public class CuratorZookeeperClient implements InitializingBean {
private CuratorFramework zkClient;
-
@Override
public void afterPropertiesSet() throws Exception {
this.zkClient = buildClient();
@@ -91,7 +93,7 @@ public class CuratorZookeeperClient implements InitializingBean {
zkClient.blockUntilConnected(30, TimeUnit.SECONDS);
} catch (final Exception ex) {
- throw new RuntimeException(ex);
+ throw new ServiceException(ex);
}
return zkClient;
}
@@ -123,4 +125,4 @@ public class CuratorZookeeperClient implements InitializingBean {
public CuratorFramework getZkClient() {
return zkClient;
}
-}
\ No newline at end of file
+}
diff --git a/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/ZKServer.java b/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/ZKServer.java
index c7a53ebdc0..7ac23a3c4d 100644
--- a/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/ZKServer.java
+++ b/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/ZKServer.java
@@ -14,19 +14,22 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
package org.apache.dolphinscheduler.service.zk;
import org.apache.dolphinscheduler.common.utils.StringUtils;
+import org.apache.dolphinscheduler.service.exceptions.ServiceException;
+
import org.apache.zookeeper.server.ZooKeeperServer;
import org.apache.zookeeper.server.ZooKeeperServerMain;
import org.apache.zookeeper.server.quorum.QuorumPeerConfig;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* just speed experience version
@@ -51,10 +54,10 @@ public class ZKServer {
ZKServer zkServer;
if (args.length == 0) {
zkServer = new ZKServer();
- } else if (args.length == 1){
- zkServer = new ZKServer(Integer.valueOf(args[0]), "");
+ } else if (args.length == 1) {
+ zkServer = new ZKServer(Integer.parseInt(args[0]), "");
} else {
- zkServer = new ZKServer(Integer.valueOf(args[0]), args[1]);
+ zkServer = new ZKServer(Integer.parseInt(args[0]), args[1]);
}
zkServer.registerHook();
zkServer.start();
@@ -73,7 +76,7 @@ public class ZKServer {
}
private void registerHook() {
- /**
+ /*
* register hooks, which are called before the process exits
*/
Runtime.getRuntime().addShutdownHook(new Thread(this::stop));
@@ -90,7 +93,7 @@ public class ZKServer {
}
}
- public boolean isStarted(){
+ public boolean isStarted() {
return isStarted.get();
}
@@ -119,19 +122,19 @@ public class ZKServer {
if (file.exists()) {
logger.warn("The path of zk server exists");
}
- logger.info("zk server starting, data dir path:{}" , zkDataDir);
- startLocalZkServer(port, zkDataDir, ZooKeeperServer.DEFAULT_TICK_TIME,"60");
+ logger.info("zk server starting, data dir path:{}", zkDataDir);
+ startLocalZkServer(port, zkDataDir, ZooKeeperServer.DEFAULT_TICK_TIME, "60");
}
/**
* Starts a local Zk instance
*
- * @param port The port to listen on
+ * @param port The port to listen on
* @param dataDirPath The path for the Zk data directory
- * @param tickTime zk tick time
- * @param maxClientCnxns zk max client connections
+ * @param tickTime zk tick time
+ * @param maxClientCnxns zk max client connections
*/
- private void startLocalZkServer(final int port, final String dataDirPath,final int tickTime,String maxClientCnxns) {
+ private void startLocalZkServer(final int port, final String dataDirPath, final int tickTime, String maxClientCnxns) {
if (isStarted.compareAndSet(false, true)) {
zooKeeperServerMain = new PublicZooKeeperServerMain();
logger.info("Zookeeper data path : {} ", dataDirPath);
@@ -144,8 +147,7 @@ public class ZKServer {
zooKeeperServerMain.initializeAndRun(args);
} catch (QuorumPeerConfig.ConfigException | IOException e) {
- logger.warn("Caught exception while starting ZK", e);
- throw new RuntimeException(e);
+ throw new ServiceException("Caught exception while starting ZK", e);
}
}
}
@@ -159,7 +161,7 @@ public class ZKServer {
logger.info("zk server stopped");
} catch (Exception e) {
- logger.error("Failed to stop ZK ",e);
+ logger.error("Failed to stop ZK ", e);
}
}
@@ -180,8 +182,7 @@ public class ZKServer {
org.apache.commons.io.FileUtils.deleteDirectory(new File(dataDir));
}
} catch (Exception e) {
- logger.warn("Caught exception while stopping ZK server", e);
- throw new RuntimeException(e);
+ throw new ServiceException("Caught exception while stopping ZK server", e);
}
}
}
diff --git a/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/ZookeeperCachedOperator.java b/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/ZookeeperCachedOperator.java
index 6dfce79a3a..88c339b045 100644
--- a/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/ZookeeperCachedOperator.java
+++ b/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/ZookeeperCachedOperator.java
@@ -14,21 +14,24 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
package org.apache.dolphinscheduler.service.zk;
import org.apache.dolphinscheduler.common.thread.ThreadUtils;
+import org.apache.dolphinscheduler.service.exceptions.ServiceException;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.cache.ChildData;
import org.apache.curator.framework.recipes.cache.TreeCache;
import org.apache.curator.framework.recipes.cache.TreeCacheEvent;
import org.apache.curator.framework.recipes.cache.TreeCacheListener;
+
+import java.nio.charset.StandardCharsets;
+
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
-import java.nio.charset.StandardCharsets;
-
@Component
public class ZookeeperCachedOperator extends ZookeeperOperator {
@@ -36,6 +39,7 @@ public class ZookeeperCachedOperator extends ZookeeperOperator {
private TreeCache treeCache;
+
/**
* register a unified listener of /${dsRoot},
*/
@@ -59,14 +63,16 @@ public class ZookeeperCachedOperator extends ZookeeperOperator {
treeCache.start();
} catch (Exception e) {
logger.error("add listener to zk path: {} failed", getZookeeperConfig().getDsRoot());
- throw new RuntimeException(e);
+ throw new ServiceException(e);
}
}
//for sub class
- protected void dataChanged(final CuratorFramework client, final TreeCacheEvent event, final String path){}
+ protected void dataChanged(final CuratorFramework client, final TreeCacheEvent event, final String path) {
+ // Used by sub class
+ }
- public String getFromCache(final String cachePath, final String key) {
+ public String getFromCache(final String key) {
ChildData resultInCache = treeCache.getCurrentData(key);
if (null != resultInCache) {
return null == resultInCache.getData() ? null : new String(resultInCache.getData(), StandardCharsets.UTF_8);
@@ -74,11 +80,11 @@ public class ZookeeperCachedOperator extends ZookeeperOperator {
return null;
}
- public TreeCache getTreeCache(final String cachePath) {
+ public TreeCache getTreeCache() {
return treeCache;
}
- public void addListener(TreeCacheListener listener){
+ public void addListener(TreeCacheListener listener) {
this.treeCache.getListenable().addListener(listener);
}
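A minimal caller-side sketch (not part of the patch) of the trimmed ZookeeperCachedOperator API above; the bean name `zookeeperCachedOperator`, the `logger`, and the znode path are illustrative assumptions:

    // after this patch, callers drop the unused cachePath argument
    String heartbeat = zookeeperCachedOperator.getFromCache("/dolphinscheduler/nodes/master/127.0.0.1:5678");
    TreeCache cache = zookeeperCachedOperator.getTreeCache();
    zookeeperCachedOperator.addListener((client, event) -> logger.info("tree cache event: {}", event.getType()));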
diff --git a/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/ZookeeperOperator.java b/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/ZookeeperOperator.java
index e7b049f8bf..8a219837b7 100644
--- a/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/ZookeeperOperator.java
+++ b/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/ZookeeperOperator.java
@@ -14,13 +14,17 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
package org.apache.dolphinscheduler.service.zk;
-import org.apache.commons.lang.StringUtils;
+import static org.apache.dolphinscheduler.common.utils.Preconditions.checkNotNull;
+
+import org.apache.dolphinscheduler.common.utils.StringUtils;
+import org.apache.dolphinscheduler.service.exceptions.ServiceException;
+
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.api.ACLProvider;
-import org.apache.curator.framework.api.transaction.CuratorOp;
import org.apache.curator.framework.state.ConnectionState;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.curator.utils.CloseableUtils;
@@ -29,18 +33,16 @@ import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Stat;
+
+import java.nio.charset.StandardCharsets;
+import java.util.List;
+
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
-import java.nio.charset.StandardCharsets;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.dolphinscheduler.common.utils.Preconditions.checkNotNull;
-
/**
* zk base operator
*/
@@ -64,19 +66,23 @@ public class ZookeeperOperator implements InitializingBean {
/**
* this method is for sub class,
*/
- protected void registerListener(){}
+ protected void registerListener() {
+ // Used by sub class
+ }
- protected void treeCacheStart(){}
+ protected void treeCacheStart() {
+ // Used by sub class
+ }
public void initStateLister() {
checkNotNull(zkClient);
zkClient.getConnectionStateListenable().addListener((client, newState) -> {
- if(newState == ConnectionState.LOST){
+ if (newState == ConnectionState.LOST) {
logger.error("connection lost from zookeeper");
- } else if(newState == ConnectionState.RECONNECTED){
+ } else if (newState == ConnectionState.RECONNECTED) {
logger.info("reconnected to zookeeper");
- } else if(newState == ConnectionState.SUSPENDED){
+ } else if (newState == ConnectionState.SUSPENDED) {
logger.warn("connection SUSPENDED to zookeeper");
}
});
@@ -85,7 +91,8 @@ public class ZookeeperOperator implements InitializingBean {
private CuratorFramework buildClient() {
logger.info("zookeeper registry center init, server lists is: {}.", zookeeperConfig.getServerList());
- CuratorFrameworkFactory.Builder builder = CuratorFrameworkFactory.builder().ensembleProvider(new DefaultEnsembleProvider(checkNotNull(zookeeperConfig.getServerList(),"zookeeper quorum can't be null")))
+ CuratorFrameworkFactory.Builder builder = CuratorFrameworkFactory.builder().ensembleProvider(new DefaultEnsembleProvider(checkNotNull(zookeeperConfig.getServerList(),
+ "zookeeper quorum can't be null")))
.retryPolicy(new ExponentialBackoffRetry(zookeeperConfig.getBaseSleepTimeMs(), zookeeperConfig.getMaxRetries(), zookeeperConfig.getMaxSleepMs()));
//these has default value
@@ -114,7 +121,7 @@ public class ZookeeperOperator implements InitializingBean {
try {
zkClient.blockUntilConnected();
} catch (final Exception ex) {
- throw new RuntimeException(ex);
+ throw new ServiceException(ex);
}
return zkClient;
}
@@ -138,12 +145,12 @@ public class ZookeeperOperator implements InitializingBean {
throw new IllegalStateException(ex);
} catch (Exception ex) {
logger.error("getChildrenKeys key : {}", key, ex);
- throw new RuntimeException(ex);
+ throw new ServiceException(ex);
}
}
- public boolean hasChildren(final String key){
- Stat stat ;
+ public boolean hasChildren(final String key) {
+ Stat stat;
try {
stat = zkClient.checkExists().forPath(key);
return stat.getNumChildren() >= 1;
@@ -241,4 +248,4 @@ public class ZookeeperOperator implements InitializingBean {
public void close() {
CloseableUtils.closeQuietly(zkClient);
}
-}
\ No newline at end of file
+}
From ad82bb021502c697cf744868ac26dd4d0ed1605e Mon Sep 17 00:00:00 2001
From: zhuangchong <37063904+zhuangchong@users.noreply.github.com>
Date: Fri, 29 Jan 2021 19:54:18 +0800
Subject: [PATCH 03/11] fix ui datasource dialog visible. (#4597)
---
.../src/js/conf/home/pages/datasource/pages/list/index.vue | 1 +
1 file changed, 1 insertion(+)
diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/datasource/pages/list/index.vue b/dolphinscheduler-ui/src/js/conf/home/pages/datasource/pages/list/index.vue
index 04683ebe4e..545c6c0b70 100644
--- a/dolphinscheduler-ui/src/js/conf/home/pages/datasource/pages/list/index.vue
+++ b/dolphinscheduler-ui/src/js/conf/home/pages/datasource/pages/list/index.vue
@@ -22,6 +22,7 @@
{{$t('Create Datasource')}}
From e54e42339aceef0eb747ee5e7b1213c47c8ff358 Mon Sep 17 00:00:00 2001
From: Tq <36755957+Tianqi-Dotes@users.noreply.github.com>
Date: Fri, 29 Jan 2021 21:33:58 +0800
Subject: [PATCH 04/11] Review readme (#4614)
* fix DS introduction in the README.md
* review README.md
* Update README.md
add some content
Co-authored-by: dailidong
---
README.md | 54 +++++++++++++++++++++++++-----------------------------
1 file changed, 25 insertions(+), 29 deletions(-)
diff --git a/README.md b/README.md
index ede405baa7..1d5ce11f0b 100644
--- a/README.md
+++ b/README.md
@@ -7,46 +7,44 @@ Dolphin Scheduler Official Website
[![Quality Gate Status](https://sonarcloud.io/api/project_badges/measure?project=apache-dolphinscheduler&metric=alert_status)](https://sonarcloud.io/dashboard?id=apache-dolphinscheduler)
-> Dolphin Scheduler for Big Data
-
[![Stargazers over time](https://starchart.cc/apache/incubator-dolphinscheduler.svg)](https://starchart.cc/apache/incubator-dolphinscheduler)
[![EN doc](https://img.shields.io/badge/document-English-blue.svg)](README.md)
[![CN doc](https://img.shields.io/badge/文档-中文版-blue.svg)](README_zh_CN.md)
-### Design features:
+### Design Features:
-Dolphin Scheduler is a distributed and easy-to-extend visual DAG workflow scheduling system. It dedicates to solving the complex dependencies in data processing to make the scheduling system `out of the box` for the data processing process.
+DolphinScheduler is a distributed and extensible workflow scheduler platform with powerful DAG visual interfaces, dedicated to solving complex job dependencies in the data pipeline and providing multiple types of jobs available `out of the box`.
Its main objectives are as follows:
- Associate the tasks according to the dependencies of the tasks in a DAG graph, which can visualize the running state of the task in real-time.
- - Support many task types: Shell, MR, Spark, SQL (MySQL, PostgreSQL, hive, spark SQL), Python, Sub_Process, Procedure, etc.
- - Support process scheduling, dependency scheduling, manual scheduling, manual pause/stop/recovery, support for failed retry/alarm, recovery from specified nodes, Kill task, etc.
- - Support the priority of process & task, task failover, and task timeout alarm or failure.
- - Support process global parameters and node custom parameter settings.
- - Support online upload/download of resource files, management, etc. Support online file creation and editing.
- - Support task log online viewing and scrolling, online download log, etc.
- - Implement cluster HA, decentralize Master cluster and Worker cluster through Zookeeper.
+ - Support various task types: Shell, MR, Spark, SQL (MySQL, PostgreSQL, hive, spark SQL), Python, Sub_Process, Procedure, etc.
+ - Support scheduling of workflows and dependencies, manual scheduling to pause/stop/recover tasks, support failed task retry/alarm, recover specified nodes from failure, kill task, etc.
+ - Support the priority of workflows & tasks, task failover, and task timeout alarm or failure.
+ - Support workflow global parameters and node customized parameter settings.
+ - Support online upload/download/management of resource files, etc. Support online file creation and editing.
+ - Support online viewing, scrolling, and downloading of task logs, etc.
+ - Have implemented cluster HA, decentralize Master cluster and Worker cluster through Zookeeper.
- Support the viewing of Master/Worker CPU load, memory, and CPU usage metrics.
- - Support presenting tree or Gantt chart of workflow history as well as the statistics results of task & process status in each workflow.
- - Support backfilling data.
+ - Support displaying workflow history in tree/Gantt chart, as well as statistical analysis on the task status & process status in each workflow.
+ - Support back-filling data.
- Support multi-tenant.
- Support internationalization.
- - There are more waiting for partners to explore...
+ - More features waiting for partners to explore...
-### What's in Dolphin Scheduler
+### What's in DolphinScheduler
Stability | Easy to use | Features | Scalability |
-- | -- | -- | --
-Decentralized multi-master and multi-worker | Visualization process defines key information such as task status, task type, retry times, task running machine, visual variables, and so on at a glance. | Support pause, recover operation | Support custom task types
-HA is supported by itself | All process definition operations are visualized, dragging tasks to draw DAGs, configuring data sources and resources. At the same time, for third-party systems, the API mode operation is provided. | Users on Dolphin Scheduler can achieve many-to-one or one-to-one mapping relationship through tenants and Hadoop users, which is very important for scheduling large data jobs. | The scheduler uses distributed scheduling, and the overall scheduling capability will increase linearly with the scale of the cluster. Master and Worker support dynamic online and offline.
-Overload processing: Overload processing: By using the task queue mechanism, the number of schedulable tasks on a single machine can be flexibly configured. Machine jam can be avoided with high tolerance to numbers of tasks cached in task queue. | One-click deployment | Support traditional shell tasks, and big data platform task scheduling: MR, Spark, SQL (MySQL, PostgreSQL, hive, spark SQL), Python, Procedure, Sub_Process | |
+Decentralized multi-master and multi-worker | Visualization of workflow key information, such as task status, task type, retry times, task operation machine information, visual variables, and so on at a glance. | Support pause, recover operation | Support customized task types
+support HA | Visualization of all workflow operations, dragging tasks to draw DAGs, configuring data sources and resources. At the same time, for third-party systems, provide API mode operations. | Users on DolphinScheduler can achieve many-to-one or one-to-one mapping relationship through tenants and Hadoop users, which is very important for scheduling large data jobs. | The scheduler supports distributed scheduling, and the overall scheduling capability will increase linearly with the scale of the cluster. Master and Worker support dynamic adjustment.
+Overload processing: By using the task queue mechanism, the number of schedulable tasks on a single machine can be flexibly configured. Machine jam can be avoided with high tolerance to numbers of tasks cached in task queue. | One-click deployment | Support traditional shell tasks, and big data platform task scheduling: MR, Spark, SQL (MySQL, PostgreSQL, hive, spark SQL), Python, Procedure, Sub_Process | |
-### System partial screenshot
+### User Interface Screenshots
![home page](https://user-images.githubusercontent.com/15833811/75218288-bf286400-57d4-11ea-8263-d639c6511d5f.jpg)
![dag](https://user-images.githubusercontent.com/15833811/75236750-3374fe80-57f9-11ea-857d-62a66a5a559d.png)
@@ -57,13 +55,9 @@ Overload processing: Overload processing: By using the task queue mechanism, the
![security](https://user-images.githubusercontent.com/15833811/75236441-bfd2f180-57f8-11ea-88bd-f24311e01b7e.png)
![treeview](https://user-images.githubusercontent.com/15833811/75217191-3fe56100-57d1-11ea-8856-f19180d9a879.png)
+### QuickStart in Docker
+Please refer to the official website document: [[QuickStart in Docker](https://dolphinscheduler.apache.org/en-us/docs/1.3.4/user_doc/docker-deployment.html)]
-### Recent R&D plan
-The work plan of Dolphin Scheduler: [R&D plan](https://github.com/apache/incubator-dolphinscheduler/projects/1), which `In Develop` card shows the features that are currently being developed and TODO card lists what needs to be done(including feature ideas).
-
-### How to contribute
-
-Welcome to participate in contributing, please refer to this website to find out more: [[How to contribute](https://dolphinscheduler.apache.org/en-us/docs/development/contribute.html)]
### How to Build
@@ -80,14 +74,16 @@ dolphinscheduler-dist/target/apache-dolphinscheduler-incubating-${latest.release
### Thanks
-Dolphin Scheduler is based on a lot of excellent open-source projects, such as google guava, guice, grpc, netty, ali bonecp, quartz, and many open-source projects of Apache and so on.
-We would like to express our deep gratitude to all the open-source projects which contribute to making the dream of Dolphin Scheduler comes true. We hope that we are not only the beneficiaries of open-source, but also give back to the community. Besides, we expect the partners who have the same passion and conviction to open-source will join in and contribute to the open-source community!
-
+DolphinScheduler is based on a lot of excellent open-source projects, such as google guava, guice, grpc, netty, ali bonecp, quartz, and many open-source projects of Apache and so on.
+We would like to express our deep gratitude to all the open-source projects used in DolphinScheduler. We hope that we are not only the beneficiaries of open source, but also give back to the community. Besides, we hope everyone who has the same enthusiasm and passion for open source will join in and contribute to the open-source community!
### Get Help
-1. Submit an issue
+1. Submit an [[issue](https://github.com/apache/incubator-dolphinscheduler/issues/new/choose)]
1. Subscribe to the mail list: https://dolphinscheduler.apache.org/en-us/docs/development/subscribe.html, then email dev@dolphinscheduler.apache.org
+### How to Contribute
+The community welcomes everyone to participate in contributing, please refer to this website to find out more: [[How to contribute](https://dolphinscheduler.apache.org/en-us/community/development/contribute.html)]
+
### License
Please refer to the [LICENSE](https://github.com/apache/incubator-dolphinscheduler/blob/dev/LICENSE) file.
From 0e43b0d82a110e10e95858c69d9e3e0fd121edd5 Mon Sep 17 00:00:00 2001
From: cooper <1322849632@qq.com>
Date: Fri, 29 Jan 2021 22:08:58 +0800
Subject: [PATCH 05/11] Remove duplicate
license(LICENSE-@form-create-element-ui ) (#4630)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Co-authored-by: 李长福
---
.../LICENSE-@form-create-element-ui | 21 -------------------
1 file changed, 21 deletions(-)
delete mode 100644 dolphinscheduler-dist/release-docs/licenses/ui-licenses/LICENSE-@form-create-element-ui
diff --git a/dolphinscheduler-dist/release-docs/licenses/ui-licenses/LICENSE-@form-create-element-ui b/dolphinscheduler-dist/release-docs/licenses/ui-licenses/LICENSE-@form-create-element-ui
deleted file mode 100644
index 468a05fc1d..0000000000
--- a/dolphinscheduler-dist/release-docs/licenses/ui-licenses/LICENSE-@form-create-element-ui
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2018 xaboy
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
From d80f0614efb426b92052514fcc2d7f27259e4043 Mon Sep 17 00:00:00 2001
From: "felix.wang" <59079269+felix-thinkingdata@users.noreply.github.com>
Date: Fri, 29 Jan 2021 22:41:13 +0800
Subject: [PATCH 06/11] [Feature-#4350][Alert]support feishu plugin (#4594)
* feishu robot
* Add Feishu to the plugin packing process
---
.../dolphinscheduler-alert-feishu/pom.xml | 82 +++++++
.../alert/feishu/FeiShuAlertChannel.java | 37 +++
.../feishu/FeiShuAlertChannelFactory.java | 82 +++++++
.../alert/feishu/FeiShuAlertPlugin.java | 30 +++
.../alert/feishu/FeiShuParamsConstants.java | 49 ++++
.../plugin/alert/feishu/FeiShuSender.java | 223 ++++++++++++++++++
.../plugin/alert/feishu/HttpRequestUtil.java | 50 ++++
.../feishu/FeiShuAlertChannelFactoryTest.java | 45 ++++
.../plugin/alert/feishu/FeiShuSenderTest.java | 75 ++++++
dolphinscheduler-alert-plugin/pom.xml | 1 +
.../src/main/provisio/dolphinscheduler.xml | 5 +
pom.xml | 2 +
12 files changed, 681 insertions(+)
create mode 100644 dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/pom.xml
create mode 100644 dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuAlertChannel.java
create mode 100644 dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuAlertChannelFactory.java
create mode 100644 dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuAlertPlugin.java
create mode 100644 dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuParamsConstants.java
create mode 100644 dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuSender.java
create mode 100644 dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/HttpRequestUtil.java
create mode 100644 dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/test/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuAlertChannelFactoryTest.java
create mode 100644 dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/test/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuSenderTest.java
diff --git a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/pom.xml b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/pom.xml
new file mode 100644
index 0000000000..44d4cdbb07
--- /dev/null
+++ b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/pom.xml
@@ -0,0 +1,82 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one or more
+  ~ contributor license agreements.  See the NOTICE file distributed with
+  ~ this work for additional information regarding copyright ownership.
+  ~ The ASF licenses this file to You under the Apache License, Version 2.0
+  ~ (the "License"); you may not use this file except in compliance with
+  ~ the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <parent>
+        <artifactId>dolphinscheduler-alert-plugin</artifactId>
+        <groupId>org.apache.dolphinscheduler</groupId>
+        <version>1.3.4-SNAPSHOT</version>
+    </parent>
+    <modelVersion>4.0.0</modelVersion>
+
+    <groupId>org.apache.dolphinscheduler</groupId>
+    <artifactId>dolphinscheduler-alert-feishu</artifactId>
+    <packaging>dolphinscheduler-plugin</packaging>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.dolphinscheduler</groupId>
+            <artifactId>dolphinscheduler-spi</artifactId>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.httpcomponents</groupId>
+            <artifactId>httpclient</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>ch.qos.logback</groupId>
+            <artifactId>logback-classic</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>com.fasterxml.jackson.core</groupId>
+            <artifactId>jackson-annotations</artifactId>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.mockito</groupId>
+            <artifactId>mockito-core</artifactId>
+            <type>jar</type>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <finalName>dolphinscheduler-alert-feishu-${project.version}</finalName>
+    </build>
+</project>
\ No newline at end of file
diff --git a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuAlertChannel.java b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuAlertChannel.java
new file mode 100644
index 0000000000..8a195e08f8
--- /dev/null
+++ b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuAlertChannel.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.dolphinscheduler.plugin.alert.feishu;
+
+import org.apache.dolphinscheduler.spi.alert.AlertChannel;
+import org.apache.dolphinscheduler.spi.alert.AlertData;
+import org.apache.dolphinscheduler.spi.alert.AlertInfo;
+import org.apache.dolphinscheduler.spi.alert.AlertResult;
+import org.apache.dolphinscheduler.spi.params.PluginParamsTransfer;
+
+import java.util.Map;
+
+public class FeiShuAlertChannel implements AlertChannel {
+ @Override
+ public AlertResult process(AlertInfo alertInfo) {
+
+ AlertData alertData = alertInfo.getAlertData();
+ String alertParams = alertInfo.getAlertParams();
+ Map<String, String> paramsMap = PluginParamsTransfer.getPluginParamsMap(alertParams);
+ return new FeiShuSender(paramsMap).sendFeiShuMsg(alertData);
+ }
+}
diff --git a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuAlertChannelFactory.java b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuAlertChannelFactory.java
new file mode 100644
index 0000000000..0e863f95d4
--- /dev/null
+++ b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuAlertChannelFactory.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.dolphinscheduler.plugin.alert.feishu;
+
+import org.apache.dolphinscheduler.spi.alert.AlertChannel;
+import org.apache.dolphinscheduler.spi.alert.AlertChannelFactory;
+import org.apache.dolphinscheduler.spi.params.InputParam;
+import org.apache.dolphinscheduler.spi.params.PasswordParam;
+import org.apache.dolphinscheduler.spi.params.RadioParam;
+import org.apache.dolphinscheduler.spi.params.base.ParamsOptions;
+import org.apache.dolphinscheduler.spi.params.base.PluginParams;
+import org.apache.dolphinscheduler.spi.params.base.Validate;
+
+import java.util.Arrays;
+import java.util.List;
+
+public class FeiShuAlertChannelFactory implements AlertChannelFactory {
+ @Override
+ public String getName() {
+ return "Feishu";
+ }
+
+ @Override
+ public List<PluginParams> getParams() {
+ InputParam webHookParam = InputParam.newBuilder(FeiShuParamsConstants.NAME_WEB_HOOK, FeiShuParamsConstants.WEB_HOOK)
+ .addValidate(Validate.newBuilder()
+ .setRequired(true)
+ .build())
+ .build();
+ RadioParam isEnableProxy =
+ RadioParam.newBuilder(FeiShuParamsConstants.NAME_FEI_SHU_PROXY_ENABLE, FeiShuParamsConstants.NAME_FEI_SHU_PROXY_ENABLE)
+ .addParamsOptions(new ParamsOptions("YES", true, false))
+ .addParamsOptions(new ParamsOptions("NO", false, false))
+ .setValue(true)
+ .addValidate(Validate.newBuilder()
+ .setRequired(false)
+ .build())
+ .build();
+ InputParam proxyParam =
+ InputParam.newBuilder(FeiShuParamsConstants.NAME_FEI_SHU_PROXY, FeiShuParamsConstants.FEI_SHU_PROXY)
+ .addValidate(Validate.newBuilder()
+ .setRequired(false).build())
+ .build();
+
+ InputParam portParam = InputParam.newBuilder(FeiShuParamsConstants.NAME_FEI_SHU_PORT, FeiShuParamsConstants.FEI_SHU_PORT)
+ .addValidate(Validate.newBuilder()
+ .setRequired(false).build())
+ .build();
+
+ InputParam userParam =
+ InputParam.newBuilder(FeiShuParamsConstants.NAME_FEI_SHU_USER, FeiShuParamsConstants.FEI_SHU_USER)
+ .addValidate(Validate.newBuilder()
+ .setRequired(false).build())
+ .build();
+ PasswordParam passwordParam = PasswordParam.newBuilder(FeiShuParamsConstants.NAME_FEI_SHU_PASSWORD, FeiShuParamsConstants.FEI_SHU_PASSWORD)
+ .setPlaceholder("if enable use authentication, you need input password")
+ .build();
+
+ return Arrays.asList(webHookParam, isEnableProxy, proxyParam, portParam, userParam, passwordParam);
+
+ }
+
+ @Override
+ public AlertChannel create() {
+ return new FeiShuAlertChannel();
+ }
+}
diff --git a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuAlertPlugin.java b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuAlertPlugin.java
new file mode 100644
index 0000000000..e71be3e2bd
--- /dev/null
+++ b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuAlertPlugin.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.dolphinscheduler.plugin.alert.feishu;
+
+import org.apache.dolphinscheduler.spi.DolphinSchedulerPlugin;
+import org.apache.dolphinscheduler.spi.alert.AlertChannelFactory;
+
+import com.google.common.collect.ImmutableList;
+
+public class FeiShuAlertPlugin implements DolphinSchedulerPlugin {
+ @Override
+ public Iterable<AlertChannelFactory> getAlertChannelFactorys() {
+ return ImmutableList.of(new FeiShuAlertChannelFactory());
+ }
+}
diff --git a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuParamsConstants.java b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuParamsConstants.java
new file mode 100644
index 0000000000..0b3c329792
--- /dev/null
+++ b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuParamsConstants.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.dolphinscheduler.plugin.alert.feishu;
+
+public class FeiShuParamsConstants {
+
+ private FeiShuParamsConstants() {
+ throw new IllegalStateException("Utility class");
+ }
+
+ static final String WEB_HOOK = "webhook";
+
+ static final String NAME_WEB_HOOK = "webHook";
+
+ public static final String FEI_SHU_PROXY_ENABLE = "isEnableProxy";
+
+ static final String NAME_FEI_SHU_PROXY_ENABLE = "isEnableProxy";
+
+ static final String FEI_SHU_PROXY = "proxy";
+
+ static final String NAME_FEI_SHU_PROXY = "proxy";
+
+ static final String FEI_SHU_PORT = "port";
+
+ static final String NAME_FEI_SHU_PORT = "port";
+
+ static final String FEI_SHU_USER = "user";
+
+ static final String NAME_FEI_SHU_USER = "user";
+
+ static final String FEI_SHU_PASSWORD = "password";
+
+ static final String NAME_FEI_SHU_PASSWORD = "password";
+}
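The constants above are the keys of the configuration map that the new FeiShuSender (next file) reads in its constructor. A hedged sketch of how the channel wires it up, usable only from inside the plugin package since the constants and constructor are package-private; the webhook URL is purely illustrative:

    // illustrative configuration for the Feishu sender; alertData is an AlertData assumed to be in scope
    Map<String, String> config = new HashMap<>();
    config.put(FeiShuParamsConstants.NAME_WEB_HOOK, "https://open.feishu.cn/open-apis/bot/v2/hook/your-token");
    config.put(FeiShuParamsConstants.NAME_FEI_SHU_PROXY_ENABLE, "false");
    AlertResult result = new FeiShuSender(config).sendFeiShuMsg(alertData);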
diff --git a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuSender.java b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuSender.java
new file mode 100644
index 0000000000..4eee390721
--- /dev/null
+++ b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuSender.java
@@ -0,0 +1,223 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.dolphinscheduler.plugin.alert.feishu;
+
+import org.apache.dolphinscheduler.spi.alert.AlertData;
+import org.apache.dolphinscheduler.spi.alert.AlertResult;
+import org.apache.dolphinscheduler.spi.utils.JSONUtils;
+
+import org.apache.commons.codec.binary.StringUtils;
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpHost;
+import org.apache.http.HttpStatus;
+import org.apache.http.client.config.RequestConfig;
+import org.apache.http.client.methods.CloseableHttpResponse;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.util.EntityUtils;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+public class FeiShuSender {
+
+ private static final Logger logger = LoggerFactory.getLogger(FeiShuSender.class);
+
+ private String url;
+
+ private Boolean enableProxy;
+
+ private String proxy;
+
+ private Integer port;
+
+ private String user;
+
+ private String password;
+
+ FeiShuSender(Map<String, String> config) {
+ url = config.get(FeiShuParamsConstants.NAME_WEB_HOOK);
+ enableProxy = Boolean.valueOf(config.get(FeiShuParamsConstants.NAME_FEI_SHU_PROXY_ENABLE));
+ if (Boolean.TRUE.equals(enableProxy)) {
+ port = Integer.parseInt(config.get(FeiShuParamsConstants.NAME_FEI_SHU_PORT));
+ proxy = config.get(FeiShuParamsConstants.NAME_FEI_SHU_PROXY);
+ user = config.get(FeiShuParamsConstants.NAME_FEI_SHU_USER);
+ password = config.get(FeiShuParamsConstants.NAME_FEI_SHU_PASSWORD);
+ }
+
+ }
+
+ private static RequestConfig getProxyConfig(String proxy, int port) {
+ HttpHost httpProxy = new HttpHost(proxy, port);
+ return RequestConfig.custom().setProxy(httpProxy).build();
+ }
+
+ private static String textToJsonString(AlertData alertData) {
+
+ Map<String, Object> items = new HashMap<>(2);
+ items.put("msg_type", "text");
+ Map<String, String> textContent = new HashMap<>();
+ byte[] byt = StringUtils.getBytesUtf8(formatContent(alertData));
+ String txt = StringUtils.newStringUtf8(byt);
+ textContent.put("text", txt);
+ items.put("content", textContent);
+ return JSONUtils.toJsonString(items);
+ }
+
+ private static AlertResult checkSendFeiShuSendMsgResult(String result) {
+ AlertResult alertResult = new AlertResult();
+ alertResult.setStatus("false");
+
+ if (org.apache.dolphinscheduler.spi.utils.StringUtils.isBlank(result)) {
+ alertResult.setMessage("send fei shu msg error");
+ logger.info("send fei shu msg error,fei shu server resp is null");
+ return alertResult;
+ }
+ FeiShuSendMsgResponse sendMsgResponse = JSONUtils.parseObject(result, FeiShuSendMsgResponse.class);
+
+ if (null == sendMsgResponse) {
+ alertResult.setMessage("send fei shu msg fail");
+ logger.info("send fei shu msg error,resp error");
+ return alertResult;
+ }
+ if (sendMsgResponse.statusCode == 0) {
+ alertResult.setStatus("true");
+ alertResult.setMessage("send fei shu msg success");
+ return alertResult;
+ }
+ alertResult.setMessage(String.format("alert send fei shu msg error : %s", sendMsgResponse.getStatusMessage()));
+ logger.info("alert send fei shu msg error : {} ,Extra : {} ", sendMsgResponse.getStatusMessage(), sendMsgResponse.getExtra());
+ return alertResult;
+ }
+
+ public static String formatContent(AlertData alertData) {
+ if (alertData.getContent() != null) {
+ List