Commit 68f1e7ebf8 by dailidong, 5 years ago
  1. .github/workflows/ci_ut.yml (11)
  2. docker/README.md (0)
  3. docker/docker-compose.yml (24)
  4. docker/postgres/docker-entrypoint-initdb/init.sql (771)
  5. dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/ExcelUtils.java (7)
  6. dolphinscheduler-alert/src/main/resources/mail_templates/alert_mail_template.ftl (2)
  7. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/AccessTokenService.java (9)
  8. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/AlertGroupService.java (6)
  9. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProjectService.java (17)
  10. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java (2)
  11. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/utils/CheckUtils.java (2)
  12. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java (6)
  13. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/AlertStatus.java (10)
  14. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/AlertType.java (10)
  15. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/CommandType.java (10)
  16. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/DbType.java (60)
  17. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/ExecutionStatus.java (8)
  18. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/FailureStrategy.java (10)
  19. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/Flag.java (10)
  20. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/Priority.java (9)
  21. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/ReleaseState.java (10)
  22. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/ResourceType.java (10)
  23. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/RunMode.java (10)
  24. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/ShowType.java (10)
  25. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/SparkVersion.java (10)
  26. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/TaskDependType.java (10)
  27. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/TaskType.java (9)
  28. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/UdfType.java (10)
  29. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/UserType.java (10)
  30. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/WarningType.java (10)
  31. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/queue/TaskQueueFactory.java (3)
  32. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/queue/TaskQueueZkImpl.java (163)
  33. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/SpringApplicationContext.java (2)
  34. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/zk/AbstractZKClient.java (6)
  35. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/zk/ZookeeperCachedOperator.java (48)
  36. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/zk/ZookeeperOperator.java (11)
  37. dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/queue/TaskQueueZKImplTest.java (4)
  38. dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/ParameterUtilsTest.java (130)
  39. dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/ProcessDao.java (3)
  40. dolphinscheduler-dao/src/main/resources/application-dao.properties (6)
  41. dolphinscheduler-dao/src/test/java/org/apache/dolphinscheduler/dao/mapper/Application.java (2)
  42. dolphinscheduler-dist/pom.xml (327)
  43. dolphinscheduler-dist/src/main/assembly/dolphinscheduler-binary.xml (21)
  44. dolphinscheduler-dist/src/main/assembly/dolphinscheduler-nginx.xml (236)
  45. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/MasterServer.java (3)
  46. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterBaseTaskExecThread.java (2)
  47. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java (1)
  48. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterSchedulerThread.java (2)
  49. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/SubProcessTaskExecThread.java (1)
  50. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/monitor/AbstractMonitor.java (126)
  51. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/monitor/Monitor.java (28)
  52. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/monitor/MonitorServer.java (63)
  53. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/monitor/RunConfig.java (85)
  54. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/monitor/ZKMonitorImpl.java (62)
  55. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/utils/AlertManager.java (20)
  56. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/utils/FlinkArgsUtils.java (75)
  57. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/utils/RemoveZKNode.java (59)
  58. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/utils/SensitiveLogUtil.java (28)
  59. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/WorkerServer.java (18)
  60. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/log/SensitiveDataConverter.java (92)
  61. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/runner/FetchTaskThread.java (3)
  62. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractYarnTask.java (2)
  63. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/dependent/DependentExecute.java (2)
  64. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/dependent/DependentTask.java (2)
  65. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/http/HttpTask.java (2)
  66. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/processdure/ProcedureTask.java (2)
  67. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/python/PythonTask.java (2)
  68. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/shell/ShellTask.java (2)
  69. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java (2)
  70. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/zk/ZKMasterClient.java (96)
  71. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/zk/ZKWorkerClient.java (61)
  72. dolphinscheduler-server/src/main/resources/config/install_config.conf (0)
  73. dolphinscheduler-server/src/main/resources/config/run_config.conf (0)
  74. dolphinscheduler-server/src/main/resources/worker_logback.xml (4)
  75. dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/utils/FlinkArgsUtilsTest.java (131)
  76. dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/utils/SensitiveLogUtilTest.java (37)
  77. dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/log/SensitiveDataConverterTest.java (92)
  78. dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/shell/ShellCommandExecutorTest.java (2)
  79. dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/sql/SqlExecutorTest.java (2)
  80. dolphinscheduler-ui/pom.xml (165)
  81. install.sh (31)
  82. pom.xml (19)
  83. script/del-zk-node.py (34)
  84. script/monitor-server.py (124)
  85. script/monitor-server.sh (52)
  86. script/remove-zk-node.sh (48)

11
.github/workflows/ci_ut.yml

@@ -16,6 +16,9 @@
#
on: ["pull_request"]
env:
DOCKER_DIR: ./docker
LOG_DIR: /tmp/dolphinscheduler
name: Test Coveralls Parallel
@@ -35,6 +38,8 @@ jobs:
key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
restore-keys: |
${{ runner.os }}-maven-
- name: Bootstrap database
run: cd ${DOCKER_DIR} && docker-compose up -d
- name: Set up JDK 1.8
uses: actions/setup-java@v1
with:
@@ -44,3 +49,9 @@ jobs:
export MAVEN_OPTS='-Dmaven.repo.local=.m2/repository -XX:+TieredCompilation -XX:TieredStopAtLevel=1 -XX:+CMSClassUnloadingEnabled -XX:+UseConcMarkSweepGC -XX:-UseGCOverheadLimit -Xmx3g'
mvn test -Dmaven.test.skip=false cobertura:cobertura
CODECOV_TOKEN="09c2663f-b091-4258-8a47-c981827eb29a" bash <(curl -s https://codecov.io/bash)
- name: Collect logs
run: |
mkdir -p ${LOG_DIR}
cd ${DOCKER_DIR}
docker-compose logs db > ${LOG_DIR}/db.txt
continue-on-error: true

0
docker/README.md

24
docker/docker-compose.yml

@@ -0,0 +1,24 @@
version: '2'
services:
zookeeper:
image: zookeeper
restart: always
container_name: zookeeper
ports:
- "2181:2181"
environment:
ZOO_MY_ID: 1
db:
image: postgres
container_name: postgres
environment:
- POSTGRES_USER=test
- POSTGRES_PASSWORD=test
- POSTGRES_DB=dolphinscheduler
ports:
- "5432:5432"
volumes:
- pgdata:/var/lib/postgresql/data
- ./postgres/docker-entrypoint-initdb:/docker-entrypoint-initdb.d
volumes:
pgdata:

771
docker/postgres/docker-entrypoint-initdb/init.sql

@@ -0,0 +1,771 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
DROP TABLE IF EXISTS QRTZ_FIRED_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_PAUSED_TRIGGER_GRPS;
DROP TABLE IF EXISTS QRTZ_SCHEDULER_STATE;
DROP TABLE IF EXISTS QRTZ_LOCKS;
DROP TABLE IF EXISTS QRTZ_SIMPLE_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_SIMPROP_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_CRON_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_BLOB_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_JOB_DETAILS;
DROP TABLE IF EXISTS QRTZ_CALENDARS;
CREATE TABLE QRTZ_JOB_DETAILS(
SCHED_NAME character varying(120) NOT NULL,
JOB_NAME character varying(200) NOT NULL,
JOB_GROUP character varying(200) NOT NULL,
DESCRIPTION character varying(250) NULL,
JOB_CLASS_NAME character varying(250) NOT NULL,
IS_DURABLE boolean NOT NULL,
IS_NONCONCURRENT boolean NOT NULL,
IS_UPDATE_DATA boolean NOT NULL,
REQUESTS_RECOVERY boolean NOT NULL,
JOB_DATA bytea NULL);
alter table QRTZ_JOB_DETAILS add primary key(SCHED_NAME,JOB_NAME,JOB_GROUP);
CREATE TABLE QRTZ_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
JOB_NAME character varying(200) NOT NULL,
JOB_GROUP character varying(200) NOT NULL,
DESCRIPTION character varying(250) NULL,
NEXT_FIRE_TIME BIGINT NULL,
PREV_FIRE_TIME BIGINT NULL,
PRIORITY INTEGER NULL,
TRIGGER_STATE character varying(16) NOT NULL,
TRIGGER_TYPE character varying(8) NOT NULL,
START_TIME BIGINT NOT NULL,
END_TIME BIGINT NULL,
CALENDAR_NAME character varying(200) NULL,
MISFIRE_INSTR SMALLINT NULL,
JOB_DATA bytea NULL) ;
alter table QRTZ_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_SIMPLE_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
REPEAT_COUNT BIGINT NOT NULL,
REPEAT_INTERVAL BIGINT NOT NULL,
TIMES_TRIGGERED BIGINT NOT NULL) ;
alter table QRTZ_SIMPLE_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_CRON_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
CRON_EXPRESSION character varying(120) NOT NULL,
TIME_ZONE_ID character varying(80)) ;
alter table QRTZ_CRON_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_SIMPROP_TRIGGERS
(
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
STR_PROP_1 character varying(512) NULL,
STR_PROP_2 character varying(512) NULL,
STR_PROP_3 character varying(512) NULL,
INT_PROP_1 INT NULL,
INT_PROP_2 INT NULL,
LONG_PROP_1 BIGINT NULL,
LONG_PROP_2 BIGINT NULL,
DEC_PROP_1 NUMERIC(13,4) NULL,
DEC_PROP_2 NUMERIC(13,4) NULL,
BOOL_PROP_1 boolean NULL,
BOOL_PROP_2 boolean NULL) ;
alter table QRTZ_SIMPROP_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_BLOB_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
BLOB_DATA bytea NULL) ;
alter table QRTZ_BLOB_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_CALENDARS (
SCHED_NAME character varying(120) NOT NULL,
CALENDAR_NAME character varying(200) NOT NULL,
CALENDAR bytea NOT NULL) ;
alter table QRTZ_CALENDARS add primary key(SCHED_NAME,CALENDAR_NAME);
CREATE TABLE QRTZ_PAUSED_TRIGGER_GRPS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL) ;
alter table QRTZ_PAUSED_TRIGGER_GRPS add primary key(SCHED_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_FIRED_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
ENTRY_ID character varying(95) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
INSTANCE_NAME character varying(200) NOT NULL,
FIRED_TIME BIGINT NOT NULL,
SCHED_TIME BIGINT NOT NULL,
PRIORITY INTEGER NOT NULL,
STATE character varying(16) NOT NULL,
JOB_NAME character varying(200) NULL,
JOB_GROUP character varying(200) NULL,
IS_NONCONCURRENT boolean NULL,
REQUESTS_RECOVERY boolean NULL) ;
alter table QRTZ_FIRED_TRIGGERS add primary key(SCHED_NAME,ENTRY_ID);
CREATE TABLE QRTZ_SCHEDULER_STATE (
SCHED_NAME character varying(120) NOT NULL,
INSTANCE_NAME character varying(200) NOT NULL,
LAST_CHECKIN_TIME BIGINT NOT NULL,
CHECKIN_INTERVAL BIGINT NOT NULL) ;
alter table QRTZ_SCHEDULER_STATE add primary key(SCHED_NAME,INSTANCE_NAME);
CREATE TABLE QRTZ_LOCKS (
SCHED_NAME character varying(120) NOT NULL,
LOCK_NAME character varying(40) NOT NULL) ;
alter table QRTZ_LOCKS add primary key(SCHED_NAME,LOCK_NAME);
CREATE INDEX IDX_QRTZ_J_REQ_RECOVERY ON QRTZ_JOB_DETAILS(SCHED_NAME,REQUESTS_RECOVERY);
CREATE INDEX IDX_QRTZ_J_GRP ON QRTZ_JOB_DETAILS(SCHED_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_J ON QRTZ_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_JG ON QRTZ_TRIGGERS(SCHED_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_C ON QRTZ_TRIGGERS(SCHED_NAME,CALENDAR_NAME);
CREATE INDEX IDX_QRTZ_T_G ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP);
CREATE INDEX IDX_QRTZ_T_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_N_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_N_G_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_NEXT_FIRE_TIME ON QRTZ_TRIGGERS(SCHED_NAME,NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_ST ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE,NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE_GRP ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_FT_TRIG_INST_NAME ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME);
CREATE INDEX IDX_QRTZ_FT_INST_JOB_REQ_RCVRY ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME,REQUESTS_RECOVERY);
CREATE INDEX IDX_QRTZ_FT_J_G ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_FT_JG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_FT_T_G ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE INDEX IDX_QRTZ_FT_TG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_GROUP);
--
-- Table structure for table t_ds_access_token
--
DROP TABLE IF EXISTS t_ds_access_token;
CREATE TABLE t_ds_access_token (
id int NOT NULL ,
user_id int DEFAULT NULL ,
token varchar(64) DEFAULT NULL ,
expire_time timestamp DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_alert
--
DROP TABLE IF EXISTS t_ds_alert;
CREATE TABLE t_ds_alert (
id int NOT NULL ,
title varchar(64) DEFAULT NULL ,
show_type int DEFAULT NULL ,
content text ,
alert_type int DEFAULT NULL ,
alert_status int DEFAULT '0' ,
log text ,
alertgroup_id int DEFAULT NULL ,
receivers text ,
receivers_cc text ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_alertgroup
--
DROP TABLE IF EXISTS t_ds_alertgroup;
CREATE TABLE t_ds_alertgroup (
id int NOT NULL ,
group_name varchar(255) DEFAULT NULL ,
group_type int DEFAULT NULL ,
description varchar(255) DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_command
--
DROP TABLE IF EXISTS t_ds_command;
CREATE TABLE t_ds_command (
id int NOT NULL ,
command_type int DEFAULT NULL ,
process_definition_id int DEFAULT NULL ,
command_param text ,
task_depend_type int DEFAULT NULL ,
failure_strategy int DEFAULT '0' ,
warning_type int DEFAULT '0' ,
warning_group_id int DEFAULT NULL ,
schedule_time timestamp DEFAULT NULL ,
start_time timestamp DEFAULT NULL ,
executor_id int DEFAULT NULL ,
dependence varchar(255) DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
process_instance_priority int DEFAULT NULL ,
worker_group_id int DEFAULT '-1' ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_datasource
--
DROP TABLE IF EXISTS t_ds_datasource;
CREATE TABLE t_ds_datasource (
id int NOT NULL ,
name varchar(64) NOT NULL ,
note varchar(256) DEFAULT NULL ,
type int NOT NULL ,
user_id int NOT NULL ,
connection_params text NOT NULL ,
create_time timestamp NOT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_error_command
--
DROP TABLE IF EXISTS t_ds_error_command;
CREATE TABLE t_ds_error_command (
id int NOT NULL ,
command_type int DEFAULT NULL ,
executor_id int DEFAULT NULL ,
process_definition_id int DEFAULT NULL ,
command_param text ,
task_depend_type int DEFAULT NULL ,
failure_strategy int DEFAULT '0' ,
warning_type int DEFAULT '0' ,
warning_group_id int DEFAULT NULL ,
schedule_time timestamp DEFAULT NULL ,
start_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
dependence text ,
process_instance_priority int DEFAULT NULL ,
worker_group_id int DEFAULT '-1' ,
message text ,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_master_server
--
DROP TABLE IF EXISTS t_ds_master_server;
CREATE TABLE t_ds_master_server (
id int NOT NULL ,
host varchar(45) DEFAULT NULL ,
port int DEFAULT NULL ,
zk_directory varchar(64) DEFAULT NULL ,
res_info varchar(256) DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
last_heartbeat_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_process_definition
--
DROP TABLE IF EXISTS t_ds_process_definition;
CREATE TABLE t_ds_process_definition (
id int NOT NULL ,
name varchar(255) DEFAULT NULL ,
version int DEFAULT NULL ,
release_state int DEFAULT NULL ,
project_id int DEFAULT NULL ,
user_id int DEFAULT NULL ,
process_definition_json text ,
description text ,
global_params text ,
flag int DEFAULT NULL ,
locations text ,
connects text ,
receivers text ,
receivers_cc text ,
create_time timestamp DEFAULT NULL ,
timeout int DEFAULT '0' ,
tenant_id int NOT NULL DEFAULT '-1' ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
create index process_definition_index on t_ds_process_definition (project_id,id);
--
-- Table structure for table t_ds_process_instance
--
DROP TABLE IF EXISTS t_ds_process_instance;
CREATE TABLE t_ds_process_instance (
id int NOT NULL ,
name varchar(255) DEFAULT NULL ,
process_definition_id int DEFAULT NULL ,
state int DEFAULT NULL ,
recovery int DEFAULT NULL ,
start_time timestamp DEFAULT NULL ,
end_time timestamp DEFAULT NULL ,
run_times int DEFAULT NULL ,
host varchar(45) DEFAULT NULL ,
command_type int DEFAULT NULL ,
command_param text ,
task_depend_type int DEFAULT NULL ,
max_try_times int DEFAULT '0' ,
failure_strategy int DEFAULT '0' ,
warning_type int DEFAULT '0' ,
warning_group_id int DEFAULT NULL ,
schedule_time timestamp DEFAULT NULL ,
command_start_time timestamp DEFAULT NULL ,
global_params text ,
process_instance_json text ,
flag int DEFAULT '1' ,
update_time timestamp NULL ,
is_sub_process int DEFAULT '0' ,
executor_id int NOT NULL ,
locations text ,
connects text ,
history_cmd text ,
dependence_schedule_times text ,
process_instance_priority int DEFAULT NULL ,
worker_group_id int DEFAULT '-1' ,
timeout int DEFAULT '0' ,
tenant_id int NOT NULL DEFAULT '-1' ,
PRIMARY KEY (id)
) ;
create index process_instance_index on t_ds_process_instance (process_definition_id,id);
create index start_time_index on t_ds_process_instance (start_time);
--
-- Table structure for table t_ds_project
--
DROP TABLE IF EXISTS t_ds_project;
CREATE TABLE t_ds_project (
id int NOT NULL ,
name varchar(100) DEFAULT NULL ,
description varchar(200) DEFAULT NULL ,
user_id int DEFAULT NULL ,
flag int DEFAULT '1' ,
create_time timestamp DEFAULT CURRENT_TIMESTAMP ,
update_time timestamp DEFAULT CURRENT_TIMESTAMP ,
PRIMARY KEY (id)
) ;
create index user_id_index on t_ds_project (user_id);
--
-- Table structure for table t_ds_queue
--
DROP TABLE IF EXISTS t_ds_queue;
CREATE TABLE t_ds_queue (
id int NOT NULL ,
queue_name varchar(64) DEFAULT NULL ,
queue varchar(64) DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_relation_datasource_user
--
DROP TABLE IF EXISTS t_ds_relation_datasource_user;
CREATE TABLE t_ds_relation_datasource_user (
id int NOT NULL ,
user_id int NOT NULL ,
datasource_id int DEFAULT NULL ,
perm int DEFAULT '1' ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
;
--
-- Table structure for table t_ds_relation_process_instance
--
DROP TABLE IF EXISTS t_ds_relation_process_instance;
CREATE TABLE t_ds_relation_process_instance (
id int NOT NULL ,
parent_process_instance_id int DEFAULT NULL ,
parent_task_instance_id int DEFAULT NULL ,
process_instance_id int DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_relation_project_user
--
DROP TABLE IF EXISTS t_ds_relation_project_user;
CREATE TABLE t_ds_relation_project_user (
id int NOT NULL ,
user_id int NOT NULL ,
project_id int DEFAULT NULL ,
perm int DEFAULT '1' ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
create index relation_project_user_id_index on t_ds_relation_project_user (user_id);
--
-- Table structure for table t_ds_relation_resources_user
--
DROP TABLE IF EXISTS t_ds_relation_resources_user;
CREATE TABLE t_ds_relation_resources_user (
id int NOT NULL ,
user_id int NOT NULL ,
resources_id int DEFAULT NULL ,
perm int DEFAULT '1' ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_relation_udfs_user
--
DROP TABLE IF EXISTS t_ds_relation_udfs_user;
CREATE TABLE t_ds_relation_udfs_user (
id int NOT NULL ,
user_id int NOT NULL ,
udf_id int DEFAULT NULL ,
perm int DEFAULT '1' ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
;
--
-- Table structure for table t_ds_relation_user_alertgroup
--
DROP TABLE IF EXISTS t_ds_relation_user_alertgroup;
CREATE TABLE t_ds_relation_user_alertgroup (
id int NOT NULL,
alertgroup_id int DEFAULT NULL,
user_id int DEFAULT NULL,
create_time timestamp DEFAULT NULL,
update_time timestamp DEFAULT NULL,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_resources
--
DROP TABLE IF EXISTS t_ds_resources;
CREATE TABLE t_ds_resources (
id int NOT NULL ,
alias varchar(64) DEFAULT NULL ,
file_name varchar(64) DEFAULT NULL ,
description varchar(256) DEFAULT NULL ,
user_id int DEFAULT NULL ,
type int DEFAULT NULL ,
size bigint DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
;
--
-- Table structure for table t_ds_schedules
--
DROP TABLE IF EXISTS t_ds_schedules;
CREATE TABLE t_ds_schedules (
id int NOT NULL ,
process_definition_id int NOT NULL ,
start_time timestamp NOT NULL ,
end_time timestamp NOT NULL ,
crontab varchar(256) NOT NULL ,
failure_strategy int NOT NULL ,
user_id int NOT NULL ,
release_state int NOT NULL ,
warning_type int NOT NULL ,
warning_group_id int DEFAULT NULL ,
process_instance_priority int DEFAULT NULL ,
worker_group_id int DEFAULT '-1' ,
create_time timestamp NOT NULL ,
update_time timestamp NOT NULL ,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_session
--
DROP TABLE IF EXISTS t_ds_session;
CREATE TABLE t_ds_session (
id varchar(64) NOT NULL ,
user_id int DEFAULT NULL ,
ip varchar(45) DEFAULT NULL ,
last_login_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_task_instance
--
DROP TABLE IF EXISTS t_ds_task_instance;
CREATE TABLE t_ds_task_instance (
id int NOT NULL ,
name varchar(255) DEFAULT NULL ,
task_type varchar(64) DEFAULT NULL ,
process_definition_id int DEFAULT NULL ,
process_instance_id int DEFAULT NULL ,
task_json text ,
state int DEFAULT NULL ,
submit_time timestamp DEFAULT NULL ,
start_time timestamp DEFAULT NULL ,
end_time timestamp DEFAULT NULL ,
host varchar(45) DEFAULT NULL ,
execute_path varchar(200) DEFAULT NULL ,
log_path varchar(200) DEFAULT NULL ,
alert_flag int DEFAULT NULL ,
retry_times int DEFAULT '0' ,
pid int DEFAULT NULL ,
app_link varchar(255) DEFAULT NULL ,
flag int DEFAULT '1' ,
retry_interval int DEFAULT NULL ,
max_retry_times int DEFAULT NULL ,
task_instance_priority int DEFAULT NULL ,
worker_group_id int DEFAULT '-1' ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_tenant
--
DROP TABLE IF EXISTS t_ds_tenant;
CREATE TABLE t_ds_tenant (
id int NOT NULL ,
tenant_code varchar(64) DEFAULT NULL ,
tenant_name varchar(64) DEFAULT NULL ,
description varchar(256) DEFAULT NULL ,
queue_id int DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_udfs
--
DROP TABLE IF EXISTS t_ds_udfs;
CREATE TABLE t_ds_udfs (
id int NOT NULL ,
user_id int NOT NULL ,
func_name varchar(100) NOT NULL ,
class_name varchar(255) NOT NULL ,
type int NOT NULL ,
arg_types varchar(255) DEFAULT NULL ,
database varchar(255) DEFAULT NULL ,
description varchar(255) DEFAULT NULL ,
resource_id int NOT NULL ,
resource_name varchar(255) NOT NULL ,
create_time timestamp NOT NULL ,
update_time timestamp NOT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_user
--
DROP TABLE IF EXISTS t_ds_user;
CREATE TABLE t_ds_user (
id int NOT NULL ,
user_name varchar(64) DEFAULT NULL ,
user_password varchar(64) DEFAULT NULL ,
user_type int DEFAULT NULL ,
email varchar(64) DEFAULT NULL ,
phone varchar(11) DEFAULT NULL ,
tenant_id int DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
queue varchar(64) DEFAULT NULL ,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_version
--
DROP TABLE IF EXISTS t_ds_version;
CREATE TABLE t_ds_version (
id int NOT NULL ,
version varchar(200) NOT NULL,
PRIMARY KEY (id)
) ;
create index version_index on t_ds_version(version);
--
-- Table structure for table t_ds_worker_group
--
DROP TABLE IF EXISTS t_ds_worker_group;
CREATE TABLE t_ds_worker_group (
id bigint NOT NULL ,
name varchar(256) DEFAULT NULL ,
ip_list varchar(256) DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_worker_server
--
DROP TABLE IF EXISTS t_ds_worker_server;
CREATE TABLE t_ds_worker_server (
id int NOT NULL ,
host varchar(45) DEFAULT NULL ,
port int DEFAULT NULL ,
zk_directory varchar(64) DEFAULT NULL ,
res_info varchar(255) DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
last_heartbeat_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
DROP SEQUENCE IF EXISTS t_ds_access_token_id_sequence;
CREATE SEQUENCE t_ds_access_token_id_sequence;
ALTER TABLE t_ds_access_token ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_access_token_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_alert_id_sequence;
CREATE SEQUENCE t_ds_alert_id_sequence;
ALTER TABLE t_ds_alert ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_alert_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_alertgroup_id_sequence;
CREATE SEQUENCE t_ds_alertgroup_id_sequence;
ALTER TABLE t_ds_alertgroup ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_alertgroup_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_command_id_sequence;
CREATE SEQUENCE t_ds_command_id_sequence;
ALTER TABLE t_ds_command ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_command_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_datasource_id_sequence;
CREATE SEQUENCE t_ds_datasource_id_sequence;
ALTER TABLE t_ds_datasource ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_datasource_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_master_server_id_sequence;
CREATE SEQUENCE t_ds_master_server_id_sequence;
ALTER TABLE t_ds_master_server ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_master_server_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_process_definition_id_sequence;
CREATE SEQUENCE t_ds_process_definition_id_sequence;
ALTER TABLE t_ds_process_definition ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_process_instance_id_sequence;
CREATE SEQUENCE t_ds_process_instance_id_sequence;
ALTER TABLE t_ds_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_instance_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_project_id_sequence;
CREATE SEQUENCE t_ds_project_id_sequence;
ALTER TABLE t_ds_project ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_project_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_queue_id_sequence;
CREATE SEQUENCE t_ds_queue_id_sequence;
ALTER TABLE t_ds_queue ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_queue_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_datasource_user_id_sequence;
CREATE SEQUENCE t_ds_relation_datasource_user_id_sequence;
ALTER TABLE t_ds_relation_datasource_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_datasource_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_process_instance_id_sequence;
CREATE SEQUENCE t_ds_relation_process_instance_id_sequence;
ALTER TABLE t_ds_relation_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_process_instance_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_project_user_id_sequence;
CREATE SEQUENCE t_ds_relation_project_user_id_sequence;
ALTER TABLE t_ds_relation_project_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_project_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_resources_user_id_sequence;
CREATE SEQUENCE t_ds_relation_resources_user_id_sequence;
ALTER TABLE t_ds_relation_resources_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_resources_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_udfs_user_id_sequence;
CREATE SEQUENCE t_ds_relation_udfs_user_id_sequence;
ALTER TABLE t_ds_relation_udfs_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_udfs_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_user_alertgroup_id_sequence;
CREATE SEQUENCE t_ds_relation_user_alertgroup_id_sequence;
ALTER TABLE t_ds_relation_user_alertgroup ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_user_alertgroup_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_resources_id_sequence;
CREATE SEQUENCE t_ds_resources_id_sequence;
ALTER TABLE t_ds_resources ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_resources_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_schedules_id_sequence;
CREATE SEQUENCE t_ds_schedules_id_sequence;
ALTER TABLE t_ds_schedules ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_schedules_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_task_instance_id_sequence;
CREATE SEQUENCE t_ds_task_instance_id_sequence;
ALTER TABLE t_ds_task_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_task_instance_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_tenant_id_sequence;
CREATE SEQUENCE t_ds_tenant_id_sequence;
ALTER TABLE t_ds_tenant ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_tenant_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_udfs_id_sequence;
CREATE SEQUENCE t_ds_udfs_id_sequence;
ALTER TABLE t_ds_udfs ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_udfs_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_user_id_sequence;
CREATE SEQUENCE t_ds_user_id_sequence;
ALTER TABLE t_ds_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_version_id_sequence;
CREATE SEQUENCE t_ds_version_id_sequence;
ALTER TABLE t_ds_version ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_version_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_worker_group_id_sequence;
CREATE SEQUENCE t_ds_worker_group_id_sequence;
ALTER TABLE t_ds_worker_group ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_worker_group_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_worker_server_id_sequence;
CREATE SEQUENCE t_ds_worker_server_id_sequence;
ALTER TABLE t_ds_worker_server ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_worker_server_id_sequence');
-- Records of t_ds_user,user : admin , password : dolphinscheduler123
INSERT INTO t_ds_user(user_name,user_password,user_type,email,phone,tenant_id,create_time,update_time) VALUES ('admin', '7ad2410b2f4c074479a8937a28a22b8f', '0', 'xxx@qq.com', 'xx', '0', '2018-03-27 15:48:50', '2018-10-24 17:40:22');
-- Records of t_ds_alertgroup,dolphinscheduler warning group
INSERT INTO t_ds_alertgroup(group_name,group_type,description,create_time,update_time) VALUES ('dolphinscheduler warning group', '0', 'dolphinscheduler warning group','2018-11-29 10:20:39', '2018-11-29 10:20:39');
INSERT INTO t_ds_relation_user_alertgroup(alertgroup_id,user_id,create_time,update_time) VALUES ( '1', '1', '2018-11-29 10:22:33', '2018-11-29 10:22:33');
-- Records of t_ds_queue,default queue name : default
INSERT INTO t_ds_queue(queue_name,queue,create_time,update_time) VALUES ('default', 'default','2018-11-29 10:22:33', '2018-11-29 10:22:33');
-- Records of t_ds_version
INSERT INTO t_ds_version(version) VALUES ('1.2.0');
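For a quick sanity check of this init script, a minimal JDBC probe against the database that docker-compose.yml boots could look like the sketch below. It is illustrative only and not part of the commit: the host/port, the test/test credentials and the dolphinscheduler database name come from the compose file above, the table and column names come from this init.sql, and the PostgreSQL JDBC driver is assumed to be on the classpath.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class InitSqlSmokeCheck {
    public static void main(String[] args) throws Exception {
        // Connection details taken from docker/docker-compose.yml above:
        // user "test", password "test", database "dolphinscheduler" on localhost:5432.
        String url = "jdbc:postgresql://localhost:5432/dolphinscheduler";
        try (Connection conn = DriverManager.getConnection(url, "test", "test");
             Statement stmt = conn.createStatement()) {
            // init.sql seeds one admin user and a version row; check both landed.
            try (ResultSet rs = stmt.executeQuery(
                    "SELECT user_name FROM t_ds_user WHERE user_name = 'admin'")) {
                System.out.println("admin user present: " + rs.next());
            }
            try (ResultSet rs = stmt.executeQuery("SELECT version FROM t_ds_version")) {
                while (rs.next()) {
                    System.out.println("schema version: " + rs.getString(1));
                }
            }
        }
    }
}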

7
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/ExcelUtils.java

@@ -20,6 +20,8 @@ import org.apache.poi.hssf.usermodel.HSSFCell;
import org.apache.poi.hssf.usermodel.HSSFRow;
import org.apache.poi.hssf.usermodel.HSSFSheet;
import org.apache.poi.hssf.usermodel.HSSFWorkbook;
import org.apache.poi.ss.usermodel.CellStyle;
import org.apache.poi.ss.usermodel.HorizontalAlignment;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -71,10 +73,14 @@ public class ExcelUtils {
//set the height of the first line
row.setHeight((short)500);
//set Horizontal right
CellStyle cellStyle = wb.createCellStyle();
cellStyle.setAlignment(HorizontalAlignment.RIGHT);
//setting excel headers
for (int i = 0; i < headerList.size(); i++) {
HSSFCell cell = row.createCell(i);
cell.setCellStyle(cellStyle);
cell.setCellValue(headerList.get(i));
}
@@ -88,6 +94,7 @@
rowIndex++;
for (int j = 0 ; j < values.length ; j++){
HSSFCell cell1 = row.createCell(j);
cell1.setCellStyle(cellStyle);
cell1.setCellValue(String.valueOf(values[j]));
}
}

2
dolphinscheduler-alert/src/main/resources/mail_templates/alert_mail_template.ftl

@@ -14,4 +14,4 @@
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE HTML PUBLIC '-//W3C//DTD HTML 4.01 Transitional//EN' 'http://www.w3.org/TR/html4/loose.dtd'><html><head><title> dolphinscheduler</title><meta name='Keywords' content=''><meta name='Description' content=''><style type="text/css">table { margin-top:0px; padding-top:0px; border:1px solid; font-size: 14px; color: #333333; border-width: 1px; border-color: #666666; border-collapse: collapse; } table th { border-width: 1px; padding: 8px; border-style: solid; border-color: #666666; background-color: #dedede; } table td { border-width: 1px; padding: 8px; border-style: solid; border-color: #666666; background-color: #ffffff; }</style></head><body style="margin:0;padding:0"><table border="1px" cellpadding="5px" cellspacing="-10px"><thead><#if title??> ${title}</#if></thead><#if content??> ${content}</#if></table></body></html>
<!DOCTYPE HTML PUBLIC '-//W3C//DTD HTML 4.01 Transitional//EN' 'http://www.w3.org/TR/html4/loose.dtd'><html><head><title> dolphinscheduler</title><meta name='Keywords' content=''><meta name='Description' content=''><style type="text/css">table { margin-top:0px; padding-top:0px; border:1px solid; font-size: 14px; color: #333333; border-width: 1px; border-color: #666666; border-collapse: collapse; } table th { border-width: 1px; padding: 8px; border-style: solid; border-color: #666666; background-color: #dedede; text-align: right;} table td { border-width: 1px; padding: 8px; border-style: solid; border-color: #666666; background-color: #ffffff; text-align: right;}</style></head><body style="margin:0;padding:0"><table border="1px" cellpadding="5px" cellspacing="-10px"><thead><#if title??> ${title}</#if></thead><#if content??> ${content}</#if></table></body></html>

9
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/AccessTokenService.java

@@ -154,8 +154,13 @@ public class AccessTokenService extends BaseService {
*/
public Map<String, Object> updateToken(int id,int userId, String expireTime, String token) {
Map<String, Object> result = new HashMap<>(5);
AccessToken accessToken = new AccessToken();
accessToken.setId(id);
AccessToken accessToken = accessTokenMapper.selectById(id);
if (accessToken == null) {
logger.error("access token not exist, access token id {}", id);
putMsg(result, Status.ACCESS_TOKEN_NOT_EXIST);
return result;
}
accessToken.setUserId(userId);
accessToken.setExpireTime(DateUtils.stringToDate(expireTime));
accessToken.setToken(token);

6
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/AlertGroupService.java

@@ -193,6 +193,12 @@ public class AlertGroupService extends BaseService{
if (checkAdmin(loginUser, result)){
return result;
}
//check exist
AlertGroup alertGroup = alertGroupMapper.selectById(id);
if (alertGroup == null) {
putMsg(result, Status.ALERT_GROUP_NOT_EXIST);
return result;
}
userAlertGroupMapper.deleteByAlertgroupId(id);
alertGroupMapper.deleteById(id);

17
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProjectService.java

@@ -41,7 +41,7 @@ import static org.apache.dolphinscheduler.api.utils.CheckUtils.checkDesc;
/**
* project service
*HttpTask./
**/
**/
@Service
public class ProjectService extends BaseService{
@@ -121,7 +121,7 @@ public class ProjectService extends BaseService{
* @param loginUser login user
* @param project project
* @param projectName project name
* @return true if the login user havve permission to see the project
* @return true if the login user have permission to see the project
*/
public Map<String, Object> checkProjectAndAuth(User loginUser, Project project, String projectName) {
@@ -143,7 +143,7 @@ public class ProjectService extends BaseService{
public boolean hasProjectAndPerm(User loginUser, Project project, Map<String, Object> result) {
boolean checkResult = false;
if (project == null) {
putMsg(result, Status.PROJECT_NOT_FOUNT, project.getName());
putMsg(result, Status.PROJECT_NOT_FOUNT, "");
} else if (!checkReadPermission(loginUser, project)) {
putMsg(result, Status.USER_NO_OPERATION_PROJECT_PERM, loginUser.getUserName(), project.getName());
} else {
@@ -199,13 +199,14 @@ public class ProjectService extends BaseService{
if (checkResult != null) {
return checkResult;
}
List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryAllDefinitionList(projectId);
if (!hasPerm(loginUser, project.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryAllDefinitionList(projectId);
if(processDefinitionList.size() > 0){
putMsg(result, Status.DELETE_PROJECT_ERROR_DEFINES_NOT_NULL);
return result;
@@ -227,7 +228,8 @@ public class ProjectService extends BaseService{
* @return check result
*/
private Map<String, Object> getCheckResult(User loginUser, Project project) {
Map<String, Object> checkResult = checkProjectAndAuth(loginUser, project, project.getName());
String projectName = project == null ? null:project.getName();
Map<String, Object> checkResult = checkProjectAndAuth(loginUser, project, projectName);
Status status = (Status) checkResult.get(Constants.STATUS);
if (status != Status.SUCCESS) {
return checkResult;
@@ -247,6 +249,11 @@ public class ProjectService extends BaseService{
public Map<String, Object> update(User loginUser, Integer projectId, String projectName, String desc) {
Map<String, Object> result = new HashMap<>(5);
Map<String, Object> descCheck = checkDesc(desc);
if (descCheck.get(Constants.STATUS) != Status.SUCCESS) {
return descCheck;
}
Project project = projectMapper.selectById(projectId);
boolean hasProjectAndPerm = hasProjectAndPerm(loginUser, project, result);
if (!hasProjectAndPerm) {

2
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java

@@ -210,7 +210,6 @@ public class ResourcesService extends BaseService {
}
Resource resource = resourcesMapper.selectById(resourceId);
String originResourceName = resource.getAlias();
if (resource == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
@@ -236,6 +235,7 @@ public class ResourcesService extends BaseService {
}
//get the file suffix
String originResourceName = resource.getAlias();
String suffix = originResourceName.substring(originResourceName.lastIndexOf("."));
//if the name without suffix then add it ,else use the origin name

2
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/utils/CheckUtils.java

@@ -104,7 +104,7 @@ public class CheckUtils {
* @return true if phone regex valid, otherwise return false
*/
public static boolean checkPhone(String phone) {
return StringUtils.isEmpty(phone) || phone.length() <= 11;
return StringUtils.isEmpty(phone) || phone.length() == 11;
}
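The change above tightens the phone check from "at most 11 characters" to "exactly 11 characters", while empty values remain acceptable. Below is a self-contained illustration of the new behaviour (not part of the commit); StringUtils.isEmpty is re-implemented inline so the snippet runs on its own.

public class CheckPhoneExample {
    // Mirrors the updated rule: empty is allowed, otherwise the length must be exactly 11.
    static boolean checkPhone(String phone) {
        return phone == null || phone.isEmpty() || phone.length() == 11;
    }

    public static void main(String[] args) {
        System.out.println(checkPhone(""));            // true: empty phone is still accepted
        System.out.println(checkPhone("13800138000")); // true: 11 characters
        System.out.println(checkPhone("1380013800"));  // false: 10 characters no longer passes
    }
}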

6
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java

@@ -1005,4 +1005,10 @@ public final class Constants {
public static final String CLASS = "class";
public static final String RECEIVERS = "receivers";
public static final String RECEIVERS_CC = "receiversCc";
/**
* dataSource sensitive param
*/
public static final String DATASOURCE_PASSWORD_REGEX = "(?<=(\"password\":\")).*?(?=(\"))";
}
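The new DATASOURCE_PASSWORD_REGEX is a lookbehind/lookahead pattern that isolates the value of the "password" field inside a datasource's connection-params JSON. Elsewhere in this commit it backs the log masking added in SensitiveLogUtil and SensitiveDataConverter (items 58 and 60 in the file list). The standalone sketch below only illustrates what the pattern matches; it is not taken from those classes.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class PasswordMaskExample {
    // Same lookbehind/lookahead regex as Constants.DATASOURCE_PASSWORD_REGEX above.
    private static final Pattern PWD = Pattern.compile("(?<=(\"password\":\")).*?(?=(\"))");

    public static void main(String[] args) {
        String connectionParams =
                "{\"address\":\"jdbc:postgresql://localhost:5432\",\"password\":\"secret\"}";
        // Replace only the value between "password":" and the closing quote.
        Matcher m = PWD.matcher(connectionParams);
        System.out.println(m.replaceAll("******"));
        // -> {"address":"jdbc:postgresql://localhost:5432","password":"******"}
    }
}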

10
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/AlertStatus.java

@@ -17,12 +17,10 @@
package org.apache.dolphinscheduler.common.enums;
import com.baomidou.mybatisplus.annotation.EnumValue;
import lombok.Getter;
/**
* alert status
*/
@Getter
public enum AlertStatus {
/**
* 0 waiting executed; 1 execute successfully2 execute failed
@@ -40,4 +38,12 @@ public enum AlertStatus {
@EnumValue
private final int code;
private final String descp;
public int getCode() {
return code;
}
public String getDescp() {
return descp;
}
}
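The same edit is applied to every enum under common/enums in this commit: the lombok import and the @Getter annotation are removed, and explicit getCode()/getDescp() accessors are added instead. Assembled from the fragments above, the resulting shape of such an enum is roughly the sketch below; the constant names and descriptions are guesses based on the "0 waiting executed; 1 execute successfully; 2 execute failed" comment and are not copied from the file.

import com.baomidou.mybatisplus.annotation.EnumValue;

// Illustrative reconstruction of the post-change enum shape, not the project's exact source.
public enum AlertStatus {
    WAIT_EXECUTION(0, "waiting executed"),
    EXECUTION_SUCCESS(1, "execute successfully"),
    EXECUTION_FAILURE(2, "execute failed");

    AlertStatus(int code, String descp) {
        this.code = code;
        this.descp = descp;
    }

    @EnumValue
    private final int code;
    private final String descp;

    public int getCode() {
        return code;
    }

    public String getDescp() {
        return descp;
    }
}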

10
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/AlertType.java

@@ -17,12 +17,10 @@
package org.apache.dolphinscheduler.common.enums;
import com.baomidou.mybatisplus.annotation.EnumValue;
import lombok.Getter;
/**
* warning message notification method
*/
@Getter
public enum AlertType {
/**
* 0 email; 1 SMS
@@ -39,4 +37,12 @@ public enum AlertType {
@EnumValue
private final int code;
private final String descp;
public int getCode() {
return code;
}
public String getDescp() {
return descp;
}
}

10
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/CommandType.java

@@ -17,12 +17,10 @@
package org.apache.dolphinscheduler.common.enums;
import com.baomidou.mybatisplus.annotation.EnumValue;
import lombok.Getter;
/**
* command types
*/
@Getter
public enum CommandType {
/**
@@ -59,4 +57,12 @@ public enum CommandType {
@EnumValue
private final int code;
private final String descp;
public int getCode() {
return code;
}
public String getDescp() {
return descp;
}
}

60
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/DbType.java

@@ -17,38 +17,44 @@
package org.apache.dolphinscheduler.common.enums;
import com.baomidou.mybatisplus.annotation.EnumValue;
import lombok.Getter;
/**
* data base types
*/
@Getter
public enum DbType {
/**
* 0 mysql
* 1 postgresql
* 2 hive
* 3 spark
* 4 clickhouse
* 5 oracle
* 6 sqlserver
* 7 db2
*/
MYSQL(0, "mysql"),
POSTGRESQL(1, "postgresql"),
HIVE(2, "hive"),
SPARK(3, "spark"),
CLICKHOUSE(4, "clickhouse"),
ORACLE(5, "oracle"),
SQLSERVER(6, "sqlserver"),
DB2(7, "db2");
/**
* 0 mysql
* 1 postgresql
* 2 hive
* 3 spark
* 4 clickhouse
* 5 oracle
* 6 sqlserver
* 7 db2
*/
MYSQL(0, "mysql"),
POSTGRESQL(1, "postgresql"),
HIVE(2, "hive"),
SPARK(3, "spark"),
CLICKHOUSE(4, "clickhouse"),
ORACLE(5, "oracle"),
SQLSERVER(6, "sqlserver"),
DB2(7, "db2");
DbType(int code, String descp){
this.code = code;
this.descp = descp;
}
DbType(int code, String descp) {
this.code = code;
this.descp = descp;
}
@EnumValue
private final int code;
private final String descp;
@EnumValue
private final int code;
private final String descp;
public int getCode() {
return code;
}
public String getDescp() {
return descp;
}
}

8
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/ExecutionStatus.java

@@ -18,13 +18,11 @@ package org.apache.dolphinscheduler.common.enums;
import com.baomidou.mybatisplus.annotation.EnumValue;
import lombok.Getter;
/**
* running status for workflow and task nodes
*
*/
@Getter
public enum ExecutionStatus {
/**
@@ -123,5 +121,11 @@ public enum ExecutionStatus {
return this == KILL || this == STOP ;
}
public int getCode() {
return code;
}
public String getDescp() {
return descp;
}
}

10
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/FailureStrategy.java

@@ -17,12 +17,10 @@
package org.apache.dolphinscheduler.common.enums;
import com.baomidou.mybatisplus.annotation.EnumValue;
import lombok.Getter;
/**
* failure policy when some task node failed.
*/
@Getter
public enum FailureStrategy {
/**
@@ -40,4 +38,12 @@ public enum FailureStrategy {
@EnumValue
private final int code;
private final String descp;
public int getCode() {
return code;
}
public String getDescp() {
return descp;
}
}

10
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/Flag.java

@@ -17,7 +17,6 @@
package org.apache.dolphinscheduler.common.enums;
import com.baomidou.mybatisplus.annotation.EnumValue;
import lombok.Getter;
/**
* have_script
@@ -27,7 +26,6 @@ import lombok.Getter;
* have_map_variables
* have_alert
*/
@Getter
public enum Flag {
/**
* 0 no
@@ -45,4 +43,12 @@ public enum Flag {
@EnumValue
private final int code;
private final String descp;
public int getCode() {
return code;
}
public String getDescp() {
return descp;
}
}

9
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/Priority.java

@@ -17,12 +17,10 @@
package org.apache.dolphinscheduler.common.enums;
import com.baomidou.mybatisplus.annotation.EnumValue;
import lombok.Getter;
/**
* define process and task priority
*/
@Getter
public enum Priority {
/**
* 0 highest priority
@@ -46,4 +44,11 @@ public enum Priority {
private final int code;
private final String descp;
public int getCode() {
return code;
}
public String getDescp() {
return descp;
}
}

10
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/ReleaseState.java

@@ -17,12 +17,10 @@
package org.apache.dolphinscheduler.common.enums;
import com.baomidou.mybatisplus.annotation.EnumValue;
import lombok.Getter;
/**
* process define release state
*/
@Getter
public enum ReleaseState {
/**
@@ -50,4 +48,12 @@ public enum ReleaseState {
//For values out of enum scope
return null;
}
public int getCode() {
return code;
}
public String getDescp() {
return descp;
}
}

10
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/ResourceType.java

@@ -17,12 +17,10 @@
package org.apache.dolphinscheduler.common.enums;
import com.baomidou.mybatisplus.annotation.EnumValue;
import lombok.Getter;
/**
* resource type
*/
@Getter
public enum ResourceType {
/**
* 0 file, 1 udf
@@ -39,4 +37,12 @@ public enum ResourceType {
@EnumValue
private final int code;
private final String descp;
public int getCode() {
return code;
}
public String getDescp() {
return descp;
}
}

10
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/RunMode.java

@@ -17,12 +17,10 @@
package org.apache.dolphinscheduler.common.enums;
import com.baomidou.mybatisplus.annotation.EnumValue;
import lombok.Getter;
/**
* complement data run mode
*/
@Getter
public enum RunMode {
/**
* 0 serial run
@@ -39,4 +37,12 @@ public enum RunMode {
@EnumValue
private final int code;
private final String descp;
public int getCode() {
return code;
}
public String getDescp() {
return descp;
}
}

10
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/ShowType.java

@@ -17,12 +17,10 @@
package org.apache.dolphinscheduler.common.enums;
import com.baomidou.mybatisplus.annotation.EnumValue;
import lombok.Getter;
/**
* show type for email
*/
@Getter
public enum ShowType {
/**
* 0 TABLE;
@@ -44,4 +42,12 @@ public enum ShowType {
@EnumValue
private final int code;
private final String descp;
public int getCode() {
return code;
}
public String getDescp() {
return descp;
}
}

10
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/SparkVersion.java

@@ -17,9 +17,7 @@
package org.apache.dolphinscheduler.common.enums;
import com.baomidou.mybatisplus.annotation.EnumValue;
import lombok.Getter;
@Getter
public enum SparkVersion {
/**
@@ -37,4 +35,12 @@ public enum SparkVersion {
@EnumValue
private final int code;
private final String descp;
public int getCode() {
return code;
}
public String getDescp() {
return descp;
}
}

10
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/TaskDependType.java

@@ -17,12 +17,10 @@
package org.apache.dolphinscheduler.common.enums;
import com.baomidou.mybatisplus.annotation.EnumValue;
import lombok.Getter;
/**
* task node depend type
*/
@Getter
public enum TaskDependType {
/**
* 0 run current tasks only
@@ -41,4 +39,12 @@ public enum TaskDependType {
@EnumValue
private final int code;
private final String descp;
public int getCode() {
return code;
}
public String getDescp() {
return descp;
}
}

9
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/TaskType.java

@@ -17,12 +17,10 @@
package org.apache.dolphinscheduler.common.enums;
import com.baomidou.mybatisplus.annotation.EnumValue;
import lombok.Getter;
/**
* task node type
*/
@Getter
public enum TaskType {
/**
* 0 SHELL
@@ -61,4 +59,11 @@ public enum TaskType {
return !(taskType == TaskType.SUB_PROCESS || taskType == TaskType.DEPENDENT);
}
public int getCode() {
return code;
}
public String getDescp() {
return descp;
}
}

10
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/UdfType.java

@@ -17,12 +17,10 @@
package org.apache.dolphinscheduler.common.enums;
import com.baomidou.mybatisplus.annotation.EnumValue;
import lombok.Getter;
/**
* UDF type
*/
@Getter
public enum UdfType {
/**
* 0 hive; 1 spark
@@ -38,4 +36,12 @@ public enum UdfType {
@EnumValue
private final int code;
private final String descp;
public int getCode() {
return code;
}
public String getDescp() {
return descp;
}
}

10
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/UserType.java

@@ -17,12 +17,10 @@
package org.apache.dolphinscheduler.common.enums;
import com.baomidou.mybatisplus.annotation.EnumValue;
import lombok.Getter;
/**
* user type
*/
@Getter
public enum UserType {
/**
* 0 admin user; 1 general user
@@ -39,5 +37,13 @@ public enum UserType {
@EnumValue
private final int code;
private final String descp;
public int getCode() {
return code;
}
public String getDescp() {
return descp;
}
}

10
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/WarningType.java

@@ -17,12 +17,10 @@
package org.apache.dolphinscheduler.common.enums;
import com.baomidou.mybatisplus.annotation.EnumValue;
import lombok.Getter;
/**
* types for whether to send warning when process ending;
*/
@Getter
public enum WarningType {
/**
* 0 do not send warning;
@@ -44,4 +42,12 @@ public enum WarningType {
@EnumValue
private final int code;
private final String descp;
public int getCode() {
return code;
}
public String getDescp() {
return descp;
}
}

3
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/queue/TaskQueueFactory.java

@@ -18,6 +18,7 @@ package org.apache.dolphinscheduler.common.queue;
import org.apache.dolphinscheduler.common.utils.CommonUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.dolphinscheduler.common.utils.SpringApplicationContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -43,7 +44,7 @@ public class TaskQueueFactory {
String queueImplValue = CommonUtils.getQueueImplValue();
if (StringUtils.isNotBlank(queueImplValue)) {
logger.info("task queue impl use zookeeper ");
return TaskQueueZkImpl.getInstance();
return SpringApplicationContext.getBean(TaskQueueZkImpl.class);
}else{
logger.error("property dolphinscheduler.queue.impl can't be blank, system will exit ");
System.exit(-1);

163
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/queue/TaskQueueZkImpl.java

@@ -17,22 +17,14 @@
package org.apache.dolphinscheduler.common.queue;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.utils.Bytes;
import org.apache.dolphinscheduler.common.utils.IpUtils;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.common.zk.DefaultEnsembleProvider;
import org.apache.dolphinscheduler.common.zk.ZookeeperConfig;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.data.Stat;
import org.apache.dolphinscheduler.common.zk.ZookeeperOperator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.*;
@@ -40,35 +32,13 @@ import java.util.*;
* A singleton of a task queue implemented with zookeeper
* tasks queue implemention
*/
@Service
public class TaskQueueZkImpl implements ITaskQueue {
private static final Logger logger = LoggerFactory.getLogger(TaskQueueZkImpl.class);
private static volatile TaskQueueZkImpl instance;
private CuratorFramework zkClient;
private ZookeeperConfig zookeeperConfig;
private CuratorFramework getZkClient() {
return zkClient;
}
private TaskQueueZkImpl(){
init();
}
public static TaskQueueZkImpl getInstance(){
if (null == instance) {
synchronized (TaskQueueZkImpl.class) {
if(null == instance) {
instance = new TaskQueueZkImpl();
}
}
}
return instance;
}
@Autowired
private ZookeeperOperator zookeeperOperator;
/**
* get all tasks from tasks queue
@ -78,14 +48,12 @@ public class TaskQueueZkImpl implements ITaskQueue {
@Override
public List<String> getAllTasks(String key) {
try {
List<String> list = getZkClient().getChildren().forPath(getTasksPath(key));
List<String> list = zookeeperOperator.getChildrenKeys(getTasksPath(key));
return list;
} catch (Exception e) {
logger.error("get all tasks from tasks queue exception",e);
}
return new ArrayList<String>();
return new ArrayList<>();
}
/**
@ -99,22 +67,8 @@ public class TaskQueueZkImpl implements ITaskQueue {
public boolean checkTaskExists(String key, String task) {
String taskPath = getTasksPath(key) + Constants.SINGLE_SLASH + task;
try {
Stat stat = zkClient.checkExists().forPath(taskPath);
if(null == stat){
logger.info("check task:{} not exist in task queue",task);
return false;
}else{
logger.info("check task {} exists in task queue ",task);
return true;
}
return zookeeperOperator.isExisted(taskPath);
} catch (Exception e) {
logger.error("task {} check exists in task queue exception", task, e);
}
return false;
}
@ -128,9 +82,7 @@ public class TaskQueueZkImpl implements ITaskQueue {
public boolean add(String key, String value){
try {
String taskIdPath = getTasksPath(key) + Constants.SINGLE_SLASH + value;
String result = getZkClient().create().withMode(CreateMode.PERSISTENT).forPath(taskIdPath, Bytes.toBytes(value));
logger.info("add task : {} to tasks queue , result success",result);
zookeeperOperator.persist(taskIdPath, value);
return true;
} catch (Exception e) {
logger.error("add task to tasks queue exception",e);
@ -153,8 +105,7 @@ public class TaskQueueZkImpl implements ITaskQueue {
@Override
public List<String> poll(String key, int tasksNum) {
try{
CuratorFramework zk = getZkClient();
List<String> list = zk.getChildren().forPath(getTasksPath(key));
List<String> list = zookeeperOperator.getChildrenKeys(getTasksPath(key));
if(list != null && list.size() > 0){
@ -277,15 +228,12 @@ public class TaskQueueZkImpl implements ITaskQueue {
@Override
public void removeNode(String key, String nodeValue){
CuratorFramework zk = getZkClient();
String tasksQueuePath = getTasksPath(key) + Constants.SINGLE_SLASH;
String taskIdPath = tasksQueuePath + nodeValue;
logger.info("consume task {}", taskIdPath);
logger.info("removeNode task {}", taskIdPath);
try{
Stat stat = zk.checkExists().forPath(taskIdPath);
if(stat != null){
zk.delete().forPath(taskIdPath);
}
zookeeperOperator.remove(taskIdPath);
}catch(Exception e){
logger.error(String.format("delete task:%s from zookeeper fail, exception:" ,nodeValue) ,e);
}
@ -307,13 +255,10 @@ public class TaskQueueZkImpl implements ITaskQueue {
if(value != null && value.trim().length() > 0){
String path = getTasksPath(key) + Constants.SINGLE_SLASH;
CuratorFramework zk = getZkClient();
Stat stat = zk.checkExists().forPath(path + value);
if(null == stat){
String result = zk.create().withMode(CreateMode.PERSISTENT).forPath(path + value,Bytes.toBytes(value));
logger.info("add task:{} to tasks set result:{} ",value,result);
}else{
if(!zookeeperOperator.isExisted(path + value)){
zookeeperOperator.persist(path + value,value);
logger.info("add task:{} to tasks set ",value);
} else{
logger.info("task {} exists in tasks set ",value);
}
@ -336,15 +281,7 @@ public class TaskQueueZkImpl implements ITaskQueue {
public void srem(String key, String value) {
try{
String path = getTasksPath(key) + Constants.SINGLE_SLASH;
CuratorFramework zk = getZkClient();
Stat stat = zk.checkExists().forPath(path + value);
if(null != stat){
zk.delete().forPath(path + value);
logger.info("delete task:{} from tasks set ",value);
}else{
logger.info("delete task:{} from tasks set fail, there is no this task",value);
}
zookeeperOperator.remove(path + value);
}catch(Exception e){
logger.error(String.format("delete task:" + value + " exception"),e);
@ -363,7 +300,7 @@ public class TaskQueueZkImpl implements ITaskQueue {
Set<String> tasksSet = new HashSet<>();
try {
List<String> list = getZkClient().getChildren().forPath(getTasksPath(key));
List<String> list = zookeeperOperator.getChildrenKeys(getTasksPath(key));
for (String task : list) {
tasksSet.add(task);
@ -377,56 +314,6 @@ public class TaskQueueZkImpl implements ITaskQueue {
return tasksSet;
}
/**
* Init the task queue of zookeeper node
*/
private void init(){
initZkClient();
try {
String tasksQueuePath = getTasksPath(Constants.DOLPHINSCHEDULER_TASKS_QUEUE);
String tasksCancelPath = getTasksPath(Constants.DOLPHINSCHEDULER_TASKS_KILL);
for(String taskQueuePath : new String[]{tasksQueuePath,tasksCancelPath}){
if(zkClient.checkExists().forPath(taskQueuePath) == null){
// create a persistent parent node
zkClient.create().creatingParentContainersIfNeeded()
.withMode(CreateMode.PERSISTENT).forPath(taskQueuePath);
logger.info("create tasks queue parent node success : {} ",taskQueuePath);
}
}
} catch (Exception e) {
logger.error("create zk node failure",e);
}
}
private void initZkClient() {
Configuration conf = null;
try {
conf = new PropertiesConfiguration(Constants.ZOOKEEPER_PROPERTIES_PATH);
} catch (ConfigurationException ex) {
logger.error("load zookeeper properties file failed, system exit");
System.exit(-1);
}
zkClient = CuratorFrameworkFactory.builder().ensembleProvider(new DefaultEnsembleProvider(conf.getString("zookeeper.quorum")))
.retryPolicy(new ExponentialBackoffRetry(conf.getInt("zookeeper.retry.base.sleep"), conf.getInt("zookeeper.retry.maxtime"), conf.getInt("zookeeper.retry.max.sleep")))
.sessionTimeoutMs(conf.getInt("zookeeper.session.timeout"))
.connectionTimeoutMs(conf.getInt("zookeeper.connection.timeout"))
.build();
zkClient.start();
try {
zkClient.blockUntilConnected();
} catch (final Exception ex) {
throw new RuntimeException(ex);
}
}
/**
* Clear the task queue of zookeeper node
*/
@ -437,16 +324,12 @@ public class TaskQueueZkImpl implements ITaskQueue {
String tasksCancelPath = getTasksPath(Constants.DOLPHINSCHEDULER_TASKS_KILL);
for(String taskQueuePath : new String[]{tasksQueuePath,tasksCancelPath}){
if(zkClient.checkExists().forPath(taskQueuePath) != null){
List<String> list = zkClient.getChildren().forPath(taskQueuePath);
if(zookeeperOperator.isExisted(taskQueuePath)){
List<String> list = zookeeperOperator.getChildrenKeys(taskQueuePath);
for (String task : list) {
zkClient.delete().forPath(taskQueuePath + Constants.SINGLE_SLASH + task);
zookeeperOperator.remove(taskQueuePath + Constants.SINGLE_SLASH + task);
logger.info("delete task from tasks queue : {}/{} ",taskQueuePath,task);
}
}
}

2
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/utils/SpringApplicationContext.java → dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/SpringApplicationContext.java

@ -14,7 +14,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.utils;
package org.apache.dolphinscheduler.common.utils;
import org.springframework.beans.BeansException;
import org.springframework.context.ApplicationContext;

6
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/zk/AbstractZKClient.java

@ -144,7 +144,7 @@ public abstract class AbstractZKClient extends ZookeeperCachedOperator{
String parentPath = getZNodeParentPath(zkNodeType);
String serverPathPrefix = parentPath + "/" + OSUtils.getHost();
String registerPath = zkClient.create().withMode(CreateMode.EPHEMERAL_SEQUENTIAL).forPath(
serverPathPrefix + "_", heartbeatZKInfo.getBytes());
serverPathPrefix + UNDERLINE, heartbeatZKInfo.getBytes());
logger.info("register {} node {} success" , zkNodeType.toString(), registerPath);
return registerPath;
}
@ -307,7 +307,7 @@ public abstract class AbstractZKClient extends ZookeeperCachedOperator{
}
Map<String, String> serverMaps = getServerMaps(zkNodeType);
for(String hostKey : serverMaps.keySet()){
if(hostKey.startsWith(host)){
if(hostKey.startsWith(host + UNDERLINE)){
return true;
}
}
@ -461,7 +461,7 @@ public abstract class AbstractZKClient extends ZookeeperCachedOperator{
if (serverHost.equals(OSUtils.getHost())) {
logger.error("{} server({}) of myself dead , stopping...",
zkNodeType.toString(), serverHost);
stoppable.stop(String.format(" {} server {} of myself dead , stopping...",
stoppable.stop(String.format(" %s server %s of myself dead , stopping...",
zkNodeType.toString(), serverHost));
return true;
}
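The underscore suffix matters because registered znode names follow the host_sequence pattern produced by the EPHEMERAL_SEQUENTIAL registration above; a tiny sketch with invented addresses showing the prefix collision the old startsWith(host) check allowed:

// Illustration only; the addresses and sequence number are invented.
public class HostPrefixSketch {
    public static void main(String[] args) {
        String registeredNode = "192.168.1.10_0000000003";
        System.out.println(registeredNode.startsWith("192.168.1.1"));       // true  -> false positive for another host
        System.out.println(registeredNode.startsWith("192.168.1.1" + "_")); // false -> correct with the underline suffix
    }
}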

48
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/zk/ZookeeperCachedOperator.java

@ -16,8 +16,10 @@
*/
package org.apache.dolphinscheduler.common.zk;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.cache.ChildData;
import org.apache.curator.framework.recipes.cache.TreeCache;
import org.apache.curator.framework.recipes.cache.TreeCacheEvent;
import org.apache.curator.framework.recipes.cache.TreeCacheListener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -34,30 +36,37 @@ public class ZookeeperCachedOperator extends ZookeeperOperator {
private final Logger logger = LoggerFactory.getLogger(ZookeeperCachedOperator.class);
//key is zk path, value is TreeCache
private ConcurrentHashMap<String, TreeCache> allCaches = new ConcurrentHashMap<>();
TreeCache treeCache;
/**
* @param cachePath zk path
* @param listener operator
* register a unified listener of /${dsRoot},
*/
public void registerListener(final String cachePath, final TreeCacheListener listener) {
TreeCache newCache = new TreeCache(zkClient, cachePath);
logger.info("add listener to zk path: {}", cachePath);
@Override
protected void registerListener() {
treeCache = new TreeCache(zkClient, getZookeeperConfig().getDsRoot());
logger.info("add listener to zk path: {}", getZookeeperConfig().getDsRoot());
try {
newCache.start();
treeCache.start();
} catch (Exception e) {
logger.error("add listener to zk path: {} failed", cachePath);
logger.error("add listener to zk path: {} failed", getZookeeperConfig().getDsRoot());
throw new RuntimeException(e);
}
newCache.getListenable().addListener(listener);
treeCache.getListenable().addListener((client, event) -> {
String path = null == event.getData() ? "" : event.getData().getPath();
if (path.isEmpty()) {
return;
}
dataChanged(client, event, path);
});
allCaches.put(cachePath, newCache);
}
//for sub class
protected void dataChanged(final CuratorFramework client, final TreeCacheEvent event, final String path){}
public String getFromCache(final String cachePath, final String key) {
ChildData resultInCache = allCaches.get(checkNotNull(cachePath)).getCurrentData(key);
ChildData resultInCache = treeCache.getCurrentData(key);
if (null != resultInCache) {
return null == resultInCache.getData() ? null : new String(resultInCache.getData(), StandardCharsets.UTF_8);
}
@ -65,18 +74,15 @@ public class ZookeeperCachedOperator extends ZookeeperOperator {
}
public TreeCache getTreeCache(final String cachePath) {
return allCaches.get(checkNotNull(cachePath));
return treeCache;
}
public void close() {
allCaches.forEach((path, cache) -> {
cache.close();
try {
Thread.sleep(500);
} catch (InterruptedException ignore) {
}
});
treeCache.close();
try {
Thread.sleep(500);
} catch (InterruptedException ignore) {
}
super.close();
}
}
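Under the new design the base class owns a single TreeCache rooted at ${dsRoot} and forwards every event with a non-empty path to dataChanged. A hedged sketch of a hypothetical subclass hooking into that callback; the class name and log message are illustrative, and how the subclass is wired into Spring is assumed:

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.cache.TreeCacheEvent;
import org.apache.dolphinscheduler.common.zk.ZookeeperCachedOperator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Hypothetical subclass used only to illustrate the dataChanged hook.
public class LoggingZkWatcher extends ZookeeperCachedOperator {

    private static final Logger logger = LoggerFactory.getLogger(LoggingZkWatcher.class);

    @Override
    protected void dataChanged(final CuratorFramework client, final TreeCacheEvent event, final String path) {
        // called for every cached event whose path is non-empty
        logger.info("zookeeper event {} on path {}", event.getType(), path);
    }
}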

11
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/zk/ZookeeperOperator.java

@ -57,11 +57,13 @@ public class ZookeeperOperator implements InitializingBean {
public void afterPropertiesSet() throws Exception {
this.zkClient = buildClient();
initStateLister();
//init();
registerListener();
}
//for subclass
//protected void init(){}
/**
* this method is for sub class,
*/
protected void registerListener(){}
public void initStateLister() {
checkNotNull(zkClient);
@ -127,9 +129,6 @@ public class ZookeeperOperator implements InitializingBean {
List<String> values;
try {
values = zkClient.getChildren().forPath(key);
if (CollectionUtils.isEmpty(values)) {
logger.warn("getChildrenKeys key : {} is empty", key);
}
return values;
} catch (InterruptedException ex) {
logger.error("getChildrenKeys key : {} InterruptedException", key);

4
dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/queue/TaskQueueZKImplTest.java

@ -32,11 +32,9 @@ import static org.junit.Assert.*;
/**
* task queue test
*/
@Ignore
public class TaskQueueZKImplTest extends BaseTaskQueueTest {
@Before
public void before(){

130
dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/ParameterUtilsTest.java

@ -0,0 +1,130 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.utils;
import com.alibaba.fastjson.JSONObject;
import org.apache.commons.lang.time.DateUtils;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.DataType;
import org.apache.dolphinscheduler.common.enums.Direct;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.utils.placeholder.PlaceholderUtils;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
import static org.apache.dolphinscheduler.common.Constants.PARAMETER_FORMAT_TIME;
import static org.apache.dolphinscheduler.common.utils.placeholder.TimePlaceholderUtils.replacePlaceholders;
public class ParameterUtilsTest {
public static final Logger logger = LoggerFactory.getLogger(ParameterUtilsTest.class);
/**
* Test convertParameterPlaceholders
*/
@Test
public void testConvertParameterPlaceholders() throws Exception {
// parameterString,parameterMap is null
Assert.assertNull(ParameterUtils.convertParameterPlaceholders(null, null));
// parameterString is null,parameterMap is not null
Map<String, String> parameterMap = new HashMap<String,String>();
parameterMap.put("testParameter","testParameter");
Assert.assertNull(ParameterUtils.convertParameterPlaceholders(null, parameterMap));
// parameterString and parameterMap are not null
String parameterString = "test_parameter";
Assert.assertEquals(parameterString, ParameterUtils.convertParameterPlaceholders(parameterString, parameterMap));
//replace variable ${} form
parameterMap.put("testParameter2","${testParameter}");
Assert.assertEquals(parameterString,PlaceholderUtils.replacePlaceholders(parameterString, parameterMap, true));
// replace time $[...] form, eg. $[yyyyMMdd]
Date cronTime = new Date();
Assert.assertEquals(parameterString, replacePlaceholders(parameterString, cronTime, true));
// replace time $[...] form, eg. $[yyyyMMdd]
Date cronTimeStr = DateUtils.parseDate("20191220145900", new String[]{PARAMETER_FORMAT_TIME});
Assert.assertEquals(parameterString, replacePlaceholders(parameterString, cronTimeStr, true));
}
/**
* Test curingGlobalParams
*/
@Test
public void testCuringGlobalParams() throws Exception {
//define globalMap
Map<String, String> globalParamMap = new HashMap<>();
globalParamMap.put("globalParams1","Params1");
//define globalParamList
List<Property> globalParamList = new ArrayList<>();
//define scheduleTime
Date scheduleTime = DateUtils.parseDate("20191220145900", new String[]{PARAMETER_FORMAT_TIME});
//test globalParamList is null
String result = ParameterUtils.curingGlobalParams(globalParamMap, globalParamList, CommandType.START_CURRENT_TASK_PROCESS, scheduleTime);
Assert.assertNull(result);
Assert.assertNull(ParameterUtils.curingGlobalParams(null,null,CommandType.START_CURRENT_TASK_PROCESS,null));
Assert.assertNull(ParameterUtils.curingGlobalParams(globalParamMap,null,CommandType.START_CURRENT_TASK_PROCESS,scheduleTime));
//test globalParamList is not null
Property property=new Property("testGlobalParam", Direct.IN, DataType.VARCHAR,"testGlobalParam");
globalParamList.add(property);
String result2 = ParameterUtils.curingGlobalParams(null,globalParamList,CommandType.START_CURRENT_TASK_PROCESS,scheduleTime);
Assert.assertEquals(result2, JSONObject.toJSONString(globalParamList));
String result3 = ParameterUtils.curingGlobalParams(globalParamMap,globalParamList,CommandType.START_CURRENT_TASK_PROCESS,null);
Assert.assertEquals(result3, JSONObject.toJSONString(globalParamList));
String result4 = ParameterUtils.curingGlobalParams(globalParamMap, globalParamList, CommandType.START_CURRENT_TASK_PROCESS, scheduleTime);
Assert.assertEquals(result4, JSONObject.toJSONString(globalParamList));
//test var $ startsWith
globalParamMap.put("bizDate","${system.biz.date}");
globalParamMap.put("b1zCurdate","${system.biz.curdate}");
Property property2=new Property("testParamList1", Direct.IN, DataType.VARCHAR,"testParamList");
Property property3=new Property("testParamList2", Direct.IN, DataType.VARCHAR,"{testParamList1}");
Property property4=new Property("testParamList3", Direct.IN, DataType.VARCHAR,"${b1zCurdate}");
globalParamList.add(property2);
globalParamList.add(property3);
globalParamList.add(property4);
String result5 = ParameterUtils.curingGlobalParams(globalParamMap, globalParamList, CommandType.START_CURRENT_TASK_PROCESS, scheduleTime);
Assert.assertEquals(result5,JSONUtils.toJsonString(globalParamList));
}
/**
* Test handleEscapes
*/
@Test
public void testHandleEscapes() throws Exception {
Assert.assertNull(ParameterUtils.handleEscapes(null));
Assert.assertEquals("",ParameterUtils.handleEscapes(""));
Assert.assertEquals("test Parameter",ParameterUtils.handleEscapes("test Parameter"));
Assert.assertEquals("////%test////%Parameter",ParameterUtils.handleEscapes("%test%Parameter"));
}
}

3
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/ProcessDao.java

@ -105,7 +105,8 @@ public class ProcessDao {
/**
* task queue impl
*/
protected ITaskQueue taskQueue = TaskQueueFactory.getTaskQueueInstance();
@Autowired
private ITaskQueue taskQueue;
/**
* handle Command (construct ProcessInstance from Command) , wrapped in transaction
* @param logger logger

6
dolphinscheduler-dao/src/main/resources/application-dao.properties

@ -19,12 +19,12 @@
spring.datasource.type=com.alibaba.druid.pool.DruidDataSource
# postgre
spring.datasource.driver-class-name=org.postgresql.Driver
spring.datasource.url=jdbc:postgresql://192.168.xx.xx:5432/dolphinscheduler
spring.datasource.url=jdbc:postgresql://localhost:5432/dolphinscheduler
# mysql
#spring.datasource.driver-class-name=com.mysql.jdbc.Driver
#spring.datasource.url=jdbc:mysql://192.168.xx.xx:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8
spring.datasource.username=xx
spring.datasource.password=xx
spring.datasource.username=test
spring.datasource.password=test
# connection configuration
spring.datasource.initialSize=5

2
dolphinscheduler-dao/src/test/java/org/apache/dolphinscheduler/dao/mapper/Application.java

@ -21,7 +21,7 @@ import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.context.annotation.ComponentScan;
@SpringBootApplication
@ComponentScan("org.apache.dolphinscheduler.dao")
@ComponentScan("org.apache.dolphinscheduler")
public class Application {
public static void main(String[] args) {

327
dolphinscheduler-dist/pom.xml vendored

@ -101,6 +101,333 @@
</plugins>
</build>
</profile>
<profile>
<id>nginx</id>
<build>
<plugins>
<plugin>
<artifactId>maven-assembly-plugin</artifactId>
<executions>
<execution>
<id>dolphinscheduler-nginx</id>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
<configuration>
<descriptors>
<descriptor>src/main/assembly/dolphinscheduler-nginx.xml</descriptor>
</descriptors>
<appendAssemblyId>true</appendAssemblyId>
</configuration>
</execution>
<execution>
<id>src</id>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
<configuration>
<descriptors>
<descriptor>src/main/assembly/dolphinscheduler-src.xml</descriptor>
</descriptors>
<appendAssemblyId>true</appendAssemblyId>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<executions>
<execution>
<id>attach-sources</id>
<phase>verify</phase>
<goals>
<goal>jar-no-fork</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
<profile>
<id>rpmbuild</id>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<configuration>
<outputDirectory>${project.build.directory}/lib</outputDirectory>
<overWriteReleases>false</overWriteReleases>
<overWriteSnapshots>false</overWriteSnapshots>
<overWriteIfNewer>true</overWriteIfNewer>
<excludeScope>provided</excludeScope>
</configuration>
<executions>
<execution>
<id>copy-dependencies</id>
<phase>package</phase>
<goals>
<goal>copy-dependencies</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>rpm-maven-plugin</artifactId>
<extensions>true</extensions>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>attached-rpm</goal>
</goals>
</execution>
</executions>
<configuration>
<name>apache-dolphinscheduler-incubating</name>
<release>1</release>
<distribution>apache dolphinscheduler incubating rpm</distribution>
<group>apache</group>
<packager>dolphinscheduler</packager>
<!-- <version>${project.version}</version> -->
<prefix>/opt/soft</prefix>
<defineStatements>
<!-- disable compile python when rpm build -->
<defineStatement>__os_install_post %(echo '%{__os_install_post}' | sed -e 's!/usr/lib[^[:space:]]*/brp-python-bytecompile[[:space:]].*$!!g')</defineStatement>
</defineStatements>
<mappings>
<mapping>
<directory>/opt/soft/${project.build.finalName}/conf</directory>
<filemode>755</filemode>
<username>root</username>
<groupname>root</groupname>
<sources>
<source>
<location>
${basedir}/../dolphinscheduler-alert/src/main/resources
</location>
<includes>
<include>**/*.properties</include>
<include>**/*.xml</include>
<include>**/*.json</include>
<include>**/*.ftl</include>
</includes>
</source>
<source>
<location>
${basedir}/../dolphinscheduler-common/src/main/resources
</location>
<includes>
<include>**/*.properties</include>
<include>**/*.xml</include>
<include>**/*.json</include>
</includes>
</source>
<source>
<location>
${basedir}/../dolphinscheduler-dao/src/main/resources
</location>
<includes>
<include>**/*.properties</include>
<include>**/*.xml</include>
<include>**/*.json</include>
<include>**/*.yml</include>
</includes>
</source>
<source>
<location>
${basedir}/../dolphinscheduler-api/src/main/resources
</location>
<includes>
<include>**/*.properties</include>
<include>**/*.xml</include>
<include>**/*.json</include>
</includes>
</source>
<source>
<location>
${basedir}/../dolphinscheduler-server/src/main/resources
</location>
<includes>
<include>**/*.properties</include>
<include>**/*.xml</include>
<include>**/*.json</include>
<include>config/*.*</include>
</includes>
</source>
<source>
<location>
${basedir}/../script
</location>
<includes>
<include>env/*.*</include>
</includes>
</source>
</sources>
</mapping>
<mapping>
<directory>/opt/soft/${project.build.finalName}/lib</directory>
<filemode>755</filemode>
<username>root</username>
<groupname>root</groupname>
<sources>
<source>
<location>
${basedir}/../dolphinscheduler-dist/target/lib
</location>
<includes>
<include>*.*</include>
</includes>
<excludes>
<exclude>servlet-api-*.jar</exclude>
<exclude>slf4j-log4j12-${slf4j.log4j12.version}.jar</exclude>
</excludes>
</source>
</sources>
</mapping>
<mapping>
<directory>/opt/soft/${project.build.finalName}/bin</directory>
<filemode>755</filemode>
<username>root</username>
<groupname>root</groupname>
<sources>
<source>
<location>
${basedir}/../script
</location>
<includes>
<include>start-all.sh</include>
<include>stop-all.sh</include>
<include>dolphinscheduler-daemon.sh</include>
</includes>
</source>
</sources>
</mapping>
<mapping>
<directory>/opt/soft/${project.build.finalName}</directory>
<filemode>755</filemode>
<username>root</username>
<groupname>root</groupname>
<sources>
<source>
<location>
${basedir}/../
</location>
<includes>
<include>*.sh</include>
<include>*.py</include>
<include>DISCLAIMER</include>
</includes>
</source>
<source>
<location>
${basedir}/../dolphinscheduler-ui
</location>
<includes>
<include>install-dolphinscheduler-ui.sh</include>
</includes>
</source>
<source>
<location>
${basedir}/release-docs
</location>
<includes>
<include>**/*</include>
</includes>
</source>
</sources>
</mapping>
<mapping>
<directory>/opt/soft/${project.build.finalName}/dist</directory>
<filemode>755</filemode>
<username>root</username>
<groupname>root</groupname>
<sources>
<source>
<location>
${basedir}/../dolphinscheduler-ui/dist
</location>
<includes>
<include>**/*.*</include>
</includes>
</source>
</sources>
</mapping>
<mapping>
<directory>/opt/soft/${project.build.finalName}/sql</directory>
<filemode>755</filemode>
<username>root</username>
<groupname>root</groupname>
<sources>
<source>
<location>
${basedir}/../sql
</location>
<includes>
<include>**/*.*</include>
</includes>
</source>
</sources>
</mapping>
<mapping>
<directory>/opt/soft/${project.build.finalName}/script</directory>
<filemode>755</filemode>
<username>root</username>
<groupname>root</groupname>
<sources>
<source>
<location>
${basedir}/../script
</location>
<includes>
<include>**/*.*</include>
</includes>
</source>
</sources>
</mapping>
</mappings>
<preinstallScriptlet>
<script>mkdir -p /opt/soft</script>
</preinstallScriptlet>
<postremoveScriptlet>
<script>rm -rf /opt/soft/apache-dolphinscheduler-incubating-${project.version}</script>
</postremoveScriptlet>
</configuration>
</plugin>
</plugins>
</build>
</profile>
</profiles>

21
dolphinscheduler-dist/src/main/assembly/dolphinscheduler-binary.xml vendored

@ -94,26 +94,12 @@
<include>**/*.properties</include>
<include>**/*.xml</include>
<include>**/*.json</include>
<include>config/*.*</include>
</includes>
<outputDirectory>conf</outputDirectory>
</fileSet>
<fileSet>
<directory>${basedir}/../dolphinscheduler-common/src/main/resources</directory>
<includes>
<include>**/*.properties</include>
<include>**/*.xml</include>
<include>**/*.json</include>
</includes>
<outputDirectory>conf</outputDirectory>
</fileSet>
<fileSet>
<directory>${basedir}/../dolphinscheduler-common/src/main/resources/bin</directory>
<includes>
<include>*.*</include>
</includes>
<directoryMode>755</directoryMode>
<outputDirectory>bin</outputDirectory>
</fileSet>
<fileSet>
<directory>${basedir}/../dolphinscheduler-dao/src/main/resources</directory>
<includes>
@ -177,7 +163,6 @@
<fileSet>
<directory>${basedir}/../script</directory>
<includes>
<include>config/*.*</include>
<include>env/*.*</include>
</includes>
<outputDirectory>./conf</outputDirectory>

236
dolphinscheduler-dist/src/main/assembly/dolphinscheduler-nginx.xml vendored

@ -0,0 +1,236 @@
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<assembly
xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
<id>dolphinscheduler-nginx</id>
<formats>
<format>tar.gz</format>
</formats>
<includeBaseDirectory>true</includeBaseDirectory>
<baseDirectory>${project.build.finalName}-dolphinscheduler-bin</baseDirectory>
<fileSets>
<!--alert start-->
<fileSet>
<directory>${basedir}/../dolphinscheduler-alert/src/main/resources</directory>
<includes>
<include>**/*.properties</include>
<include>**/*.xml</include>
<include>**/*.json</include>
<include>**/*.ftl</include>
</includes>
<outputDirectory>./conf</outputDirectory>
</fileSet>
<!--alert end-->
<!--api start-->
<fileSet>
<directory>src/main/resources</directory>
<includes>
<include>**/*.properties</include>
<include>**/*.xml</include>
<include>**/*.json</include>
</includes>
<outputDirectory>conf</outputDirectory>
</fileSet>
<fileSet>
<directory>${basedir}/../dolphinscheduler-common/src/main/resources</directory>
<includes>
<include>**/*.properties</include>
<include>**/*.xml</include>
<include>**/*.json</include>
</includes>
<outputDirectory>conf</outputDirectory>
</fileSet>
<fileSet>
<directory>${basedir}/../dolphinscheduler-common/src/main/resources/bin</directory>
<includes>
<include>*.*</include>
</includes>
<directoryMode>755</directoryMode>
<outputDirectory>bin</outputDirectory>
</fileSet>
<fileSet>
<directory>${basedir}/../dolphinscheduler-dao/src/main/resources</directory>
<includes>
<include>**/*.properties</include>
<include>**/*.xml</include>
<include>**/*.json</include>
</includes>
<outputDirectory>conf</outputDirectory>
</fileSet>
<fileSet>
<directory>${basedir}/../dolphinscheduler-api/src/main/resources</directory>
<includes>
<include>**/*.properties</include>
<include>**/*.xml</include>
<include>**/*.json</include>
</includes>
<outputDirectory>conf</outputDirectory>
</fileSet>
<!--api end-->
<!--server start-->
<fileSet>
<directory>${basedir}/../dolphinscheduler-server/src/main/resources</directory>
<includes>
<include>**/*.properties</include>
<include>**/*.xml</include>
<include>**/*.json</include>
<include>config/*.*</include>
</includes>
<outputDirectory>conf</outputDirectory>
</fileSet>
<fileSet>
<directory>${basedir}/../dolphinscheduler-common/src/main/resources</directory>
<includes>
<include>**/*.properties</include>
<include>**/*.xml</include>
<include>**/*.json</include>
</includes>
<outputDirectory>conf</outputDirectory>
</fileSet>
<fileSet>
<directory>${basedir}/../dolphinscheduler-common/src/main/resources/bin</directory>
<includes>
<include>*.*</include>
</includes>
<directoryMode>755</directoryMode>
<outputDirectory>bin</outputDirectory>
</fileSet>
<fileSet>
<directory>${basedir}/../dolphinscheduler-dao/src/main/resources</directory>
<includes>
<include>**/*.properties</include>
<include>**/*.xml</include>
<include>**/*.json</include>
<include>**/*.yml</include>
</includes>
<outputDirectory>conf</outputDirectory>
</fileSet>
<!--server end-->
<fileSet>
<directory>${basedir}/../dolphinscheduler-server/target/dolphinscheduler-server-${project.version}</directory>
<includes>
<include>**/*.*</include>
</includes>
<outputDirectory>.</outputDirectory>
</fileSet>
<fileSet>
<directory>${basedir}/../dolphinscheduler-api/target/dolphinscheduler-api-${project.version}</directory>
<includes>
<include>**/*.*</include>
</includes>
<outputDirectory>.</outputDirectory>
</fileSet>
<fileSet>
<directory>${basedir}/../dolphinscheduler-alert/target/dolphinscheduler-alert-${project.version}</directory>
<includes>
<include>**/*.*</include>
</includes>
<outputDirectory>.</outputDirectory>
</fileSet>
<fileSet>
<directory>${basedir}/../dolphinscheduler-ui/dist</directory>
<includes>
<include>**/*.*</include>
</includes>
<outputDirectory>./ui/dist</outputDirectory>
</fileSet>
<fileSet>
<directory>${basedir}/../dolphinscheduler-ui</directory>
<includes>
<include>install-dolphinscheduler-ui.sh</include>
</includes>
<outputDirectory>./ui</outputDirectory>
</fileSet>
<fileSet>
<directory>${basedir}/../sql</directory>
<includes>
<include>**/*</include>
</includes>
<outputDirectory>./sql</outputDirectory>
</fileSet>
<fileSet>
<directory>${basedir}/../script</directory>
<includes>
<include>*.*</include>
</includes>
<outputDirectory>./script</outputDirectory>
</fileSet>
<fileSet>
<directory>${basedir}/../script</directory>
<includes>
<include>env/*.*</include>
</includes>
<outputDirectory>./conf</outputDirectory>
</fileSet>
<fileSet>
<directory>${basedir}/../script</directory>
<includes>
<include>start-all.sh</include>
<include>stop-all.sh</include>
<include>dolphinscheduler-daemon.sh</include>
</includes>
<outputDirectory>./bin</outputDirectory>
</fileSet>
<fileSet>
<directory>${basedir}/.././</directory>
<includes>
<include>*.sh</include>
<include>*.py</include>
<include>DISCLAIMER</include>
</includes>
<outputDirectory>.</outputDirectory>
</fileSet>
<fileSet>
<directory>${basedir}/release-docs</directory>
<useDefaultExcludes>true</useDefaultExcludes>
<includes>
<include>**/*</include>
</includes>
<outputDirectory>.</outputDirectory>
</fileSet>
</fileSets>
<dependencySets>
<dependencySet>
<outputDirectory>lib</outputDirectory>
<useProjectArtifact>true</useProjectArtifact>
<excludes>
<exclude>javax.servlet:servlet-api</exclude>
<exclude>org.eclipse.jetty.aggregate:jetty-all</exclude>
<exclude>org.slf4j:slf4j-log4j12</exclude>
</excludes>
</dependencySet>
</dependencySets>
</assembly>

3
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/MasterServer.java

@ -23,18 +23,17 @@ import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.thread.ThreadPoolExecutors;
import org.apache.dolphinscheduler.common.thread.ThreadUtils;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.common.utils.SpringApplicationContext;
import org.apache.dolphinscheduler.dao.ProcessDao;
import org.apache.dolphinscheduler.server.master.config.MasterConfig;
import org.apache.dolphinscheduler.server.master.runner.MasterSchedulerThread;
import org.apache.dolphinscheduler.server.quartz.ProcessScheduleJob;
import org.apache.dolphinscheduler.server.quartz.QuartzExecutors;
import org.apache.dolphinscheduler.server.utils.SpringApplicationContext;
import org.apache.dolphinscheduler.server.zk.ZKMasterClient;
import org.quartz.SchedulerException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.WebApplicationType;
import org.springframework.boot.builder.SpringApplicationBuilder;
import org.springframework.context.annotation.ComponentScan;

2
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterBaseTaskExecThread.java

@ -18,13 +18,13 @@ package org.apache.dolphinscheduler.server.master.runner;
import org.apache.dolphinscheduler.common.queue.ITaskQueue;
import org.apache.dolphinscheduler.common.queue.TaskQueueFactory;
import org.apache.dolphinscheduler.common.utils.SpringApplicationContext;
import org.apache.dolphinscheduler.dao.AlertDao;
import org.apache.dolphinscheduler.dao.ProcessDao;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.utils.BeanContext;
import org.apache.dolphinscheduler.server.master.config.MasterConfig;
import org.apache.dolphinscheduler.server.utils.SpringApplicationContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

1
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java

@ -34,7 +34,6 @@ import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.utils.DagHelper;
import org.apache.dolphinscheduler.server.master.config.MasterConfig;
import org.apache.dolphinscheduler.server.utils.AlertManager;
import org.apache.dolphinscheduler.server.utils.SpringApplicationContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

2
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterSchedulerThread.java

@ -22,12 +22,12 @@ import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.thread.ThreadUtils;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.common.utils.SpringApplicationContext;
import org.apache.dolphinscheduler.common.zk.AbstractZKClient;
import org.apache.dolphinscheduler.dao.ProcessDao;
import org.apache.dolphinscheduler.dao.entity.Command;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.server.master.config.MasterConfig;
import org.apache.dolphinscheduler.server.utils.SpringApplicationContext;
import org.apache.dolphinscheduler.server.zk.ZKMasterClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

1
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/SubProcessTaskExecThread.java

@ -72,7 +72,6 @@ public class SubProcessTaskExecThread extends MasterBaseTaskExecThread {
this.taskInstance.setState(ExecutionStatus.KILL);
}else{
this.taskInstance.setState(subProcessInstance.getState());
result = true;
}
}
taskInstance.setEndTime(new Date());

126
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/monitor/AbstractMonitor.java

@ -0,0 +1,126 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.monitor;
import org.apache.commons.lang3.StringUtils;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
/**
* abstract server monitor and auto restart server
*/
@Component
public abstract class AbstractMonitor implements Monitor {
private static final Logger logger = LoggerFactory.getLogger(AbstractMonitor.class);
@Autowired
private RunConfig runConfig;
/**
* monitor server and restart
*/
@Override
public void monitor(String masterPath,String workerPath,Integer port,String installPath) {
try {
restartServer(masterPath,port,installPath);
restartServer(workerPath,port,installPath);
}catch (Exception e){
logger.error("server start up error",e);
}
}
private void restartServer(String path,Integer port,String installPath) throws Exception{
String type = path.split("/")[2];
String serverName = null;
String nodes = null;
if ("masters".equals(type)){
serverName = "master-server";
nodes = runConfig.getMasters();
}else if ("workers".equals(type)){
serverName = "worker-server";
nodes = runConfig.getWorkers();
}
Map<String, String> activeNodeMap = getActiveNodesByPath(path);
Set<String> needRestartServer = getNeedRestartServer(getRunConfigServer(nodes),
activeNodeMap.keySet());
for (String node : needRestartServer){
// os.system('ssh -p ' + ssh_port + ' ' + self.get_ip_by_hostname(master) + ' sh ' + install_path + '/bin/dolphinscheduler-daemon.sh start master-server')
String runCmd = "ssh -p " + port + " " + node + " sh " + installPath + "/bin/dolphinscheduler-daemon.sh start " + serverName;
Runtime.getRuntime().exec(runCmd);
}
}
/**
* get need restart server
* @param deployedNodes deployedNodes
* @param activeNodes activeNodes
* @return need restart server
*/
private Set<String> getNeedRestartServer(Set<String> deployedNodes,Set<String> activeNodes){
if (CollectionUtils.isEmpty(activeNodes)){
return deployedNodes;
}
Set<String> result = new HashSet<>();
result.addAll(deployedNodes);
result.removeAll(activeNodes);
return result;
}
/**
* run config masters/workers
* @return master set/worker set
*/
private Set<String> getRunConfigServer(String nodes){
Set<String> nodeSet = new HashSet();
if (StringUtils.isEmpty(nodes)){
return null;
}
String[] nodeArr = nodes.split(",");
for (String node : nodeArr){
nodeSet.add(node);
}
return nodeSet;
}
/**
* get active nodes by path
* @param path path
* @return active nodes
*/
protected abstract Map<String,String> getActiveNodesByPath(String path);
}
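The restart decision is a plain set difference between the nodes declared in the run config and the nodes currently registered in zookeeper; a self-contained sketch with invented host names:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

// Illustration only: nodes in the run config but absent from zookeeper
// are the ones the monitor tries to restart over ssh.
public class NeedRestartSketch {
    public static void main(String[] args) {
        Set<String> deployed = new HashSet<>(Arrays.asList("host-1", "host-2", "host-3"));
        Set<String> active   = new HashSet<>(Arrays.asList("host-1", "host-3"));

        Set<String> needRestart = new HashSet<>(deployed);
        needRestart.removeAll(active);

        System.out.println(needRestart); // [host-2]
    }
}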

28
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/monitor/Monitor.java

@ -0,0 +1,28 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.monitor;
/**
* server monitor and auto restart server
*/
public interface Monitor {
/**
* monitor server and restart
*/
void monitor(String masterPath, String workerPath, Integer port, String installPath);
}

63
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/monitor/MonitorServer.java

@ -0,0 +1,63 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.monitor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.CommandLineRunner;
import org.springframework.boot.WebApplicationType;
import org.springframework.boot.builder.SpringApplicationBuilder;
import org.springframework.context.annotation.ComponentScan;
/**
* monitor server
*/
@ComponentScan("org.apache.dolphinscheduler")
public class MonitorServer implements CommandLineRunner {
private static Integer ARGS_LENGTH = 4;
private static final Logger logger = LoggerFactory.getLogger(MonitorServer.class);
/**
* monitor
*/
@Autowired
private Monitor monitor;
public static void main(String[] args) throws Exception{
new SpringApplicationBuilder(MonitorServer.class).web(WebApplicationType.NONE).run(args);
}
@Override
public void run(String... args) throws Exception {
if (args.length != ARGS_LENGTH){
logger.error("Usage: <masterPath> <workerPath> <port> <installPath>");
return;
}
String masterPath = args[0];
String workerPath = args[1];
Integer port = Integer.parseInt(args[2]);
String installPath = args[3];
monitor.monitor(masterPath,workerPath,port,installPath);
}
}

85
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/monitor/RunConfig.java

@ -0,0 +1,85 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.monitor;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.PropertySource;
import org.springframework.stereotype.Component;
/**
* run config
*/
@Component
@PropertySource("classpath:config/run_config.conf")
public class RunConfig {
//run config
@Value("${masters}")
private String masters;
@Value("${workers}")
private String workers;
@Value("${alertServer}")
private String alertServer;
@Value("${apiServers}")
private String apiServers;
@Value("${sshPort}")
private String sshPort;
public String getMasters() {
return masters;
}
public void setMasters(String masters) {
this.masters = masters;
}
public String getWorkers() {
return workers;
}
public void setWorkers(String workers) {
this.workers = workers;
}
public String getAlertServer() {
return alertServer;
}
public void setAlertServer(String alertServer) {
this.alertServer = alertServer;
}
public String getApiServers() {
return apiServers;
}
public void setApiServers(String apiServers) {
this.apiServers = apiServers;
}
public String getSshPort() {
return sshPort;
}
public void setSshPort(String sshPort) {
this.sshPort = sshPort;
}
}

62
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/monitor/ZKMonitorImpl.java

@ -0,0 +1,62 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.monitor;
import org.apache.dolphinscheduler.common.zk.ZookeeperOperator;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* zk monitor server impl
*/
@Component
public class ZKMonitorImpl extends AbstractMonitor {
/**
* zookeeper operator
*/
@Autowired
private ZookeeperOperator zookeeperOperator;
/**
* get active nodes map by path
* @param path path
* @return active nodes map
*/
@Override
protected Map<String,String> getActiveNodesByPath(String path) {
Map<String,String> maps = new HashMap<>();
List<String> childrenList = zookeeperOperator.getChildrenKeys(path);
if (childrenList == null){
return maps;
}
for (String child : childrenList){
maps.put(child.split("_")[0],child);
}
return maps;
}
}

20
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/utils/AlertManager.java

@ -90,15 +90,15 @@ public class AlertManager {
* process instance format
*/
private static final String PROCESS_INSTANCE_FORMAT =
"\"Id:%d\"," +
"\"Name:%s\"," +
"\"Job type: %s\"," +
"\"State: %s\"," +
"\"Recovery:%s\"," +
"\"Run time: %d\"," +
"\"Start time: %s\"," +
"\"End time: %s\"," +
"\"Host: %s\"" ;
"\"id:%d\"," +
"\"name:%s\"," +
"\"job type: %s\"," +
"\"state: %s\"," +
"\"recovery:%s\"," +
"\"run time: %d\"," +
"\"start time: %s\"," +
"\"end time: %s\"," +
"\"host: %s\"" ;
/**
* get process instance content
@ -234,7 +234,7 @@ public class AlertManager {
String cmdName = getCommandCnName(processInstance.getCommandType());
String success = processInstance.getState().typeIsSuccess() ? "success" :"failed";
alert.setTitle(cmdName + success);
alert.setTitle(cmdName + " " + success);
ShowType showType = processInstance.getState().typeIsSuccess() ? ShowType.TEXT : ShowType.TABLE;
alert.setShowType(showType);
String content = getContentProcessInstance(processInstance, taskInstances);

75
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/utils/FlinkArgsUtils.java

@ -19,6 +19,7 @@ package org.apache.dolphinscheduler.server.utils;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ProgramType;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.flink.FlinkParameters;
import org.apache.commons.lang.StringUtils;
import org.slf4j.LoggerFactory;
@ -44,9 +45,11 @@ public class FlinkArgsUtils {
*/
public static List<String> buildArgs(FlinkParameters param) {
List<String> args = new ArrayList<>();
String deployMode = "cluster";
if (StringUtils.isNotEmpty(param.getDeployMode())) {
deployMode = param.getDeployMode();
String tmpDeployMode = param.getDeployMode();
if (StringUtils.isNotEmpty(tmpDeployMode)) {
deployMode = tmpDeployMode;
}
if (!"local".equals(deployMode)) {
@ -54,68 +57,70 @@ public class FlinkArgsUtils {
args.add(Constants.FLINK_YARN_CLUSTER); //yarn-cluster
if (param.getSlot() != 0) {
int slot = param.getSlot();
if (slot != 0) {
args.add(Constants.FLINK_YARN_SLOT);
args.add(String.format("%d", param.getSlot())); //-ys
args.add(String.format("%d", slot)); //-ys
}
if (StringUtils.isNotEmpty(param.getAppName())) { //-ynm
String appName = param.getAppName();
if (StringUtils.isNotEmpty(appName)) { //-ynm
args.add(Constants.FLINK_APP_NAME);
args.add(param.getAppName());
args.add(appName);
}
if (param.getTaskManager() != 0) { //-yn
int taskManager = param.getTaskManager();
if (taskManager != 0) { //-yn
args.add(Constants.FLINK_TASK_MANAGE);
args.add(String.format("%d", param.getTaskManager()));
args.add(String.format("%d", taskManager));
}
if (StringUtils.isNotEmpty(param.getJobManagerMemory())) {
String jobManagerMemory = param.getJobManagerMemory();
if (StringUtils.isNotEmpty(jobManagerMemory)) {
args.add(Constants.FLINK_JOB_MANAGE_MEM);
args.add(param.getJobManagerMemory()); //-yjm
args.add(jobManagerMemory); //-yjm
}
if (StringUtils.isNotEmpty(param.getTaskManagerMemory())) { // -ytm
String taskManagerMemory = param.getTaskManagerMemory();
if (StringUtils.isNotEmpty(taskManagerMemory)) { // -ytm
args.add(Constants.FLINK_TASK_MANAGE_MEM);
args.add(param.getTaskManagerMemory());
args.add(taskManagerMemory);
}
args.add(Constants.FLINK_detach); //-d
}
if (param.getProgramType() != null) {
if (param.getProgramType() != ProgramType.PYTHON) {
if (StringUtils.isNotEmpty(param.getMainClass())) {
args.add(Constants.FLINK_MAIN_CLASS); //-c
args.add(param.getMainClass()); //main class
}
}
ProgramType programType = param.getProgramType();
String mainClass = param.getMainClass();
if (programType != null && programType != ProgramType.PYTHON && StringUtils.isNotEmpty(mainClass)) {
args.add(Constants.FLINK_MAIN_CLASS); //-c
args.add(param.getMainClass()); //main class
}
if (param.getMainJar() != null) {
args.add(param.getMainJar().getRes());
ResourceInfo mainJar = param.getMainJar();
if (mainJar != null) {
args.add(mainJar.getRes());
}
if (StringUtils.isNotEmpty(param.getMainArgs())) {
args.add(param.getMainArgs());
String mainArgs = param.getMainArgs();
if (StringUtils.isNotEmpty(mainArgs)) {
args.add(mainArgs);
}
// --files --conf --libjar ...
if (StringUtils.isNotEmpty(param.getOthers())) {
String others = param.getOthers();
if (!others.contains("--qu")) {
if (StringUtils.isNotEmpty(param.getQueue()) && !deployMode.equals("local")) {
args.add(Constants.FLINK_QUEUE);
args.add(param.getQueue());
}
String others = param.getOthers();
String queue = param.getQueue();
if (StringUtils.isNotEmpty(others)) {
if (!others.contains(Constants.FLINK_QUEUE) && StringUtils.isNotEmpty(queue) && !deployMode.equals("local")) {
args.add(Constants.FLINK_QUEUE);
args.add(param.getQueue());
}
args.add(param.getOthers());
} else if (StringUtils.isNotEmpty(param.getQueue()) && !deployMode.equals("local")) {
args.add(others);
} else if (StringUtils.isNotEmpty(queue) && !deployMode.equals("local")) {
args.add(Constants.FLINK_QUEUE);
args.add(param.getQueue());
}
return args;

59
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/utils/RemoveZKNode.java

@ -0,0 +1,59 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.utils;
import org.apache.dolphinscheduler.common.zk.ZookeeperOperator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.CommandLineRunner;
import org.springframework.boot.WebApplicationType;
import org.springframework.boot.builder.SpringApplicationBuilder;
import org.springframework.context.annotation.ComponentScan;
@ComponentScan("org.apache.dolphinscheduler")
public class RemoveZKNode implements CommandLineRunner {
private static Integer ARGS_LENGTH = 1;
private static final Logger logger = LoggerFactory.getLogger(RemoveZKNode.class);
/**
* zookeeper operator
*/
@Autowired
private ZookeeperOperator zookeeperOperator;
public static void main(String[] args) {
new SpringApplicationBuilder(RemoveZKNode.class).web(WebApplicationType.NONE).run(args);
}
@Override
public void run(String... args) throws Exception {
if (args.length != ARGS_LENGTH){
logger.error("Usage: <node>");
return;
}
zookeeperOperator.remove(args[0]);
zookeeperOperator.close();
}
}

28
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/zk/AbstractListener.java → dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/utils/SensitiveLogUtil.java

@ -14,22 +14,26 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.zk;
package org.apache.dolphinscheduler.server.utils;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.cache.TreeCacheEvent;
import org.apache.curator.framework.recipes.cache.TreeCacheListener;
import org.apache.commons.lang.StringUtils;
import org.apache.dolphinscheduler.common.Constants;
public abstract class AbstractListener implements TreeCacheListener {
/**
* sensitive log Util
*/
public class SensitiveLogUtil {
/**
* @param dataSourcePwd data source password
* @return String
*/
public static String maskDataSourcePwd(String dataSourcePwd){
@Override
public final void childEvent(final CuratorFramework client, final TreeCacheEvent event) throws Exception {
String path = null == event.getData() ? "" : event.getData().getPath();
if (path.isEmpty()) {
return;
if (StringUtils.isNotEmpty(dataSourcePwd)) {
dataSourcePwd = Constants.PASSWORD_DEFAULT;
}
dataChanged(client, event, path);
return dataSourcePwd;
}
protected abstract void dataChanged(final CuratorFramework client, final TreeCacheEvent event, final String path);
}
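So any non-empty password is replaced wholesale rather than partially masked; a usage sketch (the "******" value of Constants.PASSWORD_DEFAULT is taken from the tests below):
SensitiveLogUtil.maskDataSourcePwd("123456");   // -> "******" (Constants.PASSWORD_DEFAULT)
SensitiveLogUtil.maskDataSourcePwd("");         // -> ""       (empty input is returned unchanged)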

18
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/WorkerServer.java

@ -29,12 +29,12 @@ import org.apache.dolphinscheduler.common.thread.ThreadPoolExecutors;
import org.apache.dolphinscheduler.common.thread.ThreadUtils;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.common.utils.SpringApplicationContext;
import org.apache.dolphinscheduler.common.zk.AbstractZKClient;
import org.apache.dolphinscheduler.dao.AlertDao;
import org.apache.dolphinscheduler.dao.ProcessDao;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.server.utils.ProcessUtils;
import org.apache.dolphinscheduler.server.utils.SpringApplicationContext;
import org.apache.dolphinscheduler.server.worker.config.WorkerConfig;
import org.apache.dolphinscheduler.server.worker.runner.FetchTaskThread;
import org.apache.dolphinscheduler.server.zk.ZKWorkerClient;
@ -290,22 +290,20 @@ public class WorkerServer implements IStoppable {
Runnable killProcessThread = new Runnable() {
@Override
public void run() {
Set<String> taskInfoSet = taskQueue.smembers(Constants.DOLPHINSCHEDULER_TASKS_KILL);
while (Stopper.isRunning()){
try {
Thread.sleep(Constants.SLEEP_TIME_MILLIS);
} catch (InterruptedException e) {
logger.error("interrupted exception",e);
}
// if set is null , return
Set<String> taskInfoSet = taskQueue.smembers(Constants.DOLPHINSCHEDULER_TASKS_KILL);
if (CollectionUtils.isNotEmpty(taskInfoSet)){
for (String taskInfo : taskInfoSet){
killTask(taskInfo, processDao);
removeKillInfoFromQueue(taskInfo);
}
}
taskInfoSet = taskQueue.smembers(Constants.DOLPHINSCHEDULER_TASKS_KILL);
try {
Thread.sleep(Constants.SLEEP_TIME_MILLIS);
} catch (InterruptedException e) {
logger.error("interrupted exception",e);
Thread.currentThread().interrupt();
}
}
}
};
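The reworked loop re-reads the kill set on every pass and restores the interrupt flag instead of swallowing it; a minimal generic sketch of that polling pattern (names here are illustrative, not the scheduler's API):
while (running) {
    Set<String> toKill = fetchKillSet();              // re-read the kill queue each iteration
    if (toKill != null && !toKill.isEmpty()) {
        for (String taskInfo : toKill) {
            killAndRemove(taskInfo);                  // kill the task, then drop it from the queue
        }
    }
    try {
        Thread.sleep(sleepMillis);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();           // restore the flag so the owner can stop the loop
    }
}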

92
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/log/SensitiveDataConverter.java

@ -0,0 +1,92 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.log;
import ch.qos.logback.classic.pattern.MessageConverter;
import ch.qos.logback.classic.spi.ILoggingEvent;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.server.utils.SensitiveLogUtil;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* sensitive data log converter
*/
@Slf4j
public class SensitiveDataConverter extends MessageConverter {
/**
* password pattern
*/
private final Pattern pwdPattern = Pattern.compile(Constants.DATASOURCE_PASSWORD_REGEX);
@Override
public String convert(ILoggingEvent event) {
// get original log
String requestLogMsg = event.getFormattedMessage();
// desensitization log
return convertMsg(requestLogMsg);
}
/**
* deal with sensitive log
*
* @param oriLogMsg original log
*/
private String convertMsg(final String oriLogMsg) {
String tempLogMsg = oriLogMsg;
if (StringUtils.isNotEmpty(tempLogMsg)) {
tempLogMsg = passwordHandler(pwdPattern, tempLogMsg);
}
return tempLogMsg;
}
/**
* password regex
*
* @param logMsg original log
*/
private String passwordHandler(Pattern pwdPattern, String logMsg) {
Matcher matcher = pwdPattern.matcher(logMsg);
StringBuffer sb = new StringBuffer(logMsg.length());
while (matcher.find()) {
String password = matcher.group();
String maskPassword = SensitiveLogUtil.maskDataSourcePwd(password);
matcher.appendReplacement(sb, maskPassword);
}
matcher.appendTail(sb);
return sb.toString();
}
}
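The converter is registered in worker_logback.xml below through a conversionRule for %msg, so every formatted worker log line passes through passwordHandler. A self-contained sketch of the same replace-by-regex idea; the pattern used here is an assumption, the real one lives in Constants.DATASOURCE_PASSWORD_REGEX:
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class MaskDemo {
    // assumed shape of the datasource password pattern; the project's regex may differ
    private static final Pattern PWD = Pattern.compile("(?<=\"password\":\")[^\"]*");

    static String mask(String msg) {
        Matcher m = PWD.matcher(msg);
        StringBuffer sb = new StringBuffer(msg.length());
        while (m.find()) {
            m.appendReplacement(sb, "******");        // same role as SensitiveLogUtil.maskDataSourcePwd
        }
        m.appendTail(sb);
        return sb.toString();
    }

    public static void main(String[] args) {
        System.out.println(mask("{\"user\":\"view\",\"password\":\"view1\"}"));
        // -> {"user":"view","password":"******"}
    }
}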

3
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/runner/FetchTaskThread.java

@ -25,12 +25,12 @@ import org.apache.dolphinscheduler.common.thread.ThreadUtils;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.FileUtils;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.common.utils.SpringApplicationContext;
import org.apache.dolphinscheduler.common.zk.AbstractZKClient;
import org.apache.dolphinscheduler.dao.ProcessDao;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.WorkerGroup;
import org.apache.dolphinscheduler.server.utils.SpringApplicationContext;
import org.apache.dolphinscheduler.server.worker.config.WorkerConfig;
import org.apache.dolphinscheduler.server.zk.ZKWorkerClient;
import org.slf4j.Logger;
@ -155,6 +155,7 @@ public class FetchTaskThread implements Runnable{
//whether have tasks, if no tasks , no need lock //get all tasks
List<String> tasksQueueList = taskQueue.getAllTasks(Constants.DOLPHINSCHEDULER_TASKS_QUEUE);
if (CollectionUtils.isEmpty(tasksQueueList)){
Thread.sleep(Constants.SLEEP_TIME_MILLIS);
continue;
}
// creating distributed locks, lock path /dolphinscheduler/lock/worker

2
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractYarnTask.java

@ -16,10 +16,10 @@
*/
package org.apache.dolphinscheduler.server.worker.task;
import org.apache.dolphinscheduler.common.utils.SpringApplicationContext;
import org.apache.dolphinscheduler.dao.ProcessDao;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.server.utils.ProcessUtils;
import org.apache.dolphinscheduler.server.utils.SpringApplicationContext;
import org.slf4j.Logger;
/**

2
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/dependent/DependentExecute.java

@ -23,10 +23,10 @@ import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.model.DateInterval;
import org.apache.dolphinscheduler.common.model.DependentItem;
import org.apache.dolphinscheduler.common.utils.DependentUtils;
import org.apache.dolphinscheduler.common.utils.SpringApplicationContext;
import org.apache.dolphinscheduler.dao.ProcessDao;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.server.utils.SpringApplicationContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

2
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/dependent/DependentTask.java

@ -25,9 +25,9 @@ import org.apache.dolphinscheduler.common.task.dependent.DependentParameters;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.utils.DependentUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.SpringApplicationContext;
import org.apache.dolphinscheduler.dao.ProcessDao;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.server.utils.SpringApplicationContext;
import org.apache.dolphinscheduler.server.worker.task.AbstractTask;
import org.apache.dolphinscheduler.server.worker.task.TaskProps;
import org.slf4j.Logger;

2
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/http/HttpTask.java

@ -30,10 +30,10 @@ import org.apache.dolphinscheduler.common.task.http.HttpParameters;
import org.apache.dolphinscheduler.common.utils.Bytes;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.common.utils.SpringApplicationContext;
import org.apache.dolphinscheduler.dao.ProcessDao;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.server.utils.ParamUtils;
import org.apache.dolphinscheduler.server.utils.SpringApplicationContext;
import org.apache.dolphinscheduler.server.worker.task.AbstractTask;
import org.apache.dolphinscheduler.server.worker.task.TaskProps;
import org.apache.http.HttpEntity;

2
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/processdure/ProcedureTask.java

@ -29,10 +29,10 @@ import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.procedure.ProcedureParameters;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.common.utils.SpringApplicationContext;
import org.apache.dolphinscheduler.dao.ProcessDao;
import org.apache.dolphinscheduler.dao.entity.DataSource;
import org.apache.dolphinscheduler.server.utils.ParamUtils;
import org.apache.dolphinscheduler.server.utils.SpringApplicationContext;
import org.apache.dolphinscheduler.server.worker.task.AbstractTask;
import org.apache.dolphinscheduler.server.worker.task.TaskProps;
import org.slf4j.Logger;

2
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/python/PythonTask.java

@ -22,9 +22,9 @@ import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.python.PythonParameters;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.common.utils.SpringApplicationContext;
import org.apache.dolphinscheduler.dao.ProcessDao;
import org.apache.dolphinscheduler.server.utils.ParamUtils;
import org.apache.dolphinscheduler.server.utils.SpringApplicationContext;
import org.apache.dolphinscheduler.server.worker.task.AbstractTask;
import org.apache.dolphinscheduler.server.worker.task.PythonCommandExecutor;
import org.apache.dolphinscheduler.server.worker.task.TaskProps;

2
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/shell/ShellTask.java

@ -23,9 +23,9 @@ import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.shell.ShellParameters;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.common.utils.SpringApplicationContext;
import org.apache.dolphinscheduler.dao.ProcessDao;
import org.apache.dolphinscheduler.server.utils.ParamUtils;
import org.apache.dolphinscheduler.server.utils.SpringApplicationContext;
import org.apache.dolphinscheduler.server.worker.task.AbstractTask;
import org.apache.dolphinscheduler.server.worker.task.ShellCommandExecutor;
import org.apache.dolphinscheduler.server.worker.task.TaskProps;

2
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java

@ -36,6 +36,7 @@ import org.apache.dolphinscheduler.common.task.sql.SqlType;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.CommonUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.common.utils.SpringApplicationContext;
import org.apache.dolphinscheduler.dao.AlertDao;
import org.apache.dolphinscheduler.dao.ProcessDao;
import org.apache.dolphinscheduler.dao.entity.DataSource;
@ -43,7 +44,6 @@ import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.UdfFunc;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.server.utils.ParamUtils;
import org.apache.dolphinscheduler.server.utils.SpringApplicationContext;
import org.apache.dolphinscheduler.server.utils.UDFUtils;
import org.apache.dolphinscheduler.server.worker.task.AbstractTask;
import org.apache.dolphinscheduler.server.worker.task.TaskProps;

96
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/zk/ZKMasterClient.java

@ -21,7 +21,6 @@ import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.ZKNodeType;
import org.apache.dolphinscheduler.common.model.Server;
import org.apache.dolphinscheduler.common.zk.AbstractListener;
import org.apache.dolphinscheduler.common.zk.AbstractZKClient;
import org.apache.dolphinscheduler.dao.AlertDao;
import org.apache.dolphinscheduler.dao.DaoFactory;
@ -31,9 +30,6 @@ import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.server.utils.ProcessUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.cache.PathChildrenCache;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener;
import org.apache.curator.framework.recipes.locks.InterProcessMutex;
import org.apache.curator.utils.ThreadUtils;
import org.slf4j.Logger;
@ -101,12 +97,6 @@ public class ZKMasterClient extends AbstractZKClient {
// init system znode
this.initSystemZNode();
// monitor master
this.listenerMaster();
// monitor worker
this.listenerWorker();
// register master
this.registerMaster();
@ -158,31 +148,22 @@ public class ZKMasterClient extends AbstractZKClient {
}
}
/**
* monitor master
* handle path events that this class cares about
* @param client zkClient
* @param event path event
* @param path zk path
*/
public void listenerMaster(){
registerListener(getZNodeParentPath(ZKNodeType.MASTER), new AbstractListener() {
@Override
protected void dataChanged(CuratorFramework client, TreeCacheEvent event, String path) {
switch (event.getType()) {
case NODE_ADDED:
logger.info("master node added : {}", path);
break;
case NODE_REMOVED:
String serverHost = getHostByEventDataPath(path);
if (checkServerSelfDead(serverHost, ZKNodeType.MASTER)) {
return;
}
removeZKNodePath(path, ZKNodeType.MASTER, true);
break;
default:
break;
}
}
});
}
@Override
protected void dataChanged(CuratorFramework client, TreeCacheEvent event, String path) {
if(path.startsWith(getZNodeParentPath(ZKNodeType.MASTER)+Constants.SINGLE_SLASH)){ //monitor master
handleMasterEvent(event,path);
}else if(path.startsWith(getZNodeParentPath(ZKNodeType.WORKER)+Constants.SINGLE_SLASH)){ //monitor worker
handleWorkerEvent(event,path);
}
//other path event, ignore
}
/**
* remove zookeeper node path
@ -273,25 +254,40 @@ public class ZKMasterClient extends AbstractZKClient {
}
/**
* monitor worker
* monitor master
*/
public void listenerWorker(){
registerListener(getZNodeParentPath(ZKNodeType.WORKER), new AbstractListener() {
@Override
protected void dataChanged(CuratorFramework client, TreeCacheEvent event, String path) {
switch (event.getType()) {
case NODE_ADDED:
logger.info("worker node added : {}", path);
break;
case NODE_REMOVED:
logger.info("worker node deleted : {}", path);
removeZKNodePath(path, ZKNodeType.WORKER, true);
break;
default:
break;
public void handleMasterEvent(TreeCacheEvent event, String path){
switch (event.getType()) {
case NODE_ADDED:
logger.info("master node added : {}", path);
break;
case NODE_REMOVED:
String serverHost = getHostByEventDataPath(path);
if (checkServerSelfDead(serverHost, ZKNodeType.MASTER)) {
return;
}
}
});
removeZKNodePath(path, ZKNodeType.MASTER, true);
break;
default:
break;
}
}
/**
* monitor worker
*/
public void handleWorkerEvent(TreeCacheEvent event, String path){
switch (event.getType()) {
case NODE_ADDED:
logger.info("worker node added : {}", path);
break;
case NODE_REMOVED:
logger.info("worker node deleted : {}", path);
removeZKNodePath(path, ZKNodeType.WORKER, true);
break;
default:
break;
}
}

61
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/zk/ZKWorkerClient.java

@ -19,20 +19,13 @@ package org.apache.dolphinscheduler.server.zk;
import org.apache.curator.framework.recipes.cache.TreeCacheEvent;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ZKNodeType;
import org.apache.dolphinscheduler.common.zk.AbstractListener;
import org.apache.dolphinscheduler.common.zk.AbstractZKClient;
import org.apache.commons.lang.StringUtils;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.cache.PathChildrenCache;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener;
import org.apache.curator.utils.ThreadUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import java.util.concurrent.ThreadFactory;
/**
* zookeeper worker client
@ -61,9 +54,6 @@ public class ZKWorkerClient extends AbstractZKClient {
// init system znode
this.initSystemZNode();
// monitor worker
this.listenerWorker();
// register worker
this.registWorker();
}
@ -83,31 +73,38 @@ public class ZKWorkerClient extends AbstractZKClient {
System.exit(-1);
}
}
/**
* monitor worker
* handle path events that this class cares about
* @param client zkClient
* @param event path event
* @param path zk path
*/
private void listenerWorker(){
registerListener(getZNodeParentPath(ZKNodeType.WORKER), new AbstractListener() {
@Override
protected void dataChanged(CuratorFramework client, TreeCacheEvent event, String path) {
switch (event.getType()) {
case NODE_ADDED:
logger.info("worker node added : {}", path);
break;
case NODE_REMOVED:
//find myself dead
String serverHost = getHostByEventDataPath(path);
if(checkServerSelfDead(serverHost, ZKNodeType.WORKER)){
return;
}
break;
default:
break;
}
}
});
@Override
protected void dataChanged(CuratorFramework client, TreeCacheEvent event, String path) {
if(path.startsWith(getZNodeParentPath(ZKNodeType.WORKER)+Constants.SINGLE_SLASH)){
handleWorkerEvent(event,path);
}
}
/**
* monitor worker
*/
public void handleWorkerEvent(TreeCacheEvent event, String path){
switch (event.getType()) {
case NODE_ADDED:
logger.info("worker node added : {}", path);
break;
case NODE_REMOVED:
//find myself dead
String serverHost = getHostByEventDataPath(path);
if(checkServerSelfDead(serverHost, ZKNodeType.WORKER)){
return;
}
break;
default:
break;
}
}
/**

0
script/config/install_config.conf → dolphinscheduler-server/src/main/resources/config/install_config.conf

0
script/config/run_config.conf → dolphinscheduler-server/src/main/resources/config/run_config.conf

4
dolphinscheduler-server/src/main/resources/worker_logback.xml

@ -18,6 +18,8 @@
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds">
<conversionRule conversionWord="msg"
converterClass="org.apache.dolphinscheduler.server.worker.log.SensitiveDataConverter"/>
<property name="log.base" value="logs"/>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
@ -31,7 +33,7 @@
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<filter class="org.apache.dolphinscheduler.server.worker.log.TaskLogFilter"></filter>
<filter class="org.apache.dolphinscheduler.server.worker.log.TaskLogFilter"/>
<Discriminator class="org.apache.dolphinscheduler.server.worker.log.TaskLogDiscriminator">
<key>taskAppId</key>
<logBase>${log.base}</logBase>

131
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/utils/FlinkArgsUtilsTest.java

@ -0,0 +1,131 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.utils;
import org.apache.dolphinscheduler.common.enums.ProgramType;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.flink.FlinkParameters;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertSame;
/**
* Test FlinkArgsUtils
*/
public class FlinkArgsUtilsTest {
private static final Logger logger = LoggerFactory.getLogger(FlinkArgsUtilsTest.class);
public String mode = "cluster";
public int slot = 2;
public String appName = "testFlink";
public int taskManager = 4;
public String taskManagerMemory = "2G";
public String jobManagerMemory = "4G";
public ProgramType programType = ProgramType.JAVA;
public String mainClass = "com.test";
public ResourceInfo mainJar = null;
public String mainArgs = "testArgs";
public String queue = "queue1";
public String others = "--input file:///home";
@Before
public void setUp() throws Exception {
ResourceInfo main = new ResourceInfo();
main.setRes("testflink-1.0.0-SNAPSHOT.jar");
mainJar = main;
}
/**
* Test buildArgs
*/
@Test
public void testBuildArgs() {
//Define params
FlinkParameters param = new FlinkParameters();
param.setDeployMode(mode);
param.setMainClass(mainClass);
param.setAppName(appName);
param.setSlot(slot);
param.setTaskManager(taskManager);
param.setJobManagerMemory(jobManagerMemory);
param.setTaskManagerMemory(taskManagerMemory);
param.setMainJar(mainJar);
param.setProgramType(programType);
param.setMainArgs(mainArgs);
param.setQueue(queue);
param.setOthers(others);
//Invoke buildArgs
List<String> result = FlinkArgsUtils.buildArgs(param);
for (String s : result) {
logger.info(s);
}
//Expected values and order
assertEquals(result.size(),20);
assertEquals(result.get(0),"-m");
assertEquals(result.get(1),"yarn-cluster");
assertEquals(result.get(2),"-ys");
assertSame(Integer.valueOf(result.get(3)),slot);
assertEquals(result.get(4),"-ynm");
assertEquals(result.get(5),appName);
assertEquals(result.get(6),"-yn");
assertSame(Integer.valueOf(result.get(7)),taskManager);
assertEquals(result.get(8),"-yjm");
assertEquals(result.get(9),jobManagerMemory);
assertEquals(result.get(10),"-ytm");
assertEquals(result.get(11),taskManagerMemory);
assertEquals(result.get(12),"-d");
assertEquals(result.get(13),"-c");
assertEquals(result.get(14),mainClass);
assertEquals(result.get(15),mainJar.getRes());
assertEquals(result.get(16),mainArgs);
assertEquals(result.get(17),"--qu");
assertEquals(result.get(18),queue);
assertEquals(result.get(19),others);
//Others param without --qu
FlinkParameters param1 = new FlinkParameters();
param1.setQueue(queue);
param1.setDeployMode(mode);
result = FlinkArgsUtils.buildArgs(param1);
assertEquals(result.size(),5);
}
}

37
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/utils/SensitiveLogUtilTest.java

@ -0,0 +1,37 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.utils;
import org.apache.dolphinscheduler.common.Constants;
import org.junit.Assert;
import org.junit.Test;
public class SensitiveLogUtilTest {
@Test
public void testMaskDataSourcePwd() {
String password = "123456";
String emptyPassword = "";
Assert.assertEquals(Constants.PASSWORD_DEFAULT, SensitiveLogUtil.maskDataSourcePwd(password));
Assert.assertEquals("", SensitiveLogUtil.maskDataSourcePwd(emptyPassword));
}
}

92
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/log/SensitiveDataConverterTest.java

@ -0,0 +1,92 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.log;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.server.utils.SensitiveLogUtil;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class SensitiveDataConverterTest {
private final Logger logger = LoggerFactory.getLogger(SensitiveDataConverterTest.class);
/**
* password pattern
*/
private final Pattern pwdPattern = Pattern.compile(Constants.DATASOURCE_PASSWORD_REGEX);
/**
* mask sensitive logMsg - sql task datasource password
*/
@Test
public void testPwdLogMsgConverter() {
String logMsg = "{\"address\":\"jdbc:mysql://192.168.xx.xx:3306\"," +
"\"database\":\"carbond\"," +
"\"jdbcUrl\":\"jdbc:mysql://192.168.xx.xx:3306/ods\"," +
"\"user\":\"view\"," +
"\"password\":\"view1\"}";
String maskLogMsg = "{\"address\":\"jdbc:mysql://192.168.xx.xx:3306\"," +
"\"database\":\"carbond\"," +
"\"jdbcUrl\":\"jdbc:mysql://192.168.xx.xx:3306/ods\"," +
"\"user\":\"view\"," +
"\"password\":\"******\"}";
logger.info("parameter : {}", logMsg);
logger.info("parameter : {}", passwordHandler(pwdPattern, logMsg));
Assert.assertNotEquals(logMsg, passwordHandler(pwdPattern, logMsg));
Assert.assertEquals(maskLogMsg, passwordHandler(pwdPattern, logMsg));
}
/**
* password regex test
*
* @param logMsg original log
*/
private static String passwordHandler(Pattern pattern, String logMsg) {
Matcher matcher = pattern.matcher(logMsg);
StringBuffer sb = new StringBuffer(logMsg.length());
while (matcher.find()) {
String password = matcher.group();
String maskPassword = SensitiveLogUtil.maskDataSourcePwd(password);
matcher.appendReplacement(sb, maskPassword);
}
matcher.appendTail(sb);
return sb.toString();
}
}

2
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/shell/ShellCommandExecutorTest.java

@ -20,10 +20,10 @@ import com.alibaba.fastjson.JSONObject;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.utils.SpringApplicationContext;
import org.apache.dolphinscheduler.dao.ProcessDao;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.server.utils.LoggerUtils;
import org.apache.dolphinscheduler.server.utils.SpringApplicationContext;
import org.apache.dolphinscheduler.server.worker.task.AbstractTask;
import org.apache.dolphinscheduler.server.worker.task.TaskManager;
import org.apache.dolphinscheduler.server.worker.task.TaskProps;

2
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/sql/SqlExecutorTest.java

@ -21,10 +21,10 @@ import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.utils.SpringApplicationContext;
import org.apache.dolphinscheduler.dao.ProcessDao;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.server.utils.LoggerUtils;
import org.apache.dolphinscheduler.server.utils.SpringApplicationContext;
import org.apache.dolphinscheduler.server.worker.task.AbstractTask;
import org.apache.dolphinscheduler.server.worker.task.TaskManager;
import org.apache.dolphinscheduler.server.worker.task.TaskProps;

165
dolphinscheduler-ui/pom.xml

@ -32,55 +32,120 @@
<node.version>v12.12.0</node.version>
<npm.version>6.11.3</npm.version>
</properties>
<profiles>
<profile>
<id>release</id>
<build>
<plugins>
<plugin>
<groupId>com.github.eirslett</groupId>
<artifactId>frontend-maven-plugin</artifactId>
<version>${frontend-maven-plugin.version}</version>
<executions>
<execution>
<id>install node and npm</id>
<goals>
<goal>install-node-and-npm</goal>
</goals>
<configuration>
<nodeVersion>${node.version}</nodeVersion>
<npmVersion>${npm.version}</npmVersion>
</configuration>
</execution>
<execution>
<id>npm install node-sass --unsafe-perm</id>
<goals>
<goal>npm</goal>
</goals>
<phase>generate-resources</phase>
<configuration>
<arguments>install node-sass --unsafe-perm</arguments>
</configuration>
</execution>
<execution>
<id>npm install</id>
<goals>
<goal>npm</goal>
</goals>
<phase>generate-resources</phase>
<configuration>
<arguments>install</arguments>
</configuration>
</execution>
<execution>
<id>npm run build:release</id>
<goals>
<goal>npm</goal>
</goals>
<configuration>
<arguments>run build:release</arguments>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
<profile>
<id>nginx</id>
<build>
<plugins>
<plugin>
<groupId>com.github.eirslett</groupId>
<artifactId>frontend-maven-plugin</artifactId>
<version>${frontend-maven-plugin.version}</version>
<executions>
<execution>
<id>install node and npm</id>
<goals>
<goal>install-node-and-npm</goal>
</goals>
<configuration>
<nodeVersion>${node.version}</nodeVersion>
<npmVersion>${npm.version}</npmVersion>
</configuration>
</execution>
<execution>
<id>npm install node-sass --unsafe-perm</id>
<goals>
<goal>npm</goal>
</goals>
<phase>generate-resources</phase>
<configuration>
<arguments>install node-sass --unsafe-perm</arguments>
</configuration>
</execution>
<execution>
<id>npm install</id>
<goals>
<goal>npm</goal>
</goals>
<phase>generate-resources</phase>
<configuration>
<arguments>install</arguments>
</configuration>
</execution>
<execution>
<id>npm run build</id>
<goals>
<goal>npm</goal>
</goals>
<configuration>
<arguments>run build</arguments>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
</profiles>
<build>
<plugins>
<plugin>
<groupId>com.github.eirslett</groupId>
<artifactId>frontend-maven-plugin</artifactId>
<version>${frontend-maven-plugin.version}</version>
<executions>
<execution>
<id>install node and npm</id>
<goals>
<goal>install-node-and-npm</goal>
</goals>
<configuration>
<nodeVersion>${node.version}</nodeVersion>
<npmVersion>${npm.version}</npmVersion>
</configuration>
</execution>
<execution>
<id>npm install node-sass --unsafe-perm</id>
<goals>
<goal>npm</goal>
</goals>
<phase>generate-resources</phase>
<configuration>
<arguments>install node-sass --unsafe-perm</arguments>
</configuration>
</execution>
<execution>
<id>npm install</id>
<goals>
<goal>npm</goal>
</goals>
<phase>generate-resources</phase>
<configuration>
<arguments>install</arguments>
</configuration>
</execution>
<execution>
<id>npm run build:release</id>
<goals>
<goal>npm</goal>
</goals>
<configuration>
<arguments>run build:release</arguments>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

31
install.sh

@ -469,8 +469,8 @@ sh ${workDir}/script/stop-all.sh
# 4,delete zk node
echo "4,delete zk node"
sleep 1
python ${workDir}/script/del-zk-node.py $zkQuorum $zkRoot
sh ${workDir}/script/remove-zk-node.sh $zkRoot
# 5,scp resources
echo "5,scp resources"
@ -485,29 +485,4 @@ fi
# 6,startup
echo "6,startup"
sh ${workDir}/script/start-all.sh
# 7,start monitoring self-starting script
monitor_pid=${workDir}/monitor_server.pid
if [ "true" = $monitorServerState ];then
if [ -f $monitor_pid ]; then
TARGET_PID=`cat $monitor_pid`
if kill -0 $TARGET_PID > /dev/null 2>&1; then
echo "monitor server running as process ${TARGET_PID}.Stopping"
kill $TARGET_PID
sleep 5
if kill -0 $TARGET_PID > /dev/null 2>&1; then
echo "monitor server did not stop gracefully after 5 seconds: killing with kill -9"
kill -9 $TARGET_PID
fi
else
echo "no monitor server to stop"
fi
echo "monitor server running as process ${TARGET_PID}.Stopped success"
rm -f $monitor_pid
fi
nohup python -u ${workDir}/script/monitor-server.py $installPath $zkQuorum $zkMasters $zkWorkers > ${workDir}/monitor-server.log 2>&1 &
echo $! > $monitor_pid
echo "start monitor server success as process `cat $monitor_pid`"
fi
sh ${workDir}/script/start-all.sh

19
pom.xml

@ -108,6 +108,8 @@
<maven-javadoc-plugin.version>2.10.3</maven-javadoc-plugin.version>
<maven-source-plugin.version>2.4</maven-source-plugin.version>
<maven-surefire-plugin.version>2.18.1</maven-surefire-plugin.version>
<maven-dependency-plugin.version>3.1.1</maven-dependency-plugin.version>
<rpm-maven-plugion.version>2.2.0</rpm-maven-plugion.version>
<jacoco.version>0.8.4</jacoco.version>
<jcip.version>1.0</jcip.version>
<maven.deploy.skip>false</maven.deploy.skip>
@ -524,6 +526,14 @@
<pluginManagement>
<plugins>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>rpm-maven-plugin</artifactId>
<version>${rpm-maven-plugion.version}</version>
<inherited>false</inherited>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
@ -568,6 +578,12 @@
<version>${maven-source-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<version>${maven-dependency-plugin.version}</version>
</plugin>
</plugins>
</pluginManagement>
@ -655,6 +671,9 @@
<include>**/alert/utils/JSONUtilsTest.java</include>
<include>**/alert/utils/PropertyUtilsTest.java</include>
<include>**/server/utils/SparkArgsUtilsTest.java</include>
<include>**/server/utils/FlinkArgsUtilsTest.java</include>
<include>**/dao/mapper/AccessTokenMapperTest.java</include>
<include>**/dao/mapper/AlertGroupMapperTest.java</include>
</includes>
<!-- <skip>true</skip> -->
</configuration>

34
script/del-zk-node.py

@ -1,34 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import sys
from kazoo.client import KazooClient
class ZkClient:
def __init__(self):
self.zk = KazooClient(hosts=sys.argv[1])
self.zk.start()
def del_node(self):
self.zk.delete(sys.argv[2], recursive=True)
print('deleted success')
def __del__(self):
self.zk.stop()
if __name__ == '__main__':
zkclient = ZkClient()
zkclient.del_node()
time.sleep(2)

124
script/monitor-server.py

@ -1,124 +0,0 @@
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
1, yum install pip
yum -y install python-pip
2, pip install kazoo
pip install kazoo
or
3, conda install kazoo
conda install -c conda-forge kazoo
run script and parameter description
nohup python -u monitor_server.py /data1_1T/dolphinscheduler 192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181 /dolphinscheduler/masters /dolphinscheduler/workers> monitor_server.log 2>&1 &
the parameters are as follows:
/data1_1T/dolphinscheduler : the value comes from the installPath in install.sh
192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181 : the value comes from zkQuorum in install.sh
the value comes from zkWorkers in install.sh
/dolphinscheduler/masters : the value comes from zkMasters in install.sh
/dolphinscheduler/workers : the value comes from zkWorkers in install.sh
'''
import sys
import socket
import os
import sched
import time
from datetime import datetime
from kazoo.client import KazooClient
schedule = sched.scheduler(time.time, time.sleep)
class ZkClient:
def __init__(self):
# hosts configuration zk address cluster
self.zk = KazooClient(hosts=zookeepers)
self.zk.start()
# read configuration files and assemble them into a dictionary
def read_file(self,path):
with open(path, 'r') as f:
dict = {}
for line in f.readlines():
arr = line.strip().split('=')
if (len(arr) == 2):
dict[arr[0]] = arr[1]
return dict
# get the ip address according to hostname
def get_ip_by_hostname(self,hostname):
return socket.gethostbyname(hostname)
# restart server
def restart_server(self,inc):
config_dict = self.read_file(install_path + '/conf/config/run_config.conf')
master_list = config_dict.get('masters').split(',')
print master_list
master_list = list(map(lambda item : self.get_ip_by_hostname(item),master_list))
worker_list = config_dict.get('workers').split(',')
print worker_list
worker_list = list(map(lambda item: self.get_ip_by_hostname(item), worker_list))
ssh_port = config_dict.get("sshPort")
print ssh_port
if (self.zk.exists(masters_zk_path)):
zk_master_list = []
zk_master_nodes = self.zk.get_children(masters_zk_path)
for zk_master_node in zk_master_nodes:
zk_master_list.append(zk_master_node.split('_')[0])
restart_master_list = list(set(master_list) - set(zk_master_list))
if (len(restart_master_list) != 0):
for master in restart_master_list:
print("master " + self.get_ip_by_hostname(master) + " server has down")
os.system('ssh -p ' + ssh_port + ' ' + self.get_ip_by_hostname(master) + ' sh ' + install_path + '/bin/dolphinscheduler-daemon.sh start master-server')
if (self.zk.exists(workers_zk_path)):
zk_worker_list = []
zk_worker_nodes = self.zk.get_children(workers_zk_path)
for zk_worker_node in zk_worker_nodes:
zk_worker_list.append(zk_worker_node.split('_')[0])
restart_worker_list = list(set(worker_list) - set(zk_worker_list))
if (len(restart_worker_list) != 0):
for worker in restart_worker_list:
print("worker " + self.get_ip_by_hostname(worker) + " server has down")
os.system('ssh -p ' + ssh_port + ' ' + self.get_ip_by_hostname(worker) + ' sh ' + install_path + '/bin/dolphinscheduler-daemon.sh start worker-server')
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
schedule.enter(inc, 0, self.restart_server, (inc,))
# default parameter 60s
def main(self,inc=60):
# the enter four parameters are: interval event, priority (sequence for simultaneous execution of two events arriving at the same time), function triggered by the call,
# the argument to the trigger function (tuple form)
schedule.enter(0, 0, self.restart_server, (inc,))
schedule.run()
if __name__ == '__main__':
if (len(sys.argv) < 4):
print('please input install_path,zookeepers,masters_zk_path and worker_zk_path')
install_path = sys.argv[1]
zookeepers = sys.argv[2]
masters_zk_path = sys.argv[3]
workers_zk_path = sys.argv[4]
zkClient = ZkClient()
zkClient.main(300)

52
script/monitor-server.sh

@ -0,0 +1,52 @@
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
print_usage(){
printf $"USAGE:$0 masterPath workerPath port installPath\n"
exit 1
}
if [ $# -ne 4 ];then
print_usage
fi
masterPath=$1
workerPath=$2
port=$3
installPath=$4
BIN_DIR=`dirname $0`
BIN_DIR=`cd "$BIN_DIR"; pwd`
DOLPHINSCHEDULER_HOME=$BIN_DIR/..
export JAVA_HOME=$JAVA_HOME
export DOLPHINSCHEDULER_CONF_DIR=$DOLPHINSCHEDULER_HOME/conf
export DOLPHINSCHEDULER_LIB_JARS=$DOLPHINSCHEDULER_HOME/lib/*
export DOLPHINSCHEDULER_OPTS="-server -Xmx1g -Xms1g -Xss512k -XX:+DisableExplicitGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:LargePageSizeInBytes=128m -XX:+UseFastAccessorMethods -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70"
export STOP_TIMEOUT=5
CLASS=org.apache.dolphinscheduler.server.monitor.MonitorServer
exec_command="$DOLPHINSCHEDULER_OPTS -classpath $DOLPHINSCHEDULER_CONF_DIR:$DOLPHINSCHEDULER_LIB_JARS $CLASS $masterPath $workerPath $port $installPath"
cd $DOLPHINSCHEDULER_HOME
$JAVA_HOME/bin/java $exec_command

48
script/remove-zk-node.sh

@ -0,0 +1,48 @@
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
print_usage(){
printf $"USAGE:$0 rootNode\n"
exit 1
}
if [ $# -ne 1 ];then
print_usage
fi
rootNode=$1
BIN_DIR=`dirname $0`
BIN_DIR=`cd "$BIN_DIR"; pwd`
DOLPHINSCHEDULER_HOME=$BIN_DIR/..
export JAVA_HOME=$JAVA_HOME
export DOLPHINSCHEDULER_CONF_DIR=$DOLPHINSCHEDULER_HOME/conf
export DOLPHINSCHEDULER_LIB_JARS=$DOLPHINSCHEDULER_HOME/lib/*
export DOLPHINSCHEDULER_OPTS="-server -Xmx1g -Xms1g -Xss512k -XX:+DisableExplicitGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:LargePageSizeInBytes=128m -XX:+UseFastAccessorMethods -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70"
export STOP_TIMEOUT=5
CLASS=org.apache.dolphinscheduler.server.utils.RemoveZKNode
exec_command="$DOLPHINSCHEDULER_OPTS -classpath $DOLPHINSCHEDULER_CONF_DIR:$DOLPHINSCHEDULER_LIB_JARS $CLASS $rootNode"
cd $DOLPHINSCHEDULER_HOME
$JAVA_HOME/bin/java $exec_command