dailidong
5 years ago
86 changed files with 2955 additions and 672 deletions
@ -0,0 +1,24 @@ |
|||||||
|
# Local development stack for DolphinScheduler:
#   - a single-node ZooKeeper (registry/coordination)
#   - a PostgreSQL database seeded by the SQL files mounted into
#     /docker-entrypoint-initdb.d (run once on first start of an empty volume)
version: '2'

services:

  zookeeper:
    image: zookeeper
    restart: always
    container_name: zookeeper
    ports:
      - "2181:2181"
    environment:
      # Quoted so YAML treats the server id as a string, not an integer.
      ZOO_MY_ID: "1"

  db:
    image: postgres
    # Match the restart policy of the zookeeper service so the whole
    # stack comes back together after a daemon/host restart.
    restart: always
    container_name: postgres
    # Map form for consistency with the zookeeper service above.
    # NOTE(review): dev-only credentials — do not reuse in production.
    environment:
      POSTGRES_USER: test
      POSTGRES_PASSWORD: test
      POSTGRES_DB: dolphinscheduler
    ports:
      - "5432:5432"
    volumes:
      # Named volume keeps data across container re-creation.
      - pgdata:/var/lib/postgresql/data
      # Init scripts (schema + seed data) executed on first boot only.
      - ./postgres/docker-entrypoint-initdb:/docker-entrypoint-initdb.d

volumes:
  pgdata:
@ -0,0 +1,771 @@ |
|||||||
|
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

-- Drop the Quartz scheduler tables so the script can be re-run from scratch.
-- Child/leaf tables are dropped before QRTZ_TRIGGERS and QRTZ_JOB_DETAILS so
-- the order is also safe on databases where the standard Quartz foreign keys
-- between these tables exist.
DROP TABLE IF EXISTS QRTZ_FIRED_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_PAUSED_TRIGGER_GRPS;
DROP TABLE IF EXISTS QRTZ_SCHEDULER_STATE;
DROP TABLE IF EXISTS QRTZ_LOCKS;
DROP TABLE IF EXISTS QRTZ_SIMPLE_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_SIMPROP_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_CRON_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_BLOB_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_JOB_DETAILS;
DROP TABLE IF EXISTS QRTZ_CALENDARS;
||||||
|
|
||||||
|
-- Quartz JDBC job-store tables (PostgreSQL dialect).
-- Changes vs. the previous revision:
--   * primary keys are declared inline as named constraints instead of
--     separate unnamed "ALTER TABLE ... add primary key" statements
--     (the constraint names match what PostgreSQL would auto-generate);
--   * the foreign keys from the trigger tables back to QRTZ_JOB_DETAILS /
--     QRTZ_TRIGGERS, present in the official Quartz tables_postgres.sql,
--     are restored.

-- One row per registered Quartz job.
CREATE TABLE QRTZ_JOB_DETAILS (
    SCHED_NAME        character varying(120) NOT NULL,
    JOB_NAME          character varying(200) NOT NULL,
    JOB_GROUP         character varying(200) NOT NULL,
    DESCRIPTION       character varying(250) NULL,
    JOB_CLASS_NAME    character varying(250) NOT NULL,
    IS_DURABLE        boolean NOT NULL,
    IS_NONCONCURRENT  boolean NOT NULL,
    IS_UPDATE_DATA    boolean NOT NULL,
    REQUESTS_RECOVERY boolean NOT NULL,
    JOB_DATA          bytea NULL,
    CONSTRAINT qrtz_job_details_pkey
        PRIMARY KEY (SCHED_NAME, JOB_NAME, JOB_GROUP)
);

-- One row per trigger; every trigger must reference an existing job.
CREATE TABLE QRTZ_TRIGGERS (
    SCHED_NAME     character varying(120) NOT NULL,
    TRIGGER_NAME   character varying(200) NOT NULL,
    TRIGGER_GROUP  character varying(200) NOT NULL,
    JOB_NAME       character varying(200) NOT NULL,
    JOB_GROUP      character varying(200) NOT NULL,
    DESCRIPTION    character varying(250) NULL,
    NEXT_FIRE_TIME BIGINT NULL,
    PREV_FIRE_TIME BIGINT NULL,
    PRIORITY       INTEGER NULL,
    TRIGGER_STATE  character varying(16) NOT NULL,
    TRIGGER_TYPE   character varying(8) NOT NULL,
    START_TIME     BIGINT NOT NULL,
    END_TIME       BIGINT NULL,
    CALENDAR_NAME  character varying(200) NULL,
    MISFIRE_INSTR  SMALLINT NULL,
    JOB_DATA       bytea NULL,
    CONSTRAINT qrtz_triggers_pkey
        PRIMARY KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP),
    CONSTRAINT qrtz_triggers_job_details_fk
        FOREIGN KEY (SCHED_NAME, JOB_NAME, JOB_GROUP)
        REFERENCES QRTZ_JOB_DETAILS (SCHED_NAME, JOB_NAME, JOB_GROUP)
);

-- Detail rows for simple (fixed-interval) triggers.
CREATE TABLE QRTZ_SIMPLE_TRIGGERS (
    SCHED_NAME      character varying(120) NOT NULL,
    TRIGGER_NAME    character varying(200) NOT NULL,
    TRIGGER_GROUP   character varying(200) NOT NULL,
    REPEAT_COUNT    BIGINT NOT NULL,
    REPEAT_INTERVAL BIGINT NOT NULL,
    TIMES_TRIGGERED BIGINT NOT NULL,
    CONSTRAINT qrtz_simple_triggers_pkey
        PRIMARY KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP),
    CONSTRAINT qrtz_simple_triggers_triggers_fk
        FOREIGN KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP)
        REFERENCES QRTZ_TRIGGERS (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP)
);

-- Detail rows for cron-expression triggers.
CREATE TABLE QRTZ_CRON_TRIGGERS (
    SCHED_NAME      character varying(120) NOT NULL,
    TRIGGER_NAME    character varying(200) NOT NULL,
    TRIGGER_GROUP   character varying(200) NOT NULL,
    CRON_EXPRESSION character varying(120) NOT NULL,
    TIME_ZONE_ID    character varying(80),
    CONSTRAINT qrtz_cron_triggers_pkey
        PRIMARY KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP),
    CONSTRAINT qrtz_cron_triggers_triggers_fk
        FOREIGN KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP)
        REFERENCES QRTZ_TRIGGERS (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP)
);
||||||
|
|
||||||
|
-- Remaining Quartz job-store tables. As above, primary keys are declared
-- inline as named constraints and the standard foreign keys from trigger
-- detail tables to QRTZ_TRIGGERS (per the official Quartz
-- tables_postgres.sql) are restored.

-- Property-bag storage used by CalendarIntervalTrigger / DailyTimeIntervalTrigger.
CREATE TABLE QRTZ_SIMPROP_TRIGGERS (
    SCHED_NAME    character varying(120) NOT NULL,
    TRIGGER_NAME  character varying(200) NOT NULL,
    TRIGGER_GROUP character varying(200) NOT NULL,
    STR_PROP_1    character varying(512) NULL,
    STR_PROP_2    character varying(512) NULL,
    STR_PROP_3    character varying(512) NULL,
    INT_PROP_1    INT NULL,
    INT_PROP_2    INT NULL,
    LONG_PROP_1   BIGINT NULL,
    LONG_PROP_2   BIGINT NULL,
    DEC_PROP_1    NUMERIC(13,4) NULL,
    DEC_PROP_2    NUMERIC(13,4) NULL,
    BOOL_PROP_1   boolean NULL,
    BOOL_PROP_2   boolean NULL,
    CONSTRAINT qrtz_simprop_triggers_pkey
        PRIMARY KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP),
    CONSTRAINT qrtz_simprop_triggers_triggers_fk
        FOREIGN KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP)
        REFERENCES QRTZ_TRIGGERS (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP)
);

-- Serialized trigger storage for custom trigger types.
CREATE TABLE QRTZ_BLOB_TRIGGERS (
    SCHED_NAME    character varying(120) NOT NULL,
    TRIGGER_NAME  character varying(200) NOT NULL,
    TRIGGER_GROUP character varying(200) NOT NULL,
    BLOB_DATA     bytea NULL,
    CONSTRAINT qrtz_blob_triggers_pkey
        PRIMARY KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP),
    CONSTRAINT qrtz_blob_triggers_triggers_fk
        FOREIGN KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP)
        REFERENCES QRTZ_TRIGGERS (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP)
);

-- Serialized Quartz Calendar instances (exclusion calendars).
CREATE TABLE QRTZ_CALENDARS (
    SCHED_NAME    character varying(120) NOT NULL,
    CALENDAR_NAME character varying(200) NOT NULL,
    CALENDAR      bytea NOT NULL,
    CONSTRAINT qrtz_calendars_pkey
        PRIMARY KEY (SCHED_NAME, CALENDAR_NAME)
);

-- Trigger groups that are currently paused.
CREATE TABLE QRTZ_PAUSED_TRIGGER_GRPS (
    SCHED_NAME    character varying(120) NOT NULL,
    TRIGGER_GROUP character varying(200) NOT NULL,
    CONSTRAINT qrtz_paused_trigger_grps_pkey
        PRIMARY KEY (SCHED_NAME, TRIGGER_GROUP)
);

-- In-flight trigger executions; used for recovery after node failure.
CREATE TABLE QRTZ_FIRED_TRIGGERS (
    SCHED_NAME        character varying(120) NOT NULL,
    ENTRY_ID          character varying(95) NOT NULL,
    TRIGGER_NAME      character varying(200) NOT NULL,
    TRIGGER_GROUP     character varying(200) NOT NULL,
    INSTANCE_NAME     character varying(200) NOT NULL,
    FIRED_TIME        BIGINT NOT NULL,
    SCHED_TIME        BIGINT NOT NULL,
    PRIORITY          INTEGER NOT NULL,
    STATE             character varying(16) NOT NULL,
    JOB_NAME          character varying(200) NULL,
    JOB_GROUP         character varying(200) NULL,
    IS_NONCONCURRENT  boolean NULL,
    REQUESTS_RECOVERY boolean NULL,
    CONSTRAINT qrtz_fired_triggers_pkey
        PRIMARY KEY (SCHED_NAME, ENTRY_ID)
);

-- Heartbeats of scheduler instances in a clustered setup.
CREATE TABLE QRTZ_SCHEDULER_STATE (
    SCHED_NAME        character varying(120) NOT NULL,
    INSTANCE_NAME     character varying(200) NOT NULL,
    LAST_CHECKIN_TIME BIGINT NOT NULL,
    CHECKIN_INTERVAL  BIGINT NOT NULL,
    CONSTRAINT qrtz_scheduler_state_pkey
        PRIMARY KEY (SCHED_NAME, INSTANCE_NAME)
);

-- Row-level locks used by the clustered job store.
CREATE TABLE QRTZ_LOCKS (
    SCHED_NAME character varying(120) NOT NULL,
    LOCK_NAME  character varying(40) NOT NULL,
    CONSTRAINT qrtz_locks_pkey
        PRIMARY KEY (SCHED_NAME, LOCK_NAME)
);
||||||
|
|
||||||
|
-- Standard Quartz secondary indexes.

-- Job lookups.
CREATE INDEX IDX_QRTZ_J_REQ_RECOVERY ON QRTZ_JOB_DETAILS(SCHED_NAME,REQUESTS_RECOVERY);
CREATE INDEX IDX_QRTZ_J_GRP ON QRTZ_JOB_DETAILS(SCHED_NAME,JOB_GROUP);

-- Trigger lookups by job, group, calendar and state; the NEXT_FIRE_TIME
-- variants back the scheduler's "acquire next triggers" hot path.
CREATE INDEX IDX_QRTZ_T_J ON QRTZ_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_JG ON QRTZ_TRIGGERS(SCHED_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_C ON QRTZ_TRIGGERS(SCHED_NAME,CALENDAR_NAME);
CREATE INDEX IDX_QRTZ_T_G ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP);
CREATE INDEX IDX_QRTZ_T_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_N_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_N_G_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_NEXT_FIRE_TIME ON QRTZ_TRIGGERS(SCHED_NAME,NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_ST ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE,NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE_GRP ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE);

-- Fired-trigger lookups used by cluster recovery and state cleanup.
CREATE INDEX IDX_QRTZ_FT_TRIG_INST_NAME ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME);
CREATE INDEX IDX_QRTZ_FT_INST_JOB_REQ_RCVRY ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME,REQUESTS_RECOVERY);
CREATE INDEX IDX_QRTZ_FT_J_G ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_FT_JG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_FT_T_G ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE INDEX IDX_QRTZ_FT_TG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_GROUP);
||||||
|
|
||||||
|
|
||||||
|
--
-- Table structure for table t_ds_access_token
-- API access tokens issued to users.
--
DROP TABLE IF EXISTS t_ds_access_token;
CREATE TABLE t_ds_access_token (
  id int NOT NULL,
  user_id int DEFAULT NULL,
  token varchar(64) DEFAULT NULL,
  expire_time timestamp DEFAULT NULL,
  create_time timestamp DEFAULT NULL,
  update_time timestamp DEFAULT NULL,
  PRIMARY KEY (id)
);

--
-- Table structure for table t_ds_alert
-- Alert messages queued for delivery by the alert server.
--
DROP TABLE IF EXISTS t_ds_alert;
CREATE TABLE t_ds_alert (
  id int NOT NULL,
  title varchar(64) DEFAULT NULL,
  show_type int DEFAULT NULL,
  content text,
  alert_type int DEFAULT NULL,
  alert_status int DEFAULT '0',
  log text,
  alertgroup_id int DEFAULT NULL,
  receivers text,
  receivers_cc text,
  create_time timestamp DEFAULT NULL,
  update_time timestamp DEFAULT NULL,
  PRIMARY KEY (id)
);

--
-- Table structure for table t_ds_alertgroup
-- Named groups of alert recipients.
--
DROP TABLE IF EXISTS t_ds_alertgroup;
CREATE TABLE t_ds_alertgroup (
  id int NOT NULL,
  group_name varchar(255) DEFAULT NULL,
  group_type int DEFAULT NULL,
  description varchar(255) DEFAULT NULL,
  create_time timestamp DEFAULT NULL,
  update_time timestamp DEFAULT NULL,
  PRIMARY KEY (id)
);

--
-- Table structure for table t_ds_command
-- Pending commands consumed by the master server.
--
DROP TABLE IF EXISTS t_ds_command;
CREATE TABLE t_ds_command (
  id int NOT NULL,
  command_type int DEFAULT NULL,
  process_definition_id int DEFAULT NULL,
  command_param text,
  task_depend_type int DEFAULT NULL,
  failure_strategy int DEFAULT '0',
  warning_type int DEFAULT '0',
  warning_group_id int DEFAULT NULL,
  schedule_time timestamp DEFAULT NULL,
  start_time timestamp DEFAULT NULL,
  executor_id int DEFAULT NULL,
  dependence varchar(255) DEFAULT NULL,
  update_time timestamp DEFAULT NULL,
  process_instance_priority int DEFAULT NULL,
  worker_group_id int DEFAULT '-1',
  PRIMARY KEY (id)
);

--
-- Table structure for table t_ds_datasource
-- User-registered external data sources; connection_params holds the
-- serialized connection settings.
--
DROP TABLE IF EXISTS t_ds_datasource;
CREATE TABLE t_ds_datasource (
  id int NOT NULL,
  name varchar(64) NOT NULL,
  note varchar(256) DEFAULT NULL,
  type int NOT NULL,
  user_id int NOT NULL,
  connection_params text NOT NULL,
  create_time timestamp NOT NULL,
  update_time timestamp DEFAULT NULL,
  PRIMARY KEY (id)
);

--
-- Table structure for table t_ds_error_command
-- Commands that failed to execute, kept with the error message for diagnosis.
--
DROP TABLE IF EXISTS t_ds_error_command;
CREATE TABLE t_ds_error_command (
  id int NOT NULL,
  command_type int DEFAULT NULL,
  executor_id int DEFAULT NULL,
  process_definition_id int DEFAULT NULL,
  command_param text,
  task_depend_type int DEFAULT NULL,
  failure_strategy int DEFAULT '0',
  warning_type int DEFAULT '0',
  warning_group_id int DEFAULT NULL,
  schedule_time timestamp DEFAULT NULL,
  start_time timestamp DEFAULT NULL,
  update_time timestamp DEFAULT NULL,
  dependence text,
  process_instance_priority int DEFAULT NULL,
  worker_group_id int DEFAULT '-1',
  message text,
  PRIMARY KEY (id)
);
||||||
|
--
-- Table structure for table t_ds_master_server
-- Registered master servers and their heartbeat state.
--
DROP TABLE IF EXISTS t_ds_master_server;
CREATE TABLE t_ds_master_server (
  id int NOT NULL,
  host varchar(45) DEFAULT NULL,
  port int DEFAULT NULL,
  zk_directory varchar(64) DEFAULT NULL,
  res_info varchar(256) DEFAULT NULL,
  create_time timestamp DEFAULT NULL,
  last_heartbeat_time timestamp DEFAULT NULL,
  PRIMARY KEY (id)
);

--
-- Table structure for table t_ds_process_definition
-- Workflow definitions; the DAG itself is serialized in
-- process_definition_json.
--
DROP TABLE IF EXISTS t_ds_process_definition;
CREATE TABLE t_ds_process_definition (
  id int NOT NULL,
  name varchar(255) DEFAULT NULL,
  version int DEFAULT NULL,
  release_state int DEFAULT NULL,
  project_id int DEFAULT NULL,
  user_id int DEFAULT NULL,
  process_definition_json text,
  description text,
  global_params text,
  flag int DEFAULT NULL,
  locations text,
  connects text,
  receivers text,
  receivers_cc text,
  create_time timestamp DEFAULT NULL,
  timeout int DEFAULT '0',
  tenant_id int NOT NULL DEFAULT '-1',
  update_time timestamp DEFAULT NULL,
  PRIMARY KEY (id)
);

CREATE INDEX process_definition_index ON t_ds_process_definition (project_id, id);

--
-- Table structure for table t_ds_process_instance
-- One row per workflow run.
--
DROP TABLE IF EXISTS t_ds_process_instance;
CREATE TABLE t_ds_process_instance (
  id int NOT NULL,
  name varchar(255) DEFAULT NULL,
  process_definition_id int DEFAULT NULL,
  state int DEFAULT NULL,
  recovery int DEFAULT NULL,
  start_time timestamp DEFAULT NULL,
  end_time timestamp DEFAULT NULL,
  run_times int DEFAULT NULL,
  host varchar(45) DEFAULT NULL,
  command_type int DEFAULT NULL,
  command_param text,
  task_depend_type int DEFAULT NULL,
  max_try_times int DEFAULT '0',
  failure_strategy int DEFAULT '0',
  warning_type int DEFAULT '0',
  warning_group_id int DEFAULT NULL,
  schedule_time timestamp DEFAULT NULL,
  command_start_time timestamp DEFAULT NULL,
  global_params text,
  process_instance_json text,
  flag int DEFAULT '1',
  update_time timestamp NULL,
  is_sub_process int DEFAULT '0',
  executor_id int NOT NULL,
  locations text,
  connects text,
  history_cmd text,
  dependence_schedule_times text,
  process_instance_priority int DEFAULT NULL,
  worker_group_id int DEFAULT '-1',
  timeout int DEFAULT '0',
  tenant_id int NOT NULL DEFAULT '-1',
  PRIMARY KEY (id)
);

CREATE INDEX process_instance_index ON t_ds_process_instance (process_definition_id, id);
CREATE INDEX start_time_index ON t_ds_process_instance (start_time);

--
-- Table structure for table t_ds_project
--
DROP TABLE IF EXISTS t_ds_project;
CREATE TABLE t_ds_project (
  id int NOT NULL,
  name varchar(100) DEFAULT NULL,
  description varchar(200) DEFAULT NULL,
  user_id int DEFAULT NULL,
  flag int DEFAULT '1',
  create_time timestamp DEFAULT CURRENT_TIMESTAMP,
  update_time timestamp DEFAULT CURRENT_TIMESTAMP,
  PRIMARY KEY (id)
);

CREATE INDEX user_id_index ON t_ds_project (user_id);

--
-- Table structure for table t_ds_queue
-- Resource-manager (e.g. YARN) queues selectable per tenant/user.
--
DROP TABLE IF EXISTS t_ds_queue;
CREATE TABLE t_ds_queue (
  id int NOT NULL,
  queue_name varchar(64) DEFAULT NULL,
  queue varchar(64) DEFAULT NULL,
  create_time timestamp DEFAULT NULL,
  update_time timestamp DEFAULT NULL,
  PRIMARY KEY (id)
);
||||||
|
|
||||||
|
|
||||||
|
--
-- Table structure for table t_ds_relation_datasource_user
-- Grants a user's permission (perm) on a data source.
--
DROP TABLE IF EXISTS t_ds_relation_datasource_user;
CREATE TABLE t_ds_relation_datasource_user (
  id int NOT NULL,
  user_id int NOT NULL,
  datasource_id int DEFAULT NULL,
  perm int DEFAULT '1',
  create_time timestamp DEFAULT NULL,
  update_time timestamp DEFAULT NULL,
  PRIMARY KEY (id)
);
-- (removed a stray empty-statement ";" that followed the CREATE TABLE)
||||||
|
|
||||||
|
--
-- Table structure for table t_ds_relation_process_instance
-- Links a sub-process instance to its parent process/task instance.
--
DROP TABLE IF EXISTS t_ds_relation_process_instance;
CREATE TABLE t_ds_relation_process_instance (
  id int NOT NULL,
  parent_process_instance_id int DEFAULT NULL,
  parent_task_instance_id int DEFAULT NULL,
  process_instance_id int DEFAULT NULL,
  PRIMARY KEY (id)
);

--
-- Table structure for table t_ds_relation_project_user
-- Grants a user's permission (perm) on a project.
--
DROP TABLE IF EXISTS t_ds_relation_project_user;
CREATE TABLE t_ds_relation_project_user (
  id int NOT NULL,
  user_id int NOT NULL,
  project_id int DEFAULT NULL,
  perm int DEFAULT '1',
  create_time timestamp DEFAULT NULL,
  update_time timestamp DEFAULT NULL,
  PRIMARY KEY (id)
);

CREATE INDEX relation_project_user_id_index ON t_ds_relation_project_user (user_id);

--
-- Table structure for table t_ds_relation_resources_user
-- Grants a user's permission (perm) on a resource file.
--
DROP TABLE IF EXISTS t_ds_relation_resources_user;
CREATE TABLE t_ds_relation_resources_user (
  id int NOT NULL,
  user_id int NOT NULL,
  resources_id int DEFAULT NULL,
  perm int DEFAULT '1',
  create_time timestamp DEFAULT NULL,
  update_time timestamp DEFAULT NULL,
  PRIMARY KEY (id)
);
||||||
|
|
||||||
|
--
-- Table structure for table t_ds_relation_udfs_user
-- Grants a user's permission (perm) on a UDF.
--
DROP TABLE IF EXISTS t_ds_relation_udfs_user;
CREATE TABLE t_ds_relation_udfs_user (
  id int NOT NULL,
  user_id int NOT NULL,
  udf_id int DEFAULT NULL,
  perm int DEFAULT '1',
  create_time timestamp DEFAULT NULL,
  update_time timestamp DEFAULT NULL,
  PRIMARY KEY (id)
);
-- (removed a stray empty-statement ";" that followed the CREATE TABLE)
||||||
|
|
||||||
|
--
-- Table structure for table t_ds_relation_user_alertgroup
-- Many-to-many membership of users in alert groups.
--
DROP TABLE IF EXISTS t_ds_relation_user_alertgroup;
CREATE TABLE t_ds_relation_user_alertgroup (
  id int NOT NULL,
  alertgroup_id int DEFAULT NULL,
  user_id int DEFAULT NULL,
  create_time timestamp DEFAULT NULL,
  update_time timestamp DEFAULT NULL,
  PRIMARY KEY (id)
);
||||||
|
|
||||||
|
--
-- Table structure for table t_ds_resources
-- Uploaded resource files (scripts, jars, UDF jars); size is in bytes.
--
DROP TABLE IF EXISTS t_ds_resources;
CREATE TABLE t_ds_resources (
  id int NOT NULL,
  alias varchar(64) DEFAULT NULL,
  file_name varchar(64) DEFAULT NULL,
  description varchar(256) DEFAULT NULL,
  user_id int DEFAULT NULL,
  type int DEFAULT NULL,
  size bigint DEFAULT NULL,
  create_time timestamp DEFAULT NULL,
  update_time timestamp DEFAULT NULL,
  PRIMARY KEY (id)
);
-- (removed a stray empty-statement ";" that followed the CREATE TABLE)
||||||
|
|
||||||
|
--
-- Table structure for table t_ds_schedules
-- Cron schedules attached to process definitions.
--
DROP TABLE IF EXISTS t_ds_schedules;
CREATE TABLE t_ds_schedules (
  id int NOT NULL,
  process_definition_id int NOT NULL,
  start_time timestamp NOT NULL,
  end_time timestamp NOT NULL,
  crontab varchar(256) NOT NULL,
  failure_strategy int NOT NULL,
  user_id int NOT NULL,
  release_state int NOT NULL,
  warning_type int NOT NULL,
  warning_group_id int DEFAULT NULL,
  process_instance_priority int DEFAULT NULL,
  worker_group_id int DEFAULT '-1',
  create_time timestamp NOT NULL,
  update_time timestamp NOT NULL,
  PRIMARY KEY (id)
);

--
-- Table structure for table t_ds_session
-- Login sessions; id is the session token itself.
--
DROP TABLE IF EXISTS t_ds_session;
CREATE TABLE t_ds_session (
  id varchar(64) NOT NULL,
  user_id int DEFAULT NULL,
  ip varchar(45) DEFAULT NULL,
  last_login_time timestamp DEFAULT NULL,
  PRIMARY KEY (id)
);

--
-- Table structure for table t_ds_task_instance
-- One row per task run inside a process instance.
--
DROP TABLE IF EXISTS t_ds_task_instance;
CREATE TABLE t_ds_task_instance (
  id int NOT NULL,
  name varchar(255) DEFAULT NULL,
  task_type varchar(64) DEFAULT NULL,
  process_definition_id int DEFAULT NULL,
  process_instance_id int DEFAULT NULL,
  task_json text,
  state int DEFAULT NULL,
  submit_time timestamp DEFAULT NULL,
  start_time timestamp DEFAULT NULL,
  end_time timestamp DEFAULT NULL,
  host varchar(45) DEFAULT NULL,
  execute_path varchar(200) DEFAULT NULL,
  log_path varchar(200) DEFAULT NULL,
  alert_flag int DEFAULT NULL,
  retry_times int DEFAULT '0',
  pid int DEFAULT NULL,
  app_link varchar(255) DEFAULT NULL,
  flag int DEFAULT '1',
  retry_interval int DEFAULT NULL,
  max_retry_times int DEFAULT NULL,
  task_instance_priority int DEFAULT NULL,
  worker_group_id int DEFAULT '-1',
  PRIMARY KEY (id)
);
||||||
|
|
||||||
|
--
-- Table structure for table t_ds_tenant
-- Tenants map platform users to OS/cluster accounts and a default queue.
--
DROP TABLE IF EXISTS t_ds_tenant;
CREATE TABLE t_ds_tenant (
  id int NOT NULL,
  tenant_code varchar(64) DEFAULT NULL,
  tenant_name varchar(64) DEFAULT NULL,
  description varchar(256) DEFAULT NULL,
  queue_id int DEFAULT NULL,
  create_time timestamp DEFAULT NULL,
  update_time timestamp DEFAULT NULL,
  PRIMARY KEY (id)
);

--
-- Table structure for table t_ds_udfs
-- Registered user-defined functions and the resource (jar) backing them.
--
DROP TABLE IF EXISTS t_ds_udfs;
CREATE TABLE t_ds_udfs (
  id int NOT NULL,
  user_id int NOT NULL,
  func_name varchar(100) NOT NULL,
  class_name varchar(255) NOT NULL,
  type int NOT NULL,
  arg_types varchar(255) DEFAULT NULL,
  database varchar(255) DEFAULT NULL,
  description varchar(255) DEFAULT NULL,
  resource_id int NOT NULL,
  resource_name varchar(255) NOT NULL,
  create_time timestamp NOT NULL,
  update_time timestamp NOT NULL,
  PRIMARY KEY (id)
);

--
-- Table structure for table t_ds_user
-- Platform accounts; user_password stores a password hash, not plaintext.
--
DROP TABLE IF EXISTS t_ds_user;
CREATE TABLE t_ds_user (
  id int NOT NULL,
  user_name varchar(64) DEFAULT NULL,
  user_password varchar(64) DEFAULT NULL,
  user_type int DEFAULT NULL,
  email varchar(64) DEFAULT NULL,
  phone varchar(11) DEFAULT NULL,
  tenant_id int DEFAULT NULL,
  create_time timestamp DEFAULT NULL,
  update_time timestamp DEFAULT NULL,
  queue varchar(64) DEFAULT NULL,
  PRIMARY KEY (id)
);

--
-- Table structure for table t_ds_version
-- Single-row marker of the installed schema version.
--
DROP TABLE IF EXISTS t_ds_version;
CREATE TABLE t_ds_version (
  id int NOT NULL,
  version varchar(200) NOT NULL,
  PRIMARY KEY (id)
);

CREATE INDEX version_index ON t_ds_version (version);

--
-- Table structure for table t_ds_worker_group
-- Named groups of worker hosts (ip_list is comma-separated).
--
DROP TABLE IF EXISTS t_ds_worker_group;
CREATE TABLE t_ds_worker_group (
  id bigint NOT NULL,
  name varchar(256) DEFAULT NULL,
  ip_list varchar(256) DEFAULT NULL,
  create_time timestamp DEFAULT NULL,
  update_time timestamp DEFAULT NULL,
  PRIMARY KEY (id)
);

--
-- Table structure for table t_ds_worker_server
-- Registered worker servers and their heartbeat state.
--
DROP TABLE IF EXISTS t_ds_worker_server;
CREATE TABLE t_ds_worker_server (
  id int NOT NULL,
  host varchar(45) DEFAULT NULL,
  port int DEFAULT NULL,
  zk_directory varchar(64) DEFAULT NULL,
  res_info varchar(255) DEFAULT NULL,
  create_time timestamp DEFAULT NULL,
  last_heartbeat_time timestamp DEFAULT NULL,
  PRIMARY KEY (id)
);
||||||
|
|
||||||
|
|
||||||
|
-- Auto-increment support: one sequence per table, wired in as the DEFAULT of
-- the id column. Each sequence is dropped first so the script is re-runnable.

DROP SEQUENCE IF EXISTS t_ds_access_token_id_sequence;
CREATE SEQUENCE t_ds_access_token_id_sequence;
ALTER TABLE t_ds_access_token ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_access_token_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_alert_id_sequence;
CREATE SEQUENCE t_ds_alert_id_sequence;
ALTER TABLE t_ds_alert ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_alert_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_alertgroup_id_sequence;
CREATE SEQUENCE t_ds_alertgroup_id_sequence;
ALTER TABLE t_ds_alertgroup ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_alertgroup_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_command_id_sequence;
CREATE SEQUENCE t_ds_command_id_sequence;
ALTER TABLE t_ds_command ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_command_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_datasource_id_sequence;
CREATE SEQUENCE t_ds_datasource_id_sequence;
ALTER TABLE t_ds_datasource ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_datasource_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_master_server_id_sequence;
CREATE SEQUENCE t_ds_master_server_id_sequence;
ALTER TABLE t_ds_master_server ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_master_server_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_process_definition_id_sequence;
CREATE SEQUENCE t_ds_process_definition_id_sequence;
ALTER TABLE t_ds_process_definition ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_process_instance_id_sequence;
CREATE SEQUENCE t_ds_process_instance_id_sequence;
ALTER TABLE t_ds_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_instance_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_project_id_sequence;
CREATE SEQUENCE t_ds_project_id_sequence;
ALTER TABLE t_ds_project ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_project_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_queue_id_sequence;
CREATE SEQUENCE t_ds_queue_id_sequence;
ALTER TABLE t_ds_queue ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_queue_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_relation_datasource_user_id_sequence;
CREATE SEQUENCE t_ds_relation_datasource_user_id_sequence;
ALTER TABLE t_ds_relation_datasource_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_datasource_user_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_relation_process_instance_id_sequence;
CREATE SEQUENCE t_ds_relation_process_instance_id_sequence;
ALTER TABLE t_ds_relation_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_process_instance_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_relation_project_user_id_sequence;
CREATE SEQUENCE t_ds_relation_project_user_id_sequence;
ALTER TABLE t_ds_relation_project_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_project_user_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_relation_resources_user_id_sequence;
CREATE SEQUENCE t_ds_relation_resources_user_id_sequence;
ALTER TABLE t_ds_relation_resources_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_resources_user_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_relation_udfs_user_id_sequence;
CREATE SEQUENCE t_ds_relation_udfs_user_id_sequence;
ALTER TABLE t_ds_relation_udfs_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_udfs_user_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_relation_user_alertgroup_id_sequence;
CREATE SEQUENCE t_ds_relation_user_alertgroup_id_sequence;
ALTER TABLE t_ds_relation_user_alertgroup ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_user_alertgroup_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_resources_id_sequence;
CREATE SEQUENCE t_ds_resources_id_sequence;
ALTER TABLE t_ds_resources ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_resources_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_schedules_id_sequence;
CREATE SEQUENCE t_ds_schedules_id_sequence;
ALTER TABLE t_ds_schedules ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_schedules_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_task_instance_id_sequence;
CREATE SEQUENCE t_ds_task_instance_id_sequence;
ALTER TABLE t_ds_task_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_task_instance_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_tenant_id_sequence;
CREATE SEQUENCE t_ds_tenant_id_sequence;
ALTER TABLE t_ds_tenant ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_tenant_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_udfs_id_sequence;
CREATE SEQUENCE t_ds_udfs_id_sequence;
ALTER TABLE t_ds_udfs ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_udfs_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_user_id_sequence;
CREATE SEQUENCE t_ds_user_id_sequence;
ALTER TABLE t_ds_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_user_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_version_id_sequence;
CREATE SEQUENCE t_ds_version_id_sequence;
ALTER TABLE t_ds_version ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_version_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_worker_group_id_sequence;
CREATE SEQUENCE t_ds_worker_group_id_sequence;
ALTER TABLE t_ds_worker_group ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_worker_group_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_worker_server_id_sequence;
CREATE SEQUENCE t_ds_worker_server_id_sequence;
ALTER TABLE t_ds_worker_server ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_worker_server_id_sequence');
||||||
|
|
||||||
|
|
||||||
|
-- Seed data. The id columns are omitted so the sequence defaults assign 1 to
-- each first row; the relation row below relies on both the admin user and
-- the default alert group getting id 1.

-- Records of t_ds_user, user: admin, password: dolphinscheduler123
-- NOTE(review): well-known default credential — change it after first login.
INSERT INTO t_ds_user(user_name,user_password,user_type,email,phone,tenant_id,create_time,update_time) VALUES ('admin', '7ad2410b2f4c074479a8937a28a22b8f', '0', 'xxx@qq.com', 'xx', '0', '2018-03-27 15:48:50', '2018-10-24 17:40:22');

-- Records of t_ds_alertgroup: default dolphinscheduler warning group,
-- with the admin user as its only member.
INSERT INTO t_ds_alertgroup(group_name,group_type,description,create_time,update_time) VALUES ('dolphinscheduler warning group', '0', 'dolphinscheduler warning group','2018-11-29 10:20:39', '2018-11-29 10:20:39');
INSERT INTO t_ds_relation_user_alertgroup(alertgroup_id,user_id,create_time,update_time) VALUES ( '1', '1', '2018-11-29 10:22:33', '2018-11-29 10:22:33');

-- Records of t_ds_queue, default queue name: default
INSERT INTO t_ds_queue(queue_name,queue,create_time,update_time) VALUES ('default', 'default','2018-11-29 10:22:33', '2018-11-29 10:22:33');

-- Records of t_ds_version: installed schema version marker
-- (previous revision's comment wrongly said "t_ds_queue" here).
INSERT INTO t_ds_version(version) VALUES ('1.2.0');
@ -0,0 +1,130 @@ |
|||||||
|
/* |
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one or more |
||||||
|
* contributor license agreements. See the NOTICE file distributed with |
||||||
|
* this work for additional information regarding copyright ownership. |
||||||
|
* The ASF licenses this file to You under the Apache License, Version 2.0 |
||||||
|
* (the "License"); you may not use this file except in compliance with |
||||||
|
* the License. You may obtain a copy of the License at |
||||||
|
* |
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
* |
||||||
|
* Unless required by applicable law or agreed to in writing, software |
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS, |
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||||
|
* See the License for the specific language governing permissions and |
||||||
|
* limitations under the License. |
||||||
|
*/ |
||||||
|
package org.apache.dolphinscheduler.common.utils; |
||||||
|
|
||||||
|
import com.alibaba.fastjson.JSONObject; |
||||||
|
import org.apache.commons.lang.time.DateUtils; |
||||||
|
import org.apache.dolphinscheduler.common.enums.CommandType; |
||||||
|
import org.apache.dolphinscheduler.common.enums.DataType; |
||||||
|
import org.apache.dolphinscheduler.common.enums.Direct; |
||||||
|
import org.apache.dolphinscheduler.common.process.Property; |
||||||
|
import org.apache.dolphinscheduler.common.utils.placeholder.PlaceholderUtils; |
||||||
|
import org.junit.Assert; |
||||||
|
import org.junit.Test; |
||||||
|
import org.slf4j.Logger; |
||||||
|
import org.slf4j.LoggerFactory; |
||||||
|
import java.util.*; |
||||||
|
import static org.apache.dolphinscheduler.common.Constants.PARAMETER_FORMAT_TIME; |
||||||
|
import static org.apache.dolphinscheduler.common.utils.placeholder.TimePlaceholderUtils.replacePlaceholders; |
||||||
|
|
||||||
|
|
||||||
|
public class ParameterUtilsTest { |
||||||
|
public static final Logger logger = LoggerFactory.getLogger(ParameterUtilsTest.class); |
||||||
|
|
||||||
|
/** |
||||||
|
* Test convertParameterPlaceholders |
||||||
|
*/ |
||||||
|
@Test |
||||||
|
public void testConvertParameterPlaceholders() throws Exception { |
||||||
|
// parameterString,parameterMap is null
|
||||||
|
Assert.assertNull(ParameterUtils.convertParameterPlaceholders(null, null)); |
||||||
|
|
||||||
|
// parameterString is null,parameterMap is not null
|
||||||
|
Map<String, String> parameterMap = new HashMap<String,String>(); |
||||||
|
parameterMap.put("testParameter","testParameter"); |
||||||
|
Assert.assertNull(ParameterUtils.convertParameterPlaceholders(null, parameterMap)); |
||||||
|
|
||||||
|
// parameterString、parameterMap is not null
|
||||||
|
String parameterString = "test_parameter"; |
||||||
|
Assert.assertEquals(parameterString, ParameterUtils.convertParameterPlaceholders(parameterString, parameterMap)); |
||||||
|
|
||||||
|
//replace variable ${} form
|
||||||
|
parameterMap.put("testParameter2","${testParameter}"); |
||||||
|
Assert.assertEquals(parameterString,PlaceholderUtils.replacePlaceholders(parameterString, parameterMap, true)); |
||||||
|
|
||||||
|
// replace time $[...] form, eg. $[yyyyMMdd]
|
||||||
|
Date cronTime = new Date(); |
||||||
|
Assert.assertEquals(parameterString, replacePlaceholders(parameterString, cronTime, true)); |
||||||
|
|
||||||
|
// replace time $[...] form, eg. $[yyyyMMdd]
|
||||||
|
Date cronTimeStr = DateUtils.parseDate("20191220145900", new String[]{PARAMETER_FORMAT_TIME}); |
||||||
|
Assert.assertEquals(parameterString, replacePlaceholders(parameterString, cronTimeStr, true)); |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* Test curingGlobalParams |
||||||
|
*/ |
||||||
|
@Test |
||||||
|
public void testCuringGlobalParams() throws Exception { |
||||||
|
//define globalMap
|
||||||
|
Map<String, String> globalParamMap = new HashMap<>(); |
||||||
|
globalParamMap.put("globalParams1","Params1"); |
||||||
|
|
||||||
|
//define globalParamList
|
||||||
|
List<Property> globalParamList = new ArrayList<>(); |
||||||
|
|
||||||
|
//define scheduleTime
|
||||||
|
Date scheduleTime = DateUtils.parseDate("20191220145900", new String[]{PARAMETER_FORMAT_TIME}); |
||||||
|
|
||||||
|
//test globalParamList is null
|
||||||
|
String result = ParameterUtils.curingGlobalParams(globalParamMap, globalParamList, CommandType.START_CURRENT_TASK_PROCESS, scheduleTime); |
||||||
|
Assert.assertNull(result); |
||||||
|
Assert.assertNull(ParameterUtils.curingGlobalParams(null,null,CommandType.START_CURRENT_TASK_PROCESS,null)); |
||||||
|
Assert.assertNull(ParameterUtils.curingGlobalParams(globalParamMap,null,CommandType.START_CURRENT_TASK_PROCESS,scheduleTime)); |
||||||
|
|
||||||
|
//test globalParamList is not null
|
||||||
|
Property property=new Property("testGlobalParam", Direct.IN, DataType.VARCHAR,"testGlobalParam"); |
||||||
|
globalParamList.add(property); |
||||||
|
|
||||||
|
String result2 = ParameterUtils.curingGlobalParams(null,globalParamList,CommandType.START_CURRENT_TASK_PROCESS,scheduleTime); |
||||||
|
Assert.assertEquals(result2, JSONObject.toJSONString(globalParamList)); |
||||||
|
|
||||||
|
String result3 = ParameterUtils.curingGlobalParams(globalParamMap,globalParamList,CommandType.START_CURRENT_TASK_PROCESS,null); |
||||||
|
Assert.assertEquals(result3, JSONObject.toJSONString(globalParamList)); |
||||||
|
|
||||||
|
String result4 = ParameterUtils.curingGlobalParams(globalParamMap, globalParamList, CommandType.START_CURRENT_TASK_PROCESS, scheduleTime); |
||||||
|
Assert.assertEquals(result4, JSONObject.toJSONString(globalParamList)); |
||||||
|
|
||||||
|
//test var $ startsWith
|
||||||
|
globalParamMap.put("bizDate","${system.biz.date}"); |
||||||
|
globalParamMap.put("b1zCurdate","${system.biz.curdate}"); |
||||||
|
|
||||||
|
|
||||||
|
Property property2=new Property("testParamList1", Direct.IN, DataType.VARCHAR,"testParamList"); |
||||||
|
Property property3=new Property("testParamList2", Direct.IN, DataType.VARCHAR,"{testParamList1}"); |
||||||
|
Property property4=new Property("testParamList3", Direct.IN, DataType.VARCHAR,"${b1zCurdate}"); |
||||||
|
|
||||||
|
globalParamList.add(property2); |
||||||
|
globalParamList.add(property3); |
||||||
|
globalParamList.add(property4); |
||||||
|
|
||||||
|
String result5 = ParameterUtils.curingGlobalParams(globalParamMap, globalParamList, CommandType.START_CURRENT_TASK_PROCESS, scheduleTime); |
||||||
|
Assert.assertEquals(result5,JSONUtils.toJsonString(globalParamList)); |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* Test handleEscapes |
||||||
|
*/ |
||||||
|
@Test |
||||||
|
public void testHandleEscapes() throws Exception { |
||||||
|
Assert.assertNull(ParameterUtils.handleEscapes(null)); |
||||||
|
Assert.assertEquals("",ParameterUtils.handleEscapes("")); |
||||||
|
Assert.assertEquals("test Parameter",ParameterUtils.handleEscapes("test Parameter")); |
||||||
|
Assert.assertEquals("////%test////%Parameter",ParameterUtils.handleEscapes("%test%Parameter")); |
||||||
|
} |
||||||
|
|
||||||
|
} |
<!--
  ~ Licensed to the Apache Software Foundation (ASF) under one or more
  ~ contributor license agreements.  See the NOTICE file distributed with
  ~ this work for additional information regarding copyright ownership.
  ~ The ASF licenses this file to You under the Apache License, Version 2.0
  ~ (the "License"); you may not use this file except in compliance with
  ~ the License.  You may obtain a copy of the License at
  ~
  ~     http://www.apache.org/licenses/LICENSE-2.0
  ~
  ~ Unless required by applicable law or agreed to in writing, software
  ~ distributed under the License is distributed on an "AS IS" BASIS,
  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  ~ See the License for the specific language governing permissions and
  ~ limitations under the License.
  -->

<!-- Maven Assembly Plugin descriptor: builds the dolphinscheduler binary
     tar.gz (conf, bin, lib, ui, sql, script directories) for the nginx
     fronted deployment. -->
<assembly
        xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0"
        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
        xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
    <id>dolphinscheduler-nginx</id>
    <formats>
        <format>tar.gz</format>
    </formats>
    <includeBaseDirectory>true</includeBaseDirectory>
    <baseDirectory>${project.build.finalName}-dolphinscheduler-bin</baseDirectory>

    <fileSets>
        <!--alert start-->
        <fileSet>
            <directory>${basedir}/../dolphinscheduler-alert/src/main/resources</directory>
            <includes>
                <include>**/*.properties</include>
                <include>**/*.xml</include>
                <include>**/*.json</include>
                <include>**/*.ftl</include>
            </includes>
            <outputDirectory>./conf</outputDirectory>
        </fileSet>
        <!--alert end-->

        <!--api start-->
        <fileSet>
            <directory>src/main/resources</directory>
            <includes>
                <include>**/*.properties</include>
                <include>**/*.xml</include>
                <include>**/*.json</include>
            </includes>
            <outputDirectory>conf</outputDirectory>
        </fileSet>
        <fileSet>
            <directory>${basedir}/../dolphinscheduler-common/src/main/resources</directory>
            <includes>
                <include>**/*.properties</include>
                <include>**/*.xml</include>
                <include>**/*.json</include>
            </includes>
            <outputDirectory>conf</outputDirectory>
        </fileSet>
        <fileSet>
            <directory>${basedir}/../dolphinscheduler-common/src/main/resources/bin</directory>
            <includes>
                <include>*.*</include>
            </includes>
            <directoryMode>755</directoryMode>
            <outputDirectory>bin</outputDirectory>
        </fileSet>
        <fileSet>
            <directory>${basedir}/../dolphinscheduler-dao/src/main/resources</directory>
            <includes>
                <include>**/*.properties</include>
                <include>**/*.xml</include>
                <include>**/*.json</include>
            </includes>
            <outputDirectory>conf</outputDirectory>
        </fileSet>
        <fileSet>
            <directory>${basedir}/../dolphinscheduler-api/src/main/resources</directory>
            <includes>
                <include>**/*.properties</include>
                <include>**/*.xml</include>
                <include>**/*.json</include>
            </includes>
            <outputDirectory>conf</outputDirectory>
        </fileSet>
        <!--api end-->

        <!--server start-->
        <fileSet>
            <directory>${basedir}/../dolphinscheduler-server/src/main/resources</directory>
            <includes>
                <include>**/*.properties</include>
                <include>**/*.xml</include>
                <include>**/*.json</include>
                <include>config/*.*</include>
            </includes>
            <outputDirectory>conf</outputDirectory>
        </fileSet>
        <fileSet>
            <directory>${basedir}/../dolphinscheduler-common/src/main/resources</directory>
            <includes>
                <include>**/*.properties</include>
                <include>**/*.xml</include>
                <include>**/*.json</include>
            </includes>
            <outputDirectory>conf</outputDirectory>
        </fileSet>
        <fileSet>
            <directory>${basedir}/../dolphinscheduler-common/src/main/resources/bin</directory>
            <includes>
                <include>*.*</include>
            </includes>
            <directoryMode>755</directoryMode>
            <outputDirectory>bin</outputDirectory>
        </fileSet>
        <fileSet>
            <directory>${basedir}/../dolphinscheduler-dao/src/main/resources</directory>
            <includes>
                <include>**/*.properties</include>
                <include>**/*.xml</include>
                <include>**/*.json</include>
                <include>**/*.yml</include>
            </includes>
            <outputDirectory>conf</outputDirectory>
        </fileSet>
        <!--server end-->

        <!-- exploded server/api/alert distributions go to the archive root -->
        <fileSet>
            <directory>${basedir}/../dolphinscheduler-server/target/dolphinscheduler-server-${project.version}</directory>
            <includes>
                <include>**/*.*</include>
            </includes>
            <outputDirectory>.</outputDirectory>
        </fileSet>

        <fileSet>
            <directory>${basedir}/../dolphinscheduler-api/target/dolphinscheduler-api-${project.version}</directory>
            <includes>
                <include>**/*.*</include>
            </includes>
            <outputDirectory>.</outputDirectory>
        </fileSet>

        <fileSet>
            <directory>${basedir}/../dolphinscheduler-alert/target/dolphinscheduler-alert-${project.version}</directory>
            <includes>
                <include>**/*.*</include>
            </includes>
            <outputDirectory>.</outputDirectory>
        </fileSet>

        <!-- built frontend plus its install script -->
        <fileSet>
            <directory>${basedir}/../dolphinscheduler-ui/dist</directory>
            <includes>
                <include>**/*.*</include>
            </includes>
            <outputDirectory>./ui/dist</outputDirectory>
        </fileSet>

        <fileSet>
            <directory>${basedir}/../dolphinscheduler-ui</directory>
            <includes>
                <include>install-dolphinscheduler-ui.sh</include>
            </includes>
            <outputDirectory>./ui</outputDirectory>
        </fileSet>

        <fileSet>
            <directory>${basedir}/../sql</directory>
            <includes>
                <include>**/*</include>
            </includes>
            <outputDirectory>./sql</outputDirectory>
        </fileSet>

        <fileSet>
            <directory>${basedir}/../script</directory>
            <includes>
                <include>*.*</include>
            </includes>
            <outputDirectory>./script</outputDirectory>
        </fileSet>

        <fileSet>
            <directory>${basedir}/../script</directory>
            <includes>
                <include>env/*.*</include>
            </includes>
            <outputDirectory>./conf</outputDirectory>
        </fileSet>

        <fileSet>
            <directory>${basedir}/../script</directory>
            <includes>
                <include>start-all.sh</include>
                <include>stop-all.sh</include>
                <include>dolphinscheduler-daemon.sh</include>
            </includes>
            <outputDirectory>./bin</outputDirectory>
        </fileSet>

        <!-- top-level install/upgrade scripts and legal files -->
        <fileSet>
            <directory>${basedir}/.././</directory>
            <includes>
                <include>*.sh</include>
                <include>*.py</include>
                <include>DISCLAIMER</include>
            </includes>
            <outputDirectory>.</outputDirectory>
        </fileSet>

        <fileSet>
            <directory>${basedir}/release-docs</directory>
            <useDefaultExcludes>true</useDefaultExcludes>
            <includes>
                <include>**/*</include>
            </includes>
            <outputDirectory>.</outputDirectory>
        </fileSet>
    </fileSets>

    <dependencySets>
        <dependencySet>
            <outputDirectory>lib</outputDirectory>
            <useProjectArtifact>true</useProjectArtifact>
            <excludes>
                <!-- provided by the servlet container / conflicting logging binding -->
                <exclude>javax.servlet:servlet-api</exclude>
                <exclude>org.eclipse.jetty.aggregate:jetty-all</exclude>
                <exclude>org.slf4j:slf4j-log4j12</exclude>
            </excludes>
        </dependencySet>
    </dependencySets>
</assembly>
@ -0,0 +1,126 @@ |
|||||||
|
/* |
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one or more |
||||||
|
* contributor license agreements. See the NOTICE file distributed with |
||||||
|
* this work for additional information regarding copyright ownership. |
||||||
|
* The ASF licenses this file to You under the Apache License, Version 2.0 |
||||||
|
* (the "License"); you may not use this file except in compliance with |
||||||
|
* the License. You may obtain a copy of the License at |
||||||
|
* |
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
* |
||||||
|
* Unless required by applicable law or agreed to in writing, software |
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS, |
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||||
|
* See the License for the specific language governing permissions and |
||||||
|
* limitations under the License. |
||||||
|
*/ |
||||||
|
package org.apache.dolphinscheduler.server.monitor; |
||||||
|
|
||||||
|
import org.apache.commons.lang3.StringUtils; |
||||||
|
import org.apache.dolphinscheduler.common.utils.CollectionUtils; |
||||||
|
import org.slf4j.Logger; |
||||||
|
import org.slf4j.LoggerFactory; |
||||||
|
import org.springframework.beans.factory.annotation.Autowired; |
||||||
|
import org.springframework.stereotype.Component; |
||||||
|
|
||||||
|
import java.util.HashSet; |
||||||
|
import java.util.Map; |
||||||
|
import java.util.Set; |
||||||
|
|
||||||
|
/** |
||||||
|
* abstract server monitor and auto restart server |
||||||
|
*/ |
||||||
|
@Component |
||||||
|
public abstract class AbstractMonitor implements Monitor { |
||||||
|
|
||||||
|
private static final Logger logger = LoggerFactory.getLogger(AbstractMonitor.class); |
||||||
|
|
||||||
|
|
||||||
|
@Autowired |
||||||
|
private RunConfig runConfig; |
||||||
|
|
||||||
|
/** |
||||||
|
* monitor server and restart |
||||||
|
*/ |
||||||
|
@Override |
||||||
|
public void monitor(String masterPath,String workerPath,Integer port,String installPath) { |
||||||
|
try { |
||||||
|
restartServer(masterPath,port,installPath); |
||||||
|
restartServer(workerPath,port,installPath); |
||||||
|
}catch (Exception e){ |
||||||
|
logger.error("server start up error",e); |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
private void restartServer(String path,Integer port,String installPath) throws Exception{ |
||||||
|
|
||||||
|
String type = path.split("/")[2]; |
||||||
|
String serverName = null; |
||||||
|
String nodes = null; |
||||||
|
if ("masters".equals(type)){ |
||||||
|
serverName = "master-server"; |
||||||
|
nodes = runConfig.getMasters(); |
||||||
|
}else if ("workers".equals(type)){ |
||||||
|
serverName = "worker-server"; |
||||||
|
nodes = runConfig.getWorkers(); |
||||||
|
} |
||||||
|
|
||||||
|
Map<String, String> activeNodeMap = getActiveNodesByPath(path); |
||||||
|
|
||||||
|
Set<String> needRestartServer = getNeedRestartServer(getRunConfigServer(nodes), |
||||||
|
activeNodeMap.keySet()); |
||||||
|
|
||||||
|
for (String node : needRestartServer){ |
||||||
|
// os.system('ssh -p ' + ssh_port + ' ' + self.get_ip_by_hostname(master) + ' sh ' + install_path + '/bin/dolphinscheduler-daemon.sh start master-server')
|
||||||
|
String runCmd = "ssh -p " + port + " " + node + " sh " + installPath + "/bin/dolphinscheduler-daemon.sh start " + serverName; |
||||||
|
Runtime.getRuntime().exec(runCmd); |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* get need restart server |
||||||
|
* @param deployedNodes deployedNodes |
||||||
|
* @param activeNodes activeNodes |
||||||
|
* @return need restart server |
||||||
|
*/ |
||||||
|
private Set<String> getNeedRestartServer(Set<String> deployedNodes,Set<String> activeNodes){ |
||||||
|
if (CollectionUtils.isEmpty(activeNodes)){ |
||||||
|
return deployedNodes; |
||||||
|
} |
||||||
|
|
||||||
|
Set<String> result = new HashSet<>(); |
||||||
|
|
||||||
|
result.addAll(deployedNodes); |
||||||
|
result.removeAll(activeNodes); |
||||||
|
|
||||||
|
return result; |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* run config masters/workers |
||||||
|
* @return master set/worker set |
||||||
|
*/ |
||||||
|
private Set<String> getRunConfigServer(String nodes){ |
||||||
|
Set<String> nodeSet = new HashSet(); |
||||||
|
|
||||||
|
|
||||||
|
if (StringUtils.isEmpty(nodes)){ |
||||||
|
return null; |
||||||
|
} |
||||||
|
|
||||||
|
String[] nodeArr = nodes.split(","); |
||||||
|
|
||||||
|
for (String node : nodeArr){ |
||||||
|
nodeSet.add(node); |
||||||
|
} |
||||||
|
|
||||||
|
return nodeSet; |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* get active nodes by path |
||||||
|
* @param path path |
||||||
|
* @return active nodes |
||||||
|
*/ |
||||||
|
protected abstract Map<String,String> getActiveNodesByPath(String path); |
||||||
|
} |
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.dolphinscheduler.server.monitor;

/**
 * server monitor and auto restart server
 */
public interface Monitor {

    /**
     * monitor server and restart
     *
     * @param masterPath  registry path holding the active masters
     * @param workerPath  registry path holding the active workers
     * @param port        ssh port used to reach remote hosts
     * @param installPath dolphinscheduler install path on the remote hosts
     */
    void monitor(String masterPath, String workerPath, Integer port, String installPath);
}
@ -0,0 +1,63 @@ |
|||||||
|
/* |
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one or more |
||||||
|
* contributor license agreements. See the NOTICE file distributed with |
||||||
|
* this work for additional information regarding copyright ownership. |
||||||
|
* The ASF licenses this file to You under the Apache License, Version 2.0 |
||||||
|
* (the "License"); you may not use this file except in compliance with |
||||||
|
* the License. You may obtain a copy of the License at |
||||||
|
* |
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
* |
||||||
|
* Unless required by applicable law or agreed to in writing, software |
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS, |
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||||
|
* See the License for the specific language governing permissions and |
||||||
|
* limitations under the License. |
||||||
|
*/ |
||||||
|
package org.apache.dolphinscheduler.server.monitor; |
||||||
|
|
||||||
|
import org.slf4j.Logger; |
||||||
|
import org.slf4j.LoggerFactory; |
||||||
|
import org.springframework.beans.factory.annotation.Autowired; |
||||||
|
import org.springframework.boot.CommandLineRunner; |
||||||
|
import org.springframework.boot.WebApplicationType; |
||||||
|
import org.springframework.boot.builder.SpringApplicationBuilder; |
||||||
|
import org.springframework.context.annotation.ComponentScan; |
||||||
|
|
||||||
|
/** |
||||||
|
* monitor server |
||||||
|
*/ |
||||||
|
@ComponentScan("org.apache.dolphinscheduler") |
||||||
|
public class MonitorServer implements CommandLineRunner { |
||||||
|
|
||||||
|
private static Integer ARGS_LENGTH = 4; |
||||||
|
|
||||||
|
private static final Logger logger = LoggerFactory.getLogger(MonitorServer.class); |
||||||
|
|
||||||
|
/** |
||||||
|
* monitor |
||||||
|
*/ |
||||||
|
@Autowired |
||||||
|
private Monitor monitor; |
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
public static void main(String[] args) throws Exception{ |
||||||
|
|
||||||
|
new SpringApplicationBuilder(MonitorServer.class).web(WebApplicationType.NONE).run(args); |
||||||
|
} |
||||||
|
|
||||||
|
@Override |
||||||
|
public void run(String... args) throws Exception { |
||||||
|
if (args.length != ARGS_LENGTH){ |
||||||
|
logger.error("Usage: <masterPath> <workerPath> <port> <installPath>"); |
||||||
|
return; |
||||||
|
} |
||||||
|
|
||||||
|
String masterPath = args[0]; |
||||||
|
String workerPath = args[1]; |
||||||
|
Integer port = Integer.parseInt(args[2]); |
||||||
|
String installPath = args[3]; |
||||||
|
monitor.monitor(masterPath,workerPath,port,installPath); |
||||||
|
} |
||||||
|
} |
@ -0,0 +1,85 @@ |
|||||||
|
/* |
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one or more |
||||||
|
* contributor license agreements. See the NOTICE file distributed with |
||||||
|
* this work for additional information regarding copyright ownership. |
||||||
|
* The ASF licenses this file to You under the Apache License, Version 2.0 |
||||||
|
* (the "License"); you may not use this file except in compliance with |
||||||
|
* the License. You may obtain a copy of the License at |
||||||
|
* |
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
* |
||||||
|
* Unless required by applicable law or agreed to in writing, software |
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS, |
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||||
|
* See the License for the specific language governing permissions and |
||||||
|
* limitations under the License. |
||||||
|
*/ |
||||||
|
package org.apache.dolphinscheduler.server.monitor; |
||||||
|
|
||||||
|
import org.springframework.beans.factory.annotation.Value; |
||||||
|
import org.springframework.context.annotation.PropertySource; |
||||||
|
import org.springframework.stereotype.Component; |
||||||
|
|
||||||
|
/** |
||||||
|
* zookeeper conf |
||||||
|
*/ |
||||||
|
@Component |
||||||
|
@PropertySource("classpath:config/run_config.conf") |
||||||
|
public class RunConfig { |
||||||
|
|
||||||
|
//zk connect config
|
||||||
|
@Value("${masters}") |
||||||
|
private String masters; |
||||||
|
|
||||||
|
@Value("${workers}") |
||||||
|
private String workers; |
||||||
|
|
||||||
|
@Value("${alertServer}") |
||||||
|
private String alertServer; |
||||||
|
|
||||||
|
@Value("${apiServers}") |
||||||
|
private String apiServers; |
||||||
|
|
||||||
|
@Value("${sshPort}") |
||||||
|
private String sshPort; |
||||||
|
|
||||||
|
public String getMasters() { |
||||||
|
return masters; |
||||||
|
} |
||||||
|
|
||||||
|
public void setMasters(String masters) { |
||||||
|
this.masters = masters; |
||||||
|
} |
||||||
|
|
||||||
|
public String getWorkers() { |
||||||
|
return workers; |
||||||
|
} |
||||||
|
|
||||||
|
public void setWorkers(String workers) { |
||||||
|
this.workers = workers; |
||||||
|
} |
||||||
|
|
||||||
|
public String getAlertServer() { |
||||||
|
return alertServer; |
||||||
|
} |
||||||
|
|
||||||
|
public void setAlertServer(String alertServer) { |
||||||
|
this.alertServer = alertServer; |
||||||
|
} |
||||||
|
|
||||||
|
public String getApiServers() { |
||||||
|
return apiServers; |
||||||
|
} |
||||||
|
|
||||||
|
public void setApiServers(String apiServers) { |
||||||
|
this.apiServers = apiServers; |
||||||
|
} |
||||||
|
|
||||||
|
public String getSshPort() { |
||||||
|
return sshPort; |
||||||
|
} |
||||||
|
|
||||||
|
public void setSshPort(String sshPort) { |
||||||
|
this.sshPort = sshPort; |
||||||
|
} |
||||||
|
} |
@ -0,0 +1,62 @@ |
|||||||
|
/* |
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one or more |
||||||
|
* contributor license agreements. See the NOTICE file distributed with |
||||||
|
* this work for additional information regarding copyright ownership. |
||||||
|
* The ASF licenses this file to You under the Apache License, Version 2.0 |
||||||
|
* (the "License"); you may not use this file except in compliance with |
||||||
|
* the License. You may obtain a copy of the License at |
||||||
|
* |
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
* |
||||||
|
* Unless required by applicable law or agreed to in writing, software |
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS, |
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||||
|
* See the License for the specific language governing permissions and |
||||||
|
* limitations under the License. |
||||||
|
*/ |
||||||
|
package org.apache.dolphinscheduler.server.monitor; |
||||||
|
|
||||||
|
import org.apache.dolphinscheduler.common.zk.ZookeeperOperator; |
||||||
|
import org.springframework.beans.factory.annotation.Autowired; |
||||||
|
import org.springframework.stereotype.Component; |
||||||
|
|
||||||
|
import java.util.HashMap; |
||||||
|
import java.util.List; |
||||||
|
import java.util.Map; |
||||||
|
|
||||||
|
/** |
||||||
|
* zk monitor server impl |
||||||
|
*/ |
||||||
|
@Component |
||||||
|
public class ZKMonitorImpl extends AbstractMonitor { |
||||||
|
|
||||||
|
/** |
||||||
|
* zookeeper operator |
||||||
|
*/ |
||||||
|
@Autowired |
||||||
|
private ZookeeperOperator zookeeperOperator; |
||||||
|
|
||||||
|
|
||||||
|
/** |
||||||
|
* get active nodes map by path |
||||||
|
* @param path path |
||||||
|
* @return active nodes map |
||||||
|
*/ |
||||||
|
@Override |
||||||
|
protected Map<String,String> getActiveNodesByPath(String path) { |
||||||
|
|
||||||
|
Map<String,String> maps = new HashMap<>(); |
||||||
|
|
||||||
|
List<String> childrenList = zookeeperOperator.getChildrenKeys(path); |
||||||
|
|
||||||
|
if (childrenList == null){ |
||||||
|
return maps; |
||||||
|
} |
||||||
|
|
||||||
|
for (String child : childrenList){ |
||||||
|
maps.put(child.split("_")[0],child); |
||||||
|
} |
||||||
|
|
||||||
|
return maps; |
||||||
|
} |
||||||
|
} |
@ -0,0 +1,59 @@ |
|||||||
|
/* |
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one or more |
||||||
|
* contributor license agreements. See the NOTICE file distributed with |
||||||
|
* this work for additional information regarding copyright ownership. |
||||||
|
* The ASF licenses this file to You under the Apache License, Version 2.0 |
||||||
|
* (the "License"); you may not use this file except in compliance with |
||||||
|
* the License. You may obtain a copy of the License at |
||||||
|
* |
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
* |
||||||
|
* Unless required by applicable law or agreed to in writing, software |
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS, |
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||||
|
* See the License for the specific language governing permissions and |
||||||
|
* limitations under the License. |
||||||
|
*/ |
||||||
|
package org.apache.dolphinscheduler.server.utils; |
||||||
|
|
||||||
|
import org.apache.dolphinscheduler.common.zk.ZookeeperOperator; |
||||||
|
import org.slf4j.Logger; |
||||||
|
import org.slf4j.LoggerFactory; |
||||||
|
import org.springframework.beans.factory.annotation.Autowired; |
||||||
|
import org.springframework.boot.CommandLineRunner; |
||||||
|
import org.springframework.boot.WebApplicationType; |
||||||
|
import org.springframework.boot.builder.SpringApplicationBuilder; |
||||||
|
import org.springframework.context.annotation.ComponentScan; |
||||||
|
|
||||||
|
@ComponentScan("org.apache.dolphinscheduler") |
||||||
|
public class RemoveZKNode implements CommandLineRunner { |
||||||
|
|
||||||
|
private static Integer ARGS_LENGTH = 1; |
||||||
|
|
||||||
|
private static final Logger logger = LoggerFactory.getLogger(RemoveZKNode.class); |
||||||
|
|
||||||
|
|
||||||
|
/** |
||||||
|
* zookeeper operator |
||||||
|
*/ |
||||||
|
@Autowired |
||||||
|
private ZookeeperOperator zookeeperOperator; |
||||||
|
|
||||||
|
public static void main(String[] args) { |
||||||
|
|
||||||
|
new SpringApplicationBuilder(RemoveZKNode.class).web(WebApplicationType.NONE).run(args); |
||||||
|
} |
||||||
|
|
||||||
|
@Override |
||||||
|
public void run(String... args) throws Exception { |
||||||
|
|
||||||
|
if (args.length != ARGS_LENGTH){ |
||||||
|
logger.error("Usage: <node>"); |
||||||
|
return; |
||||||
|
} |
||||||
|
|
||||||
|
zookeeperOperator.remove(args[0]); |
||||||
|
zookeeperOperator.close(); |
||||||
|
|
||||||
|
} |
||||||
|
} |
@ -0,0 +1,92 @@ |
|||||||
|
/* |
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one or more |
||||||
|
* contributor license agreements. See the NOTICE file distributed with |
||||||
|
* this work for additional information regarding copyright ownership. |
||||||
|
* The ASF licenses this file to You under the Apache License, Version 2.0 |
||||||
|
* (the "License"); you may not use this file except in compliance with |
||||||
|
* the License. You may obtain a copy of the License at |
||||||
|
* |
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
* |
||||||
|
* Unless required by applicable law or agreed to in writing, software |
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS, |
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||||
|
* See the License for the specific language governing permissions and |
||||||
|
* limitations under the License. |
||||||
|
*/ |
||||||
|
package org.apache.dolphinscheduler.server.worker.log; |
||||||
|
|
||||||
|
|
||||||
|
import ch.qos.logback.classic.pattern.MessageConverter; |
||||||
|
import ch.qos.logback.classic.spi.ILoggingEvent; |
||||||
|
import lombok.extern.slf4j.Slf4j; |
||||||
|
import org.apache.commons.lang3.StringUtils; |
||||||
|
import org.apache.dolphinscheduler.common.Constants; |
||||||
|
import org.apache.dolphinscheduler.server.utils.SensitiveLogUtil; |
||||||
|
|
||||||
|
import java.util.regex.Matcher; |
||||||
|
import java.util.regex.Pattern; |
||||||
|
|
||||||
|
/** |
||||||
|
* sensitive data log converter |
||||||
|
*/ |
||||||
|
@Slf4j |
||||||
|
public class SensitiveDataConverter extends MessageConverter { |
||||||
|
|
||||||
|
/** |
||||||
|
* password pattern |
||||||
|
*/ |
||||||
|
private final Pattern pwdPattern = Pattern.compile(Constants.DATASOURCE_PASSWORD_REGEX); |
||||||
|
|
||||||
|
|
||||||
|
@Override |
||||||
|
public String convert(ILoggingEvent event) { |
||||||
|
|
||||||
|
// get original log
|
||||||
|
String requestLogMsg = event.getFormattedMessage(); |
||||||
|
|
||||||
|
// desensitization log
|
||||||
|
return convertMsg(requestLogMsg); |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* deal with sensitive log |
||||||
|
* |
||||||
|
* @param oriLogMsg original log |
||||||
|
*/ |
||||||
|
private String convertMsg(final String oriLogMsg) { |
||||||
|
|
||||||
|
String tempLogMsg = oriLogMsg; |
||||||
|
|
||||||
|
if (StringUtils.isNotEmpty(tempLogMsg)) { |
||||||
|
tempLogMsg = passwordHandler(pwdPattern, tempLogMsg); |
||||||
|
} |
||||||
|
return tempLogMsg; |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* password regex |
||||||
|
* |
||||||
|
* @param logMsg original log |
||||||
|
*/ |
||||||
|
private String passwordHandler(Pattern pwdPattern, String logMsg) { |
||||||
|
|
||||||
|
Matcher matcher = pwdPattern.matcher(logMsg); |
||||||
|
|
||||||
|
StringBuffer sb = new StringBuffer(logMsg.length()); |
||||||
|
|
||||||
|
while (matcher.find()) { |
||||||
|
|
||||||
|
String password = matcher.group(); |
||||||
|
|
||||||
|
String maskPassword = SensitiveLogUtil.maskDataSourcePwd(password); |
||||||
|
|
||||||
|
matcher.appendReplacement(sb, maskPassword); |
||||||
|
} |
||||||
|
matcher.appendTail(sb); |
||||||
|
|
||||||
|
return sb.toString(); |
||||||
|
} |
||||||
|
|
||||||
|
|
||||||
|
} |
@ -0,0 +1,131 @@ |
|||||||
|
/* |
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one or more |
||||||
|
* contributor license agreements. See the NOTICE file distributed with |
||||||
|
* this work for additional information regarding copyright ownership. |
||||||
|
* The ASF licenses this file to You under the Apache License, Version 2.0 |
||||||
|
* (the "License"); you may not use this file except in compliance with |
||||||
|
* the License. You may obtain a copy of the License at |
||||||
|
* |
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
* |
||||||
|
* Unless required by applicable law or agreed to in writing, software |
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS, |
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||||
|
* See the License for the specific language governing permissions and |
||||||
|
* limitations under the License. |
||||||
|
*/ |
||||||
|
|
||||||
|
package org.apache.dolphinscheduler.server.utils; |
||||||
|
|
||||||
|
import org.apache.dolphinscheduler.common.enums.ProgramType; |
||||||
|
import org.apache.dolphinscheduler.common.process.ResourceInfo; |
||||||
|
import org.apache.dolphinscheduler.common.task.flink.FlinkParameters; |
||||||
|
import org.junit.Before; |
||||||
|
import org.junit.Test; |
||||||
|
import org.slf4j.Logger; |
||||||
|
import org.slf4j.LoggerFactory; |
||||||
|
|
||||||
|
import java.util.List; |
||||||
|
|
||||||
|
import static org.junit.Assert.assertEquals; |
||||||
|
import static org.junit.Assert.assertSame; |
||||||
|
|
||||||
|
/** |
||||||
|
* Test FlinkArgsUtils |
||||||
|
*/ |
||||||
|
public class FlinkArgsUtilsTest { |
||||||
|
|
||||||
|
private static final Logger logger = LoggerFactory.getLogger(FlinkArgsUtilsTest.class); |
||||||
|
|
||||||
|
public String mode = "cluster"; |
||||||
|
public int slot = 2; |
||||||
|
public String appName = "testFlink"; |
||||||
|
public int taskManager = 4; |
||||||
|
public String taskManagerMemory = "2G"; |
||||||
|
public String jobManagerMemory = "4G"; |
||||||
|
public ProgramType programType = ProgramType.JAVA; |
||||||
|
public String mainClass = "com.test"; |
||||||
|
public ResourceInfo mainJar = null; |
||||||
|
public String mainArgs = "testArgs"; |
||||||
|
public String queue = "queue1"; |
||||||
|
public String others = "--input file:///home"; |
||||||
|
|
||||||
|
|
||||||
|
@Before |
||||||
|
public void setUp() throws Exception { |
||||||
|
|
||||||
|
ResourceInfo main = new ResourceInfo(); |
||||||
|
main.setRes("testflink-1.0.0-SNAPSHOT.jar"); |
||||||
|
mainJar = main; |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* Test buildArgs |
||||||
|
*/ |
||||||
|
@Test |
||||||
|
public void testBuildArgs() { |
||||||
|
|
||||||
|
//Define params
|
||||||
|
FlinkParameters param = new FlinkParameters(); |
||||||
|
param.setDeployMode(mode); |
||||||
|
param.setMainClass(mainClass); |
||||||
|
param.setAppName(appName); |
||||||
|
param.setSlot(slot); |
||||||
|
param.setTaskManager(taskManager); |
||||||
|
param.setJobManagerMemory(jobManagerMemory); |
||||||
|
param.setTaskManagerMemory(taskManagerMemory); |
||||||
|
param.setMainJar(mainJar); |
||||||
|
param.setProgramType(programType); |
||||||
|
param.setMainArgs(mainArgs); |
||||||
|
param.setQueue(queue); |
||||||
|
param.setOthers(others); |
||||||
|
|
||||||
|
//Invoke buildArgs
|
||||||
|
List<String> result = FlinkArgsUtils.buildArgs(param); |
||||||
|
for (String s : result) { |
||||||
|
logger.info(s); |
||||||
|
} |
||||||
|
|
||||||
|
//Expected values and order
|
||||||
|
assertEquals(result.size(),20); |
||||||
|
|
||||||
|
assertEquals(result.get(0),"-m"); |
||||||
|
assertEquals(result.get(1),"yarn-cluster"); |
||||||
|
|
||||||
|
assertEquals(result.get(2),"-ys"); |
||||||
|
assertSame(Integer.valueOf(result.get(3)),slot); |
||||||
|
|
||||||
|
assertEquals(result.get(4),"-ynm"); |
||||||
|
assertEquals(result.get(5),appName); |
||||||
|
|
||||||
|
assertEquals(result.get(6),"-yn"); |
||||||
|
assertSame(Integer.valueOf(result.get(7)),taskManager); |
||||||
|
|
||||||
|
assertEquals(result.get(8),"-yjm"); |
||||||
|
assertEquals(result.get(9),jobManagerMemory); |
||||||
|
|
||||||
|
assertEquals(result.get(10),"-ytm"); |
||||||
|
assertEquals(result.get(11),taskManagerMemory); |
||||||
|
|
||||||
|
assertEquals(result.get(12),"-d"); |
||||||
|
|
||||||
|
assertEquals(result.get(13),"-c"); |
||||||
|
assertEquals(result.get(14),mainClass); |
||||||
|
|
||||||
|
assertEquals(result.get(15),mainJar.getRes()); |
||||||
|
assertEquals(result.get(16),mainArgs); |
||||||
|
|
||||||
|
assertEquals(result.get(17),"--qu"); |
||||||
|
assertEquals(result.get(18),queue); |
||||||
|
|
||||||
|
assertEquals(result.get(19),others); |
||||||
|
|
||||||
|
//Others param without --qu
|
||||||
|
FlinkParameters param1 = new FlinkParameters(); |
||||||
|
param1.setQueue(queue); |
||||||
|
param1.setDeployMode(mode); |
||||||
|
result = FlinkArgsUtils.buildArgs(param1); |
||||||
|
assertEquals(result.size(),5); |
||||||
|
|
||||||
|
} |
||||||
|
} |
@ -0,0 +1,37 @@ |
|||||||
|
/* |
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one or more |
||||||
|
* contributor license agreements. See the NOTICE file distributed with |
||||||
|
* this work for additional information regarding copyright ownership. |
||||||
|
* The ASF licenses this file to You under the Apache License, Version 2.0 |
||||||
|
* (the "License"); you may not use this file except in compliance with |
||||||
|
* the License. You may obtain a copy of the License at |
||||||
|
* |
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
* |
||||||
|
* Unless required by applicable law or agreed to in writing, software |
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS, |
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||||
|
* See the License for the specific language governing permissions and |
||||||
|
* limitations under the License. |
||||||
|
*/ |
||||||
|
package org.apache.dolphinscheduler.server.utils; |
||||||
|
|
||||||
|
|
||||||
|
import org.apache.dolphinscheduler.common.Constants; |
||||||
|
import org.junit.Assert; |
||||||
|
import org.junit.Test; |
||||||
|
|
||||||
|
|
||||||
|
public class SensitiveLogUtilTest { |
||||||
|
|
||||||
|
@Test |
||||||
|
public void testMaskDataSourcePwd() { |
||||||
|
|
||||||
|
String password = "123456"; |
||||||
|
String emptyPassword = ""; |
||||||
|
|
||||||
|
Assert.assertEquals(Constants.PASSWORD_DEFAULT, SensitiveLogUtil.maskDataSourcePwd(password)); |
||||||
|
Assert.assertEquals("", SensitiveLogUtil.maskDataSourcePwd(emptyPassword)); |
||||||
|
|
||||||
|
} |
||||||
|
} |
@ -0,0 +1,92 @@ |
|||||||
|
/* |
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one or more |
||||||
|
* contributor license agreements. See the NOTICE file distributed with |
||||||
|
* this work for additional information regarding copyright ownership. |
||||||
|
* The ASF licenses this file to You under the Apache License, Version 2.0 |
||||||
|
* (the "License"); you may not use this file except in compliance with |
||||||
|
* the License. You may obtain a copy of the License at |
||||||
|
* |
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
* |
||||||
|
* Unless required by applicable law or agreed to in writing, software |
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS, |
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||||
|
* See the License for the specific language governing permissions and |
||||||
|
* limitations under the License. |
||||||
|
*/ |
||||||
|
package org.apache.dolphinscheduler.server.worker.log; |
||||||
|
|
||||||
|
|
||||||
|
import org.apache.dolphinscheduler.common.Constants; |
||||||
|
import org.apache.dolphinscheduler.server.utils.SensitiveLogUtil; |
||||||
|
import org.junit.Assert; |
||||||
|
import org.junit.Test; |
||||||
|
import org.slf4j.Logger; |
||||||
|
import org.slf4j.LoggerFactory; |
||||||
|
|
||||||
|
import java.util.regex.Matcher; |
||||||
|
import java.util.regex.Pattern; |
||||||
|
|
||||||
|
public class SensitiveDataConverterTest { |
||||||
|
|
||||||
|
private final Logger logger = LoggerFactory.getLogger(SensitiveDataConverterTest.class); |
||||||
|
|
||||||
|
/** |
||||||
|
* password pattern |
||||||
|
*/ |
||||||
|
private final Pattern pwdPattern = Pattern.compile(Constants.DATASOURCE_PASSWORD_REGEX); |
||||||
|
|
||||||
|
|
||||||
|
/** |
||||||
|
* mask sensitive logMsg - sql task datasource password |
||||||
|
*/ |
||||||
|
@Test |
||||||
|
public void testPwdLogMsgConverter() { |
||||||
|
|
||||||
|
String logMsg = "{\"address\":\"jdbc:mysql://192.168.xx.xx:3306\"," + |
||||||
|
"\"database\":\"carbond\"," + |
||||||
|
"\"jdbcUrl\":\"jdbc:mysql://192.168.xx.xx:3306/ods\"," + |
||||||
|
"\"user\":\"view\"," + |
||||||
|
"\"password\":\"view1\"}"; |
||||||
|
|
||||||
|
String maskLogMsg = "{\"address\":\"jdbc:mysql://192.168.xx.xx:3306\"," + |
||||||
|
"\"database\":\"carbond\"," + |
||||||
|
"\"jdbcUrl\":\"jdbc:mysql://192.168.xx.xx:3306/ods\"," + |
||||||
|
"\"user\":\"view\"," + |
||||||
|
"\"password\":\"******\"}"; |
||||||
|
|
||||||
|
|
||||||
|
logger.info("parameter : {}", logMsg); |
||||||
|
logger.info("parameter : {}", passwordHandler(pwdPattern, logMsg)); |
||||||
|
|
||||||
|
Assert.assertNotEquals(logMsg, passwordHandler(pwdPattern, logMsg)); |
||||||
|
Assert.assertEquals(maskLogMsg, passwordHandler(pwdPattern, logMsg)); |
||||||
|
|
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* password regex test |
||||||
|
* |
||||||
|
* @param logMsg original log |
||||||
|
*/ |
||||||
|
private static String passwordHandler(Pattern pattern, String logMsg) { |
||||||
|
|
||||||
|
Matcher matcher = pattern.matcher(logMsg); |
||||||
|
|
||||||
|
StringBuffer sb = new StringBuffer(logMsg.length()); |
||||||
|
|
||||||
|
while (matcher.find()) { |
||||||
|
|
||||||
|
String password = matcher.group(); |
||||||
|
|
||||||
|
String maskPassword = SensitiveLogUtil.maskDataSourcePwd(password); |
||||||
|
|
||||||
|
matcher.appendReplacement(sb, maskPassword); |
||||||
|
} |
||||||
|
matcher.appendTail(sb); |
||||||
|
|
||||||
|
return sb.toString(); |
||||||
|
} |
||||||
|
|
||||||
|
|
||||||
|
} |
@ -1,34 +0,0 @@ |
|||||||
# |
|
||||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
|
||||||
# contributor license agreements. See the NOTICE file distributed with |
|
||||||
# this work for additional information regarding copyright ownership. |
|
||||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
|
||||||
# (the "License"); you may not use this file except in compliance with |
|
||||||
# the License. You may obtain a copy of the License at |
|
||||||
# |
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0 |
|
||||||
# |
|
||||||
# Unless required by applicable law or agreed to in writing, software |
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, |
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
||||||
# See the License for the specific language governing permissions and |
|
||||||
# limitations under the License. |
|
||||||
# |
|
||||||
|
|
||||||
import time
import sys
from kazoo.client import KazooClient


class ZkClient:
    """Thin wrapper around a ZooKeeper session used to delete one node tree.

    Command line: <script> <zk_hosts> <node_path>
    """

    def __init__(self):
        # sys.argv[1]: zookeeper host list, e.g. "host1:2181,host2:2181"
        self.zk = KazooClient(hosts=sys.argv[1])
        self.zk.start()

    def del_node(self):
        # sys.argv[2]: znode path to delete, including all of its children
        self.zk.delete(sys.argv[2], recursive=True)
        print('deleted success')

    def __del__(self):
        self.zk.stop()


if __name__ == '__main__':
    client = ZkClient()
    client.del_node()
    # give the client threads a moment to shut down before interpreter exit
    time.sleep(2)
|
@ -1,124 +0,0 @@ |
|||||||
#!/usr/bin/env python |
|
||||||
# -*- coding:utf-8 -*- |
|
||||||
# |
|
||||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
|
||||||
# contributor license agreements. See the NOTICE file distributed with |
|
||||||
# this work for additional information regarding copyright ownership. |
|
||||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
|
||||||
# (the "License"); you may not use this file except in compliance with |
|
||||||
# the License. You may obtain a copy of the License at |
|
||||||
# |
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0 |
|
||||||
# |
|
||||||
# Unless required by applicable law or agreed to in writing, software |
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, |
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
||||||
# See the License for the specific language governing permissions and |
|
||||||
# limitations under the License. |
|
||||||
# |
|
||||||
|
|
||||||
'''
1, yum install pip
yum -y install python-pip

2, pip install kazoo
pip install kazoo

or

3, conda install kazoo
conda install -c conda-forge kazoo

run script and parameter description:
nohup python -u monitor_server.py /data1_1T/dolphinscheduler 192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181 /dolphinscheduler/masters /dolphinscheduler/workers> monitor_server.log 2>&1 &
the parameters are as follows:
/data1_1T/dolphinscheduler : the value comes from the installPath in install.sh
192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181 : the value comes from zkQuorum in install.sh
/dolphinscheduler/masters : the value comes from zkMasters in install.sh
/dolphinscheduler/workers : the value comes from zkWorkers in install.sh
'''
import sys
import socket
import os
import sched
import time
from datetime import datetime
from kazoo.client import KazooClient

# single scheduler instance driving the periodic health check
schedule = sched.scheduler(time.time, time.sleep)


class ZkClient:
    """Periodically compares the master/worker hosts declared in the run
    config against the nodes registered in ZooKeeper and restarts any
    server that has dropped out (via ssh)."""

    def __init__(self):
        # hosts configuration zk address cluster
        self.zk = KazooClient(hosts=zookeepers)
        self.zk.start()

    # read configuration files and assemble them into a dictionary
    def read_file(self, path):
        with open(path, 'r') as f:
            config = {}
            for line in f.readlines():
                arr = line.strip().split('=')
                if (len(arr) == 2):
                    config[arr[0]] = arr[1]
            return config

    # get the ip address according to hostname
    def get_ip_by_hostname(self, hostname):
        return socket.gethostbyname(hostname)

    # restart any master/worker that is configured but absent from zookeeper,
    # then re-schedule itself to run again after `inc` seconds
    def restart_server(self, inc):
        config_dict = self.read_file(install_path + '/conf/config/run_config.conf')

        master_list = config_dict.get('masters').split(',')
        print(master_list)
        master_list = list(map(lambda item: self.get_ip_by_hostname(item), master_list))

        worker_list = config_dict.get('workers').split(',')
        print(worker_list)
        worker_list = list(map(lambda item: self.get_ip_by_hostname(item), worker_list))

        ssh_port = config_dict.get("sshPort")
        print(ssh_port)

        if (self.zk.exists(masters_zk_path)):
            zk_master_list = []
            zk_master_nodes = self.zk.get_children(masters_zk_path)
            for zk_master_node in zk_master_nodes:
                # znode names look like "<ip>_<suffix>"; keep the ip part
                zk_master_list.append(zk_master_node.split('_')[0])
            restart_master_list = list(set(master_list) - set(zk_master_list))
            if (len(restart_master_list) != 0):
                for master in restart_master_list:
                    print("master " + self.get_ip_by_hostname(master) + " server has down")
                    os.system('ssh -p ' + ssh_port + ' ' + self.get_ip_by_hostname(master) + ' sh ' + install_path + '/bin/dolphinscheduler-daemon.sh start master-server')

        if (self.zk.exists(workers_zk_path)):
            zk_worker_list = []
            zk_worker_nodes = self.zk.get_children(workers_zk_path)
            for zk_worker_node in zk_worker_nodes:
                zk_worker_list.append(zk_worker_node.split('_')[0])
            restart_worker_list = list(set(worker_list) - set(zk_worker_list))
            if (len(restart_worker_list) != 0):
                for worker in restart_worker_list:
                    print("worker " + self.get_ip_by_hostname(worker) + " server has down")
                    os.system('ssh -p ' + ssh_port + ' ' + self.get_ip_by_hostname(worker) + ' sh ' + install_path + '/bin/dolphinscheduler-daemon.sh start worker-server')

        print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
        schedule.enter(inc, 0, self.restart_server, (inc,))

    # default parameter 60s
    def main(self, inc=60):
        # the enter four parameters are: interval event, priority (sequence for simultaneous execution of two events arriving at the same time), function triggered by the call,
        # the argument to the trigger function (tuple form)
        schedule.enter(0, 0, self.restart_server, (inc,))
        schedule.run()


if __name__ == '__main__':
    # four positional arguments are required (argv[1]..argv[4]); the old
    # check used "< 4" and fell through after printing, crashing later
    # with an IndexError
    if (len(sys.argv) < 5):
        print('please input install_path,zookeepers,masters_zk_path and worker_zk_path')
        sys.exit(1)
    install_path = sys.argv[1]
    zookeepers = sys.argv[2]
    masters_zk_path = sys.argv[3]
    workers_zk_path = sys.argv[4]
    zkClient = ZkClient()
    zkClient.main(300)
|
@ -0,0 +1,52 @@ |
|||||||
|
#!/bin/bash |
||||||
|
# |
||||||
|
# Licensed to the Apache Software Foundation (ASF) under one or more |
||||||
|
# contributor license agreements. See the NOTICE file distributed with |
||||||
|
# this work for additional information regarding copyright ownership. |
||||||
|
# The ASF licenses this file to You under the Apache License, Version 2.0 |
||||||
|
# (the "License"); you may not use this file except in compliance with |
||||||
|
# the License. You may obtain a copy of the License at |
||||||
|
# |
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0 |
||||||
|
# |
||||||
|
# Unless required by applicable law or agreed to in writing, software |
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, |
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||||
|
# See the License for the specific language governing permissions and |
||||||
|
# limitations under the License. |
||||||
|
# |
||||||
|
|
||||||
|
# print usage and abort; called when the argument count is wrong
print_usage(){
  # pass $0 as a printf argument instead of embedding it in the format
  # string, where a '%' in the path would be misinterpreted
  printf "USAGE: %s masterPath workerPath port installPath\n" "$0"
  exit 1
}

if [ $# -ne 4 ];then
  print_usage
fi

masterPath=$1
workerPath=$2
port=$3
installPath=$4

# resolve the absolute directory of this script (quoted: survives spaces)
BIN_DIR=$(dirname "$0")
BIN_DIR=$(cd "$BIN_DIR"; pwd)
DOLPHINSCHEDULER_HOME=$BIN_DIR/..

export JAVA_HOME=$JAVA_HOME

export DOLPHINSCHEDULER_CONF_DIR=$DOLPHINSCHEDULER_HOME/conf
export DOLPHINSCHEDULER_LIB_JARS=$DOLPHINSCHEDULER_HOME/lib/*

export DOLPHINSCHEDULER_OPTS="-server -Xmx1g -Xms1g -Xss512k -XX:+DisableExplicitGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:LargePageSizeInBytes=128m -XX:+UseFastAccessorMethods -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70"
export STOP_TIMEOUT=5

CLASS=org.apache.dolphinscheduler.server.monitor.MonitorServer

# intentionally unquoted below: word splitting of the JVM options and
# classpath arguments is required
exec_command="$DOLPHINSCHEDULER_OPTS -classpath $DOLPHINSCHEDULER_CONF_DIR:$DOLPHINSCHEDULER_LIB_JARS $CLASS $masterPath $workerPath $port $installPath"

cd "$DOLPHINSCHEDULER_HOME"
$JAVA_HOME/bin/java $exec_command
@ -0,0 +1,48 @@ |
|||||||
|
#!/bin/bash |
||||||
|
# |
||||||
|
# Licensed to the Apache Software Foundation (ASF) under one or more |
||||||
|
# contributor license agreements. See the NOTICE file distributed with |
||||||
|
# this work for additional information regarding copyright ownership. |
||||||
|
# The ASF licenses this file to You under the Apache License, Version 2.0 |
||||||
|
# (the "License"); you may not use this file except in compliance with |
||||||
|
# the License. You may obtain a copy of the License at |
||||||
|
# |
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0 |
||||||
|
# |
||||||
|
# Unless required by applicable law or agreed to in writing, software |
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, |
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||||
|
# See the License for the specific language governing permissions and |
||||||
|
# limitations under the License. |
||||||
|
# |
||||||
|
|
||||||
|
# print usage and abort; called when the argument count is wrong
print_usage(){
  # pass $0 as a printf argument instead of embedding it in the format
  # string, where a '%' in the path would be misinterpreted
  printf "USAGE: %s rootNode\n" "$0"
  exit 1
}

if [ $# -ne 1 ];then
  print_usage
fi

rootNode=$1

# resolve the absolute directory of this script (quoted: survives spaces)
BIN_DIR=$(dirname "$0")
BIN_DIR=$(cd "$BIN_DIR"; pwd)
DOLPHINSCHEDULER_HOME=$BIN_DIR/..

export JAVA_HOME=$JAVA_HOME

export DOLPHINSCHEDULER_CONF_DIR=$DOLPHINSCHEDULER_HOME/conf
export DOLPHINSCHEDULER_LIB_JARS=$DOLPHINSCHEDULER_HOME/lib/*

export DOLPHINSCHEDULER_OPTS="-server -Xmx1g -Xms1g -Xss512k -XX:+DisableExplicitGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:LargePageSizeInBytes=128m -XX:+UseFastAccessorMethods -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70"
export STOP_TIMEOUT=5

CLASS=org.apache.dolphinscheduler.server.utils.RemoveZKNode

# intentionally unquoted below: word splitting of the JVM options and
# classpath arguments is required
exec_command="$DOLPHINSCHEDULER_OPTS -classpath $DOLPHINSCHEDULER_CONF_DIR:$DOLPHINSCHEDULER_LIB_JARS $CLASS $rootNode"

cd "$DOLPHINSCHEDULER_HOME"
$JAVA_HOME/bin/java $exec_command
Loading…
Reference in new issue