gaojun2048
4 years ago
426 changed files with 9730 additions and 20133 deletions
@ -1,40 +0,0 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

version: '2'

services:

  # Single-node ZooKeeper used by DolphinScheduler for coordination.
  zookeeper:
    image: zookeeper
    restart: always
    container_name: zookeeper
    ports:
      - "2181:2181"
    environment:
      ZOO_MY_ID: 1
      # Allow the four-letter-word admin commands the health checks use.
      ZOO_4LW_COMMANDS_WHITELIST: srvr,ruok,wchs,cons

  # PostgreSQL metadata store; schema scripts are mounted into the
  # image's init directory and run on first start.
  db:
    image: postgres
    container_name: postgres
    environment:
      - POSTGRES_USER=test
      - POSTGRES_PASSWORD=test
      - POSTGRES_DB=dolphinscheduler
    ports:
      - "5432:5432"
    volumes:
      - pgdata:/var/lib/postgresql/data
      - ./postgres/docker-entrypoint-initdb:/docker-entrypoint-initdb.d

volumes:
  pgdata:
@ -1,765 +0,0 @@
|
||||
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

-- Quartz scheduler tables. Dropped child-first so re-running the script
-- never trips over dependent rows.
DROP TABLE IF EXISTS QRTZ_FIRED_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_PAUSED_TRIGGER_GRPS;
DROP TABLE IF EXISTS QRTZ_SCHEDULER_STATE;
DROP TABLE IF EXISTS QRTZ_LOCKS;
DROP TABLE IF EXISTS QRTZ_SIMPLE_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_SIMPROP_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_CRON_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_BLOB_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_JOB_DETAILS;
DROP TABLE IF EXISTS QRTZ_CALENDARS;

-- Job definitions registered with Quartz.
-- (varchar(n) is the same type as character varying(n) in PostgreSQL.)
CREATE TABLE QRTZ_JOB_DETAILS (
    SCHED_NAME        varchar(120) NOT NULL,
    JOB_NAME          varchar(200) NOT NULL,
    JOB_GROUP         varchar(200) NOT NULL,
    DESCRIPTION       varchar(250) NULL,
    JOB_CLASS_NAME    varchar(250) NOT NULL,
    IS_DURABLE        boolean      NOT NULL,
    IS_NONCONCURRENT  boolean      NOT NULL,
    IS_UPDATE_DATA    boolean      NOT NULL,
    REQUESTS_RECOVERY boolean      NOT NULL,
    JOB_DATA          bytea        NULL
);
ALTER TABLE QRTZ_JOB_DETAILS ADD PRIMARY KEY (SCHED_NAME, JOB_NAME, JOB_GROUP);

-- One row per trigger; fire times are stored as epoch-millis bigints.
CREATE TABLE QRTZ_TRIGGERS (
    SCHED_NAME     varchar(120) NOT NULL,
    TRIGGER_NAME   varchar(200) NOT NULL,
    TRIGGER_GROUP  varchar(200) NOT NULL,
    JOB_NAME       varchar(200) NOT NULL,
    JOB_GROUP      varchar(200) NOT NULL,
    DESCRIPTION    varchar(250) NULL,
    NEXT_FIRE_TIME bigint       NULL,
    PREV_FIRE_TIME bigint       NULL,
    PRIORITY       integer      NULL,
    TRIGGER_STATE  varchar(16)  NOT NULL,
    TRIGGER_TYPE   varchar(8)   NOT NULL,
    START_TIME     bigint       NOT NULL,
    END_TIME       bigint       NULL,
    CALENDAR_NAME  varchar(200) NULL,
    MISFIRE_INSTR  smallint     NULL,
    JOB_DATA       bytea        NULL
);
ALTER TABLE QRTZ_TRIGGERS ADD PRIMARY KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP);

CREATE TABLE QRTZ_SIMPLE_TRIGGERS (
    SCHED_NAME      varchar(120) NOT NULL,
    TRIGGER_NAME    varchar(200) NOT NULL,
    TRIGGER_GROUP   varchar(200) NOT NULL,
    REPEAT_COUNT    bigint       NOT NULL,
    REPEAT_INTERVAL bigint       NOT NULL,
    TIMES_TRIGGERED bigint       NOT NULL
);
ALTER TABLE QRTZ_SIMPLE_TRIGGERS ADD PRIMARY KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP);

CREATE TABLE QRTZ_CRON_TRIGGERS (
    SCHED_NAME      varchar(120) NOT NULL,
    TRIGGER_NAME    varchar(200) NOT NULL,
    TRIGGER_GROUP   varchar(200) NOT NULL,
    CRON_EXPRESSION varchar(120) NOT NULL,
    TIME_ZONE_ID    varchar(80)
);
ALTER TABLE QRTZ_CRON_TRIGGERS ADD PRIMARY KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP);

-- Property bag for CalendarIntervalTrigger / DailyTimeIntervalTrigger.
CREATE TABLE QRTZ_SIMPROP_TRIGGERS (
    SCHED_NAME    varchar(120) NOT NULL,
    TRIGGER_NAME  varchar(200) NOT NULL,
    TRIGGER_GROUP varchar(200) NOT NULL,
    STR_PROP_1    varchar(512) NULL,
    STR_PROP_2    varchar(512) NULL,
    STR_PROP_3    varchar(512) NULL,
    INT_PROP_1    int          NULL,
    INT_PROP_2    int          NULL,
    LONG_PROP_1   bigint       NULL,
    LONG_PROP_2   bigint       NULL,
    DEC_PROP_1    numeric(13,4) NULL,
    DEC_PROP_2    numeric(13,4) NULL,
    BOOL_PROP_1   boolean      NULL,
    BOOL_PROP_2   boolean      NULL
);
ALTER TABLE QRTZ_SIMPROP_TRIGGERS ADD PRIMARY KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP);

CREATE TABLE QRTZ_BLOB_TRIGGERS (
    SCHED_NAME    varchar(120) NOT NULL,
    TRIGGER_NAME  varchar(200) NOT NULL,
    TRIGGER_GROUP varchar(200) NOT NULL,
    BLOB_DATA     bytea        NULL
);
ALTER TABLE QRTZ_BLOB_TRIGGERS ADD PRIMARY KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP);

CREATE TABLE QRTZ_CALENDARS (
    SCHED_NAME    varchar(120) NOT NULL,
    CALENDAR_NAME varchar(200) NOT NULL,
    CALENDAR      bytea        NOT NULL
);
ALTER TABLE QRTZ_CALENDARS ADD PRIMARY KEY (SCHED_NAME, CALENDAR_NAME);

CREATE TABLE QRTZ_PAUSED_TRIGGER_GRPS (
    SCHED_NAME    varchar(120) NOT NULL,
    TRIGGER_GROUP varchar(200) NOT NULL
);
ALTER TABLE QRTZ_PAUSED_TRIGGER_GRPS ADD PRIMARY KEY (SCHED_NAME, TRIGGER_GROUP);

CREATE TABLE QRTZ_FIRED_TRIGGERS (
    SCHED_NAME        varchar(120) NOT NULL,
    ENTRY_ID          varchar(95)  NOT NULL,
    TRIGGER_NAME      varchar(200) NOT NULL,
    TRIGGER_GROUP     varchar(200) NOT NULL,
    INSTANCE_NAME     varchar(200) NOT NULL,
    FIRED_TIME        bigint       NOT NULL,
    SCHED_TIME        bigint       NOT NULL,
    PRIORITY          integer      NOT NULL,
    STATE             varchar(16)  NOT NULL,
    JOB_NAME          varchar(200) NULL,
    JOB_GROUP         varchar(200) NULL,
    IS_NONCONCURRENT  boolean      NULL,
    REQUESTS_RECOVERY boolean      NULL
);
ALTER TABLE QRTZ_FIRED_TRIGGERS ADD PRIMARY KEY (SCHED_NAME, ENTRY_ID);

CREATE TABLE QRTZ_SCHEDULER_STATE (
    SCHED_NAME        varchar(120) NOT NULL,
    INSTANCE_NAME     varchar(200) NOT NULL,
    LAST_CHECKIN_TIME bigint       NOT NULL,
    CHECKIN_INTERVAL  bigint       NOT NULL
);
ALTER TABLE QRTZ_SCHEDULER_STATE ADD PRIMARY KEY (SCHED_NAME, INSTANCE_NAME);

CREATE TABLE QRTZ_LOCKS (
    SCHED_NAME varchar(120) NOT NULL,
    LOCK_NAME  varchar(40)  NOT NULL
);
ALTER TABLE QRTZ_LOCKS ADD PRIMARY KEY (SCHED_NAME, LOCK_NAME);

-- Secondary indexes recommended by the stock Quartz PostgreSQL DDL.
CREATE INDEX IDX_QRTZ_J_REQ_RECOVERY ON QRTZ_JOB_DETAILS (SCHED_NAME, REQUESTS_RECOVERY);
CREATE INDEX IDX_QRTZ_J_GRP ON QRTZ_JOB_DETAILS (SCHED_NAME, JOB_GROUP);

CREATE INDEX IDX_QRTZ_T_J ON QRTZ_TRIGGERS (SCHED_NAME, JOB_NAME, JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_JG ON QRTZ_TRIGGERS (SCHED_NAME, JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_C ON QRTZ_TRIGGERS (SCHED_NAME, CALENDAR_NAME);
CREATE INDEX IDX_QRTZ_T_G ON QRTZ_TRIGGERS (SCHED_NAME, TRIGGER_GROUP);
CREATE INDEX IDX_QRTZ_T_STATE ON QRTZ_TRIGGERS (SCHED_NAME, TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_N_STATE ON QRTZ_TRIGGERS (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP, TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_N_G_STATE ON QRTZ_TRIGGERS (SCHED_NAME, TRIGGER_GROUP, TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_NEXT_FIRE_TIME ON QRTZ_TRIGGERS (SCHED_NAME, NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_ST ON QRTZ_TRIGGERS (SCHED_NAME, TRIGGER_STATE, NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_MISFIRE ON QRTZ_TRIGGERS (SCHED_NAME, MISFIRE_INSTR, NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE ON QRTZ_TRIGGERS (SCHED_NAME, MISFIRE_INSTR, NEXT_FIRE_TIME, TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE_GRP ON QRTZ_TRIGGERS (SCHED_NAME, MISFIRE_INSTR, NEXT_FIRE_TIME, TRIGGER_GROUP, TRIGGER_STATE);

CREATE INDEX IDX_QRTZ_FT_TRIG_INST_NAME ON QRTZ_FIRED_TRIGGERS (SCHED_NAME, INSTANCE_NAME);
CREATE INDEX IDX_QRTZ_FT_INST_JOB_REQ_RCVRY ON QRTZ_FIRED_TRIGGERS (SCHED_NAME, INSTANCE_NAME, REQUESTS_RECOVERY);
CREATE INDEX IDX_QRTZ_FT_J_G ON QRTZ_FIRED_TRIGGERS (SCHED_NAME, JOB_NAME, JOB_GROUP);
CREATE INDEX IDX_QRTZ_FT_JG ON QRTZ_FIRED_TRIGGERS (SCHED_NAME, JOB_GROUP);
CREATE INDEX IDX_QRTZ_FT_T_G ON QRTZ_FIRED_TRIGGERS (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP);
CREATE INDEX IDX_QRTZ_FT_TG ON QRTZ_FIRED_TRIGGERS (SCHED_NAME, TRIGGER_GROUP);
||||
|
||||
--
-- Table structure for table t_ds_access_token
--
DROP TABLE IF EXISTS t_ds_access_token;
CREATE TABLE t_ds_access_token (
  id          int NOT NULL,
  user_id     int DEFAULT NULL,
  token       varchar(64) DEFAULT NULL,
  expire_time timestamp DEFAULT NULL,
  create_time timestamp DEFAULT NULL,
  update_time timestamp DEFAULT NULL,
  PRIMARY KEY (id)
);

--
-- Table structure for table t_ds_alert
--
DROP TABLE IF EXISTS t_ds_alert;
CREATE TABLE t_ds_alert (
  id            int NOT NULL,
  title         varchar(64) DEFAULT NULL,
  show_type     int DEFAULT NULL,
  content       text,
  alert_type    int DEFAULT NULL,
  alert_status  int DEFAULT '0',
  log           text,
  alertgroup_id int DEFAULT NULL,
  receivers     text,
  receivers_cc  text,
  create_time   timestamp DEFAULT NULL,
  update_time   timestamp DEFAULT NULL,
  PRIMARY KEY (id)
);

--
-- Table structure for table t_ds_alertgroup
--
DROP TABLE IF EXISTS t_ds_alertgroup;
CREATE TABLE t_ds_alertgroup (
  id          int NOT NULL,
  group_name  varchar(255) DEFAULT NULL,
  group_type  int DEFAULT NULL,
  description varchar(255) DEFAULT NULL,
  create_time timestamp DEFAULT NULL,
  update_time timestamp DEFAULT NULL,
  PRIMARY KEY (id)
);

--
-- Table structure for table t_ds_command
--
DROP TABLE IF EXISTS t_ds_command;
CREATE TABLE t_ds_command (
  id                        int NOT NULL,
  command_type              int DEFAULT NULL,
  process_definition_id     int DEFAULT NULL,
  command_param             text,
  task_depend_type          int DEFAULT NULL,
  failure_strategy          int DEFAULT '0',
  warning_type              int DEFAULT '0',
  warning_group_id          int DEFAULT NULL,
  schedule_time             timestamp DEFAULT NULL,
  start_time                timestamp DEFAULT NULL,
  executor_id               int DEFAULT NULL,
  dependence                varchar(255) DEFAULT NULL,
  update_time               timestamp DEFAULT NULL,
  process_instance_priority int DEFAULT NULL,
  worker_group              varchar(64),
  PRIMARY KEY (id)
);

--
-- Table structure for table t_ds_datasource
--
DROP TABLE IF EXISTS t_ds_datasource;
CREATE TABLE t_ds_datasource (
  id                int NOT NULL,
  name              varchar(64) NOT NULL,
  note              varchar(256) DEFAULT NULL,
  type              int NOT NULL,
  user_id           int NOT NULL,
  connection_params text NOT NULL,
  create_time       timestamp NOT NULL,
  update_time       timestamp DEFAULT NULL,
  PRIMARY KEY (id)
);

--
-- Table structure for table t_ds_error_command
--
DROP TABLE IF EXISTS t_ds_error_command;
CREATE TABLE t_ds_error_command (
  id                        int NOT NULL,
  command_type              int DEFAULT NULL,
  executor_id               int DEFAULT NULL,
  process_definition_id     int DEFAULT NULL,
  command_param             text,
  task_depend_type          int DEFAULT NULL,
  failure_strategy          int DEFAULT '0',
  warning_type              int DEFAULT '0',
  warning_group_id          int DEFAULT NULL,
  schedule_time             timestamp DEFAULT NULL,
  start_time                timestamp DEFAULT NULL,
  update_time               timestamp DEFAULT NULL,
  dependence                text,
  process_instance_priority int DEFAULT NULL,
  worker_group              varchar(64),
  message                   text,
  PRIMARY KEY (id)
);
||||
--
-- Table structure for table t_ds_master_server
--
-- NOTE(review): this script contains no CREATE TABLE for t_ds_master_server;
-- the header is kept only as a placeholder from the original dump.

--
-- Table structure for table t_ds_process_definition
--
DROP TABLE IF EXISTS t_ds_process_definition;
CREATE TABLE t_ds_process_definition (
  id                      int NOT NULL,
  name                    varchar(255) DEFAULT NULL,
  version                 int DEFAULT NULL,
  release_state           int DEFAULT NULL,
  project_id              int DEFAULT NULL,
  user_id                 int DEFAULT NULL,
  process_definition_json text,
  description             text,
  global_params           text,
  flag                    int DEFAULT NULL,
  locations               text,
  connects                text,
  receivers               text,
  receivers_cc            text,
  create_time             timestamp DEFAULT NULL,
  timeout                 int DEFAULT '0',
  tenant_id               int NOT NULL DEFAULT '-1',
  update_time             timestamp DEFAULT NULL,
  modify_by               varchar(36) DEFAULT '',
  -- FIX: the comma after resource_ids was missing, which made this whole
  -- CREATE TABLE statement a syntax error in PostgreSQL.
  resource_ids            varchar(64),
  PRIMARY KEY (id)
);

create index process_definition_index on t_ds_process_definition (project_id,id);
||||
|
||||
--
-- Table structure for table t_ds_process_instance
--
DROP TABLE IF EXISTS t_ds_process_instance;
CREATE TABLE t_ds_process_instance (
  id                        int NOT NULL,
  name                      varchar(255) DEFAULT NULL,
  process_definition_id     int DEFAULT NULL,
  state                     int DEFAULT NULL,
  recovery                  int DEFAULT NULL,
  start_time                timestamp DEFAULT NULL,
  end_time                  timestamp DEFAULT NULL,
  run_times                 int DEFAULT NULL,
  host                      varchar(45) DEFAULT NULL,
  command_type              int DEFAULT NULL,
  command_param             text,
  task_depend_type          int DEFAULT NULL,
  max_try_times             int DEFAULT '0',
  failure_strategy          int DEFAULT '0',
  warning_type              int DEFAULT '0',
  warning_group_id          int DEFAULT NULL,
  schedule_time             timestamp DEFAULT NULL,
  command_start_time        timestamp DEFAULT NULL,
  global_params             text,
  process_instance_json     text,
  flag                      int DEFAULT '1',
  update_time               timestamp NULL,
  is_sub_process            int DEFAULT '0',
  executor_id               int NOT NULL,
  locations                 text,
  connects                  text,
  history_cmd               text,
  dependence_schedule_times text,
  process_instance_priority int DEFAULT NULL,
  worker_group              varchar(64),
  timeout                   int DEFAULT '0',
  tenant_id                 int NOT NULL DEFAULT '-1',
  PRIMARY KEY (id)
);
create index process_instance_index on t_ds_process_instance (process_definition_id,id);
create index start_time_index on t_ds_process_instance (start_time);

--
-- Table structure for table t_ds_project
--
DROP TABLE IF EXISTS t_ds_project;
CREATE TABLE t_ds_project (
  id          int NOT NULL,
  name        varchar(100) DEFAULT NULL,
  description varchar(200) DEFAULT NULL,
  user_id     int DEFAULT NULL,
  flag        int DEFAULT '1',
  create_time timestamp DEFAULT CURRENT_TIMESTAMP,
  update_time timestamp DEFAULT CURRENT_TIMESTAMP,
  PRIMARY KEY (id)
);
create index user_id_index on t_ds_project (user_id);

--
-- Table structure for table t_ds_queue
--
DROP TABLE IF EXISTS t_ds_queue;
CREATE TABLE t_ds_queue (
  id          int NOT NULL,
  queue_name  varchar(64) DEFAULT NULL,
  queue       varchar(64) DEFAULT NULL,
  create_time timestamp DEFAULT NULL,
  update_time timestamp DEFAULT NULL,
  PRIMARY KEY (id)
);
||||
|
||||
|
||||
--
-- Table structure for table t_ds_relation_datasource_user
--
DROP TABLE IF EXISTS t_ds_relation_datasource_user;
CREATE TABLE t_ds_relation_datasource_user (
  id            int NOT NULL,
  user_id       int NOT NULL,
  datasource_id int DEFAULT NULL,
  perm          int DEFAULT '1',
  create_time   timestamp DEFAULT NULL,
  update_time   timestamp DEFAULT NULL,
  PRIMARY KEY (id)
);
-- FIX: removed a stray lone ';' (empty statement) that followed this CREATE.

--
-- Table structure for table t_ds_relation_process_instance
--
DROP TABLE IF EXISTS t_ds_relation_process_instance;
CREATE TABLE t_ds_relation_process_instance (
  id                         int NOT NULL,
  parent_process_instance_id int DEFAULT NULL,
  parent_task_instance_id    int DEFAULT NULL,
  process_instance_id        int DEFAULT NULL,
  PRIMARY KEY (id)
);

--
-- Table structure for table t_ds_relation_project_user
--
DROP TABLE IF EXISTS t_ds_relation_project_user;
CREATE TABLE t_ds_relation_project_user (
  id          int NOT NULL,
  user_id     int NOT NULL,
  project_id  int DEFAULT NULL,
  perm        int DEFAULT '1',
  create_time timestamp DEFAULT NULL,
  update_time timestamp DEFAULT NULL,
  PRIMARY KEY (id)
);
create index relation_project_user_id_index on t_ds_relation_project_user (user_id);

--
-- Table structure for table t_ds_relation_resources_user
--
DROP TABLE IF EXISTS t_ds_relation_resources_user;
CREATE TABLE t_ds_relation_resources_user (
  id           int NOT NULL,
  user_id      int NOT NULL,
  resources_id int DEFAULT NULL,
  perm         int DEFAULT '1',
  create_time  timestamp DEFAULT NULL,
  update_time  timestamp DEFAULT NULL,
  PRIMARY KEY (id)
);

--
-- Table structure for table t_ds_relation_udfs_user
--
DROP TABLE IF EXISTS t_ds_relation_udfs_user;
CREATE TABLE t_ds_relation_udfs_user (
  id          int NOT NULL,
  user_id     int NOT NULL,
  udf_id      int DEFAULT NULL,
  perm        int DEFAULT '1',
  create_time timestamp DEFAULT NULL,
  update_time timestamp DEFAULT NULL,
  PRIMARY KEY (id)
);
-- FIX: removed a stray lone ';' (empty statement) that followed this CREATE.

--
-- Table structure for table t_ds_relation_user_alertgroup
--
DROP TABLE IF EXISTS t_ds_relation_user_alertgroup;
CREATE TABLE t_ds_relation_user_alertgroup (
  id            int NOT NULL,
  alertgroup_id int DEFAULT NULL,
  user_id       int DEFAULT NULL,
  create_time   timestamp DEFAULT NULL,
  update_time   timestamp DEFAULT NULL,
  PRIMARY KEY (id)
);
||||
|
||||
--
-- Table structure for table t_ds_resources
--
DROP TABLE IF EXISTS t_ds_resources;
CREATE TABLE t_ds_resources (
  id          int NOT NULL,
  alias       varchar(64) DEFAULT NULL,
  file_name   varchar(64) DEFAULT NULL,
  description varchar(256) DEFAULT NULL,
  user_id     int DEFAULT NULL,
  type        int DEFAULT NULL,
  size        bigint DEFAULT NULL,
  create_time timestamp DEFAULT NULL,
  update_time timestamp DEFAULT NULL,
  pid         int,
  full_name   varchar(64),
  -- FIX: the comma after is_directory was missing, which made this whole
  -- CREATE TABLE statement a syntax error in PostgreSQL. Also removed a
  -- stray lone ';' (empty statement) that followed the statement.
  is_directory int,
  PRIMARY KEY (id)
);
||||
|
||||
--
-- Table structure for table t_ds_schedules
--
DROP TABLE IF EXISTS t_ds_schedules;
CREATE TABLE t_ds_schedules (
  id                        int NOT NULL,
  process_definition_id     int NOT NULL,
  start_time                timestamp NOT NULL,
  end_time                  timestamp NOT NULL,
  crontab                   varchar(256) NOT NULL,
  failure_strategy          int NOT NULL,
  user_id                   int NOT NULL,
  release_state             int NOT NULL,
  warning_type              int NOT NULL,
  warning_group_id          int DEFAULT NULL,
  process_instance_priority int DEFAULT NULL,
  worker_group              varchar(64),
  create_time               timestamp NOT NULL,
  update_time               timestamp NOT NULL,
  PRIMARY KEY (id)
);

--
-- Table structure for table t_ds_session
--
DROP TABLE IF EXISTS t_ds_session;
CREATE TABLE t_ds_session (
  id              varchar(64) NOT NULL,
  user_id         int DEFAULT NULL,
  ip              varchar(45) DEFAULT NULL,
  last_login_time timestamp DEFAULT NULL,
  PRIMARY KEY (id)
);

--
-- Table structure for table t_ds_task_instance
--
DROP TABLE IF EXISTS t_ds_task_instance;
CREATE TABLE t_ds_task_instance (
  id                     int NOT NULL,
  name                   varchar(255) DEFAULT NULL,
  task_type              varchar(64) DEFAULT NULL,
  process_definition_id  int DEFAULT NULL,
  process_instance_id    int DEFAULT NULL,
  task_json              text,
  state                  int DEFAULT NULL,
  submit_time            timestamp DEFAULT NULL,
  start_time             timestamp DEFAULT NULL,
  end_time               timestamp DEFAULT NULL,
  host                   varchar(45) DEFAULT NULL,
  execute_path           varchar(200) DEFAULT NULL,
  log_path               varchar(200) DEFAULT NULL,
  alert_flag             int DEFAULT NULL,
  retry_times            int DEFAULT '0',
  pid                    int DEFAULT NULL,
  app_link               varchar(255) DEFAULT NULL,
  flag                   int DEFAULT '1',
  retry_interval         int DEFAULT NULL,
  max_retry_times        int DEFAULT NULL,
  task_instance_priority int DEFAULT NULL,
  worker_group           varchar(64),
  executor_id            int DEFAULT NULL,
  PRIMARY KEY (id)
);

--
-- Table structure for table t_ds_tenant
--
DROP TABLE IF EXISTS t_ds_tenant;
CREATE TABLE t_ds_tenant (
  id          int NOT NULL,
  tenant_code varchar(64) DEFAULT NULL,
  tenant_name varchar(64) DEFAULT NULL,
  description varchar(256) DEFAULT NULL,
  queue_id    int DEFAULT NULL,
  create_time timestamp DEFAULT NULL,
  update_time timestamp DEFAULT NULL,
  PRIMARY KEY (id)
);

--
-- Table structure for table t_ds_udfs
--
DROP TABLE IF EXISTS t_ds_udfs;
CREATE TABLE t_ds_udfs (
  id            int NOT NULL,
  user_id       int NOT NULL,
  func_name     varchar(100) NOT NULL,
  class_name    varchar(255) NOT NULL,
  type          int NOT NULL,
  arg_types     varchar(255) DEFAULT NULL,
  database      varchar(255) DEFAULT NULL,
  description   varchar(255) DEFAULT NULL,
  resource_id   int NOT NULL,
  resource_name varchar(255) NOT NULL,
  create_time   timestamp NOT NULL,
  update_time   timestamp NOT NULL,
  PRIMARY KEY (id)
);

--
-- Table structure for table t_ds_user
--
DROP TABLE IF EXISTS t_ds_user;
CREATE TABLE t_ds_user (
  id            int NOT NULL,
  user_name     varchar(64) DEFAULT NULL,
  user_password varchar(64) DEFAULT NULL,
  user_type     int DEFAULT NULL,
  email         varchar(64) DEFAULT NULL,
  phone         varchar(11) DEFAULT NULL,
  tenant_id     int DEFAULT NULL,
  create_time   timestamp DEFAULT NULL,
  update_time   timestamp DEFAULT NULL,
  queue         varchar(64) DEFAULT NULL,
  PRIMARY KEY (id)
);

--
-- Table structure for table t_ds_version
--
DROP TABLE IF EXISTS t_ds_version;
CREATE TABLE t_ds_version (
  id      int NOT NULL,
  version varchar(200) NOT NULL,
  PRIMARY KEY (id)
);
create index version_index on t_ds_version(version);

--
-- Table structure for table t_ds_worker_group
--
DROP TABLE IF EXISTS t_ds_worker_group;
CREATE TABLE t_ds_worker_group (
  id          bigint NOT NULL,
  name        varchar(256) DEFAULT NULL,
  ip_list     varchar(256) DEFAULT NULL,
  create_time timestamp DEFAULT NULL,
  update_time timestamp DEFAULT NULL,
  PRIMARY KEY (id)
);

--
-- Table structure for table t_ds_worker_server
--
DROP TABLE IF EXISTS t_ds_worker_server;
CREATE TABLE t_ds_worker_server (
  id                  int NOT NULL,
  host                varchar(45) DEFAULT NULL,
  port                int DEFAULT NULL,
  zk_directory        varchar(64) DEFAULT NULL,
  res_info            varchar(255) DEFAULT NULL,
  create_time         timestamp DEFAULT NULL,
  last_heartbeat_time timestamp DEFAULT NULL,
  PRIMARY KEY (id)
);
||||
|
||||
|
||||
-- Per-table identity sequences: each table's id column defaults to the
-- NEXTVAL of its own sequence so plain INSERTs get auto-generated ids.
DROP SEQUENCE IF EXISTS t_ds_access_token_id_sequence;
CREATE SEQUENCE t_ds_access_token_id_sequence;
ALTER TABLE t_ds_access_token ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_access_token_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_alert_id_sequence;
CREATE SEQUENCE t_ds_alert_id_sequence;
ALTER TABLE t_ds_alert ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_alert_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_alertgroup_id_sequence;
CREATE SEQUENCE t_ds_alertgroup_id_sequence;
ALTER TABLE t_ds_alertgroup ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_alertgroup_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_command_id_sequence;
CREATE SEQUENCE t_ds_command_id_sequence;
ALTER TABLE t_ds_command ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_command_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_datasource_id_sequence;
CREATE SEQUENCE t_ds_datasource_id_sequence;
ALTER TABLE t_ds_datasource ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_datasource_id_sequence');

-- FIX: t_ds_master_server is never created by this script, so the original
-- "ALTER TABLE t_ds_master_server ..." failed when the script was executed.
-- The leftover statements are kept commented out for reference.
-- DROP SEQUENCE IF EXISTS t_ds_master_server_id_sequence;
-- CREATE SEQUENCE t_ds_master_server_id_sequence;
-- ALTER TABLE t_ds_master_server ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_master_server_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_process_definition_id_sequence;
CREATE SEQUENCE t_ds_process_definition_id_sequence;
ALTER TABLE t_ds_process_definition ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_process_instance_id_sequence;
CREATE SEQUENCE t_ds_process_instance_id_sequence;
ALTER TABLE t_ds_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_instance_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_project_id_sequence;
CREATE SEQUENCE t_ds_project_id_sequence;
ALTER TABLE t_ds_project ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_project_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_queue_id_sequence;
CREATE SEQUENCE t_ds_queue_id_sequence;
ALTER TABLE t_ds_queue ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_queue_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_relation_datasource_user_id_sequence;
CREATE SEQUENCE t_ds_relation_datasource_user_id_sequence;
ALTER TABLE t_ds_relation_datasource_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_datasource_user_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_relation_process_instance_id_sequence;
CREATE SEQUENCE t_ds_relation_process_instance_id_sequence;
ALTER TABLE t_ds_relation_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_process_instance_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_relation_project_user_id_sequence;
CREATE SEQUENCE t_ds_relation_project_user_id_sequence;
ALTER TABLE t_ds_relation_project_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_project_user_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_relation_resources_user_id_sequence;
CREATE SEQUENCE t_ds_relation_resources_user_id_sequence;
ALTER TABLE t_ds_relation_resources_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_resources_user_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_relation_udfs_user_id_sequence;
CREATE SEQUENCE t_ds_relation_udfs_user_id_sequence;
ALTER TABLE t_ds_relation_udfs_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_udfs_user_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_relation_user_alertgroup_id_sequence;
CREATE SEQUENCE t_ds_relation_user_alertgroup_id_sequence;
ALTER TABLE t_ds_relation_user_alertgroup ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_user_alertgroup_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_resources_id_sequence;
CREATE SEQUENCE t_ds_resources_id_sequence;
ALTER TABLE t_ds_resources ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_resources_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_schedules_id_sequence;
CREATE SEQUENCE t_ds_schedules_id_sequence;
ALTER TABLE t_ds_schedules ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_schedules_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_task_instance_id_sequence;
CREATE SEQUENCE t_ds_task_instance_id_sequence;
ALTER TABLE t_ds_task_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_task_instance_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_tenant_id_sequence;
CREATE SEQUENCE t_ds_tenant_id_sequence;
ALTER TABLE t_ds_tenant ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_tenant_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_udfs_id_sequence;
CREATE SEQUENCE t_ds_udfs_id_sequence;
ALTER TABLE t_ds_udfs ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_udfs_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_user_id_sequence;
CREATE SEQUENCE t_ds_user_id_sequence;
ALTER TABLE t_ds_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_user_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_version_id_sequence;
CREATE SEQUENCE t_ds_version_id_sequence;
ALTER TABLE t_ds_version ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_version_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_worker_group_id_sequence;
CREATE SEQUENCE t_ds_worker_group_id_sequence;
ALTER TABLE t_ds_worker_group ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_worker_group_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_worker_server_id_sequence;
CREATE SEQUENCE t_ds_worker_server_id_sequence;
ALTER TABLE t_ds_worker_server ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_worker_server_id_sequence');
||||
|
||||
-- Records of t_ds_user; user: admin, password: dolphinscheduler123
INSERT INTO t_ds_user(user_name,user_password,user_type,email,phone,tenant_id,create_time,update_time) VALUES ('admin', '7ad2410b2f4c074479a8937a28a22b8f', '0', 'xxx@qq.com', 'xx', '0', '2018-03-27 15:48:50', '2018-10-24 17:40:22');

-- Records of t_ds_alertgroup: default dolphinscheduler warning group,
-- linked to the admin user created above.
INSERT INTO t_ds_alertgroup(group_name,group_type,description,create_time,update_time) VALUES ('dolphinscheduler warning group', '0', 'dolphinscheduler warning group','2018-11-29 10:20:39', '2018-11-29 10:20:39');
INSERT INTO t_ds_relation_user_alertgroup(alertgroup_id,user_id,create_time,update_time) VALUES ( '1', '1', '2018-11-29 10:22:33', '2018-11-29 10:22:33');

-- Records of t_ds_queue; default queue name: default
INSERT INTO t_ds_queue(queue_name,queue,create_time,update_time) VALUES ('default', 'default','2018-11-29 10:22:33', '2018-11-29 10:22:33');

-- Records of t_ds_version: schema version marker
INSERT INTO t_ds_version(version) VALUES ('2.0.0');
@ -1,93 +0,0 @@
|
||||
# |
||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
||||
# contributor license agreements. See the NOTICE file distributed with |
||||
# this work for additional information regarding copyright ownership. |
||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
# (the "License"); you may not use this file except in compliance with |
||||
# the License. You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
# |
||||
|
||||
FROM nginx:alpine |
||||
|
||||
ARG VERSION |
||||
|
||||
ENV TZ Asia/Shanghai |
||||
ENV LANG C.UTF-8 |
||||
ENV DEBIAN_FRONTEND noninteractive |
||||
|
||||
#1. install dos2unix shadow bash openrc python sudo vim wget iputils net-tools ssh pip tini kazoo. |
||||
#If installation is slow, you can replace alpine's mirror with aliyun's mirror. Example:
||||
#RUN sed -i "s/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g" /etc/apk/repositories |
||||
RUN apk update && \ |
||||
apk add dos2unix shadow bash openrc python sudo vim wget iputils net-tools openssh-server py2-pip tini && \ |
||||
apk add --update procps && \ |
||||
openrc boot && \ |
||||
pip install kazoo |
||||
|
||||
#2. install jdk |
||||
RUN apk add openjdk8 |
||||
ENV JAVA_HOME /usr/lib/jvm/java-1.8-openjdk |
||||
ENV PATH $JAVA_HOME/bin:$PATH |
||||
|
||||
#3. install zk |
||||
RUN cd /opt && \ |
||||
wget https://downloads.apache.org/zookeeper/zookeeper-3.5.7/apache-zookeeper-3.5.7-bin.tar.gz && \ |
||||
tar -zxvf apache-zookeeper-3.5.7-bin.tar.gz && \ |
||||
mv apache-zookeeper-3.5.7-bin zookeeper && \ |
||||
mkdir -p /tmp/zookeeper && \ |
||||
rm -rf ./zookeeper-*tar.gz && \ |
||||
rm -rf /opt/zookeeper/conf/zoo_sample.cfg |
||||
ADD ./conf/zookeeper/zoo.cfg /opt/zookeeper/conf |
||||
ENV ZK_HOME /opt/zookeeper |
||||
ENV PATH $ZK_HOME/bin:$PATH |
||||
|
||||
#4. install pg |
||||
RUN apk add postgresql postgresql-contrib |
||||
|
||||
#5. add dolphinscheduler |
||||
ADD ./apache-dolphinscheduler-incubating-${VERSION}-SNAPSHOT-dolphinscheduler-bin.tar.gz /opt/ |
||||
RUN mv /opt/apache-dolphinscheduler-incubating-${VERSION}-SNAPSHOT-dolphinscheduler-bin/ /opt/dolphinscheduler/ |
||||
ENV DOLPHINSCHEDULER_HOME /opt/dolphinscheduler |
||||
|
||||
#6. modify nginx |
||||
RUN echo "daemon off;" >> /etc/nginx/nginx.conf && \ |
||||
rm -rf /etc/nginx/conf.d/* |
||||
ADD ./conf/nginx/dolphinscheduler.conf /etc/nginx/conf.d |
||||
|
||||
#7. add configuration and modify permissions and set soft links |
||||
ADD ./checkpoint.sh /root/checkpoint.sh |
||||
ADD ./startup-init-conf.sh /root/startup-init-conf.sh |
||||
ADD ./startup.sh /root/startup.sh |
||||
ADD ./conf/dolphinscheduler/*.tpl /opt/dolphinscheduler/conf/ |
||||
ADD conf/dolphinscheduler/env/dolphinscheduler_env.sh /opt/dolphinscheduler/conf/env/ |
||||
RUN chmod +x /root/checkpoint.sh && \ |
||||
chmod +x /root/startup-init-conf.sh && \ |
||||
chmod +x /root/startup.sh && \ |
||||
chmod +x /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh && \ |
||||
chmod +x /opt/dolphinscheduler/script/*.sh && \ |
||||
chmod +x /opt/dolphinscheduler/bin/*.sh && \ |
||||
dos2unix /root/checkpoint.sh && \ |
||||
dos2unix /root/startup-init-conf.sh && \ |
||||
dos2unix /root/startup.sh && \ |
||||
dos2unix /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh && \ |
||||
dos2unix /opt/dolphinscheduler/script/*.sh && \ |
||||
dos2unix /opt/dolphinscheduler/bin/*.sh && \ |
||||
rm -rf /bin/sh && \ |
||||
ln -s /bin/bash /bin/sh && \ |
||||
mkdir -p /tmp/xls |
||||
|
||||
#8. remove apk index cache |
||||
RUN rm -rf /var/cache/apk/* |
||||
|
||||
#9. expose port |
||||
EXPOSE 2181 2888 3888 5432 5678 1234 12345 50051 8888 |
||||
|
||||
ENTRYPOINT ["/sbin/tini", "--", "/root/startup.sh"] |
@ -1,328 +0,0 @@
|
||||
## What is Dolphin Scheduler? |
||||
|
||||
Dolphin Scheduler is a distributed and easy-to-expand visual DAG workflow scheduling system, dedicated to solving the complex dependencies in data processing, making the scheduling system out of the box for data processing. |
||||
|
||||
Github URL: https://github.com/apache/incubator-dolphinscheduler |
||||
|
||||
Official Website: https://dolphinscheduler.apache.org |
||||
|
||||
![Dolphin Scheduler](https://dolphinscheduler.apache.org/img/hlogo_colorful.svg) |
||||
|
||||
[![EN doc](https://img.shields.io/badge/document-English-blue.svg)](README.md) |
||||
[![CN doc](https://img.shields.io/badge/文档-中文版-blue.svg)](README_zh_CN.md) |
||||
|
||||
## How to use this docker image |
||||
|
||||
#### You can start a dolphinscheduler instance |
||||
``` |
||||
$ docker run -dit --name dolphinscheduler \ |
||||
-e POSTGRESQL_USERNAME=test -e POSTGRESQL_PASSWORD=test -e POSTGRESQL_DATABASE=dolphinscheduler \ |
||||
-p 8888:8888 \ |
||||
dolphinscheduler all |
||||
``` |
||||
|
||||
The default postgres user `root`, postgres password `root` and database `dolphinscheduler` are created in the `startup.sh`. |
||||
|
||||
The default zookeeper is created in the `startup.sh`. |
||||
|
||||
#### Or via Environment Variables **`POSTGRESQL_HOST`** **`POSTGRESQL_PORT`** **`POSTGRESQL_DATABASE`** **`ZOOKEEPER_QUORUM`** |
||||
|
||||
You can specify **existing postgres service**. Example: |
||||
|
||||
``` |
||||
$ docker run -dit --name dolphinscheduler \ |
||||
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \ |
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \ |
||||
-p 8888:8888 \ |
||||
dolphinscheduler all |
||||
``` |
||||
|
||||
You can specify **existing zookeeper service**. Example: |
||||
|
||||
``` |
||||
$ docker run -dit --name dolphinscheduler \ |
||||
-e ZOOKEEPER_QUORUM="192.168.x.x:2181" \
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" -e POSTGRESQL_DATABASE="dolphinscheduler" \ |
||||
-p 8888:8888 \ |
||||
dolphinscheduler all |
||||
``` |
||||
|
||||
#### Or start a standalone dolphinscheduler server |
||||
|
||||
You can start a standalone dolphinscheduler server. |
||||
|
||||
* Start a **master server**, For example: |
||||
|
||||
``` |
||||
$ docker run -dit --name dolphinscheduler \ |
||||
-e ZOOKEEPER_QUORUM="192.168.x.x:2181" \
||||
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \ |
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \ |
||||
dolphinscheduler master-server |
||||
``` |
||||
|
||||
* Start a **worker server**, For example: |
||||
|
||||
``` |
||||
$ docker run -dit --name dolphinscheduler \ |
||||
-e ZOOKEEPER_QUORUM="192.168.x.x:2181" \
||||
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \ |
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \ |
||||
dolphinscheduler worker-server |
||||
``` |
||||
|
||||
* Start a **api server**, For example: |
||||
|
||||
``` |
||||
$ docker run -dit --name dolphinscheduler \ |
||||
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \ |
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \ |
||||
-p 12345:12345 \ |
||||
dolphinscheduler api-server |
||||
``` |
||||
|
||||
* Start a **alert server**, For example: |
||||
|
||||
``` |
||||
$ docker run -dit --name dolphinscheduler \ |
||||
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \ |
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \ |
||||
dolphinscheduler alert-server |
||||
``` |
||||
|
||||
* Start a **frontend**, For example: |
||||
|
||||
``` |
||||
$ docker run -dit --name dolphinscheduler \ |
||||
-e FRONTEND_API_SERVER_HOST="192.168.x.x" -e FRONTEND_API_SERVER_PORT="12345" \ |
||||
-p 8888:8888 \ |
||||
dolphinscheduler frontend |
||||
``` |
||||
|
||||
**Note**: You must specify `POSTGRESQL_HOST` `POSTGRESQL_PORT` `POSTGRESQL_DATABASE` `POSTGRESQL_USERNAME` `POSTGRESQL_PASSWORD` `ZOOKEEPER_QUORUM` when starting a standalone dolphinscheduler server.
||||
|
||||
## How to build a docker image |
||||
|
||||
You can build a docker image on a Unix-like operating system, and you can also build it on a Windows operating system.
||||
|
||||
In Unix-Like, Example: |
||||
|
||||
```bash |
||||
$ cd path/incubator-dolphinscheduler |
||||
$ sh ./dockerfile/hooks/build |
||||
``` |
||||
|
||||
In Windows, Example: |
||||
|
||||
```bat |
||||
c:\incubator-dolphinscheduler>.\dockerfile\hooks\build.bat |
||||
``` |
||||
|
||||
Please read `./dockerfile/hooks/build` `./dockerfile/hooks/build.bat` script files if you don't understand |
||||
|
||||
## Environment Variables |
||||
|
||||
The Dolphin Scheduler image uses several environment variables which are easy to miss. While none of the variables are required, they may significantly aid you in using the image. |
||||
|
||||
**`POSTGRESQL_HOST`** |
||||
|
||||
This environment variable sets the host for PostgreSQL. The default value is `127.0.0.1`. |
||||
|
||||
**Note**: You must specify it when starting a standalone dolphinscheduler server, such as `master-server`, `worker-server`, `api-server`, `alert-server`.
||||
|
||||
**`POSTGRESQL_PORT`** |
||||
|
||||
This environment variable sets the port for PostgreSQL. The default value is `5432`. |
||||
|
||||
**Note**: You must specify it when starting a standalone dolphinscheduler server, such as `master-server`, `worker-server`, `api-server`, `alert-server`.
||||
|
||||
**`POSTGRESQL_USERNAME`** |
||||
|
||||
This environment variable sets the username for PostgreSQL. The default value is `root`. |
||||
|
||||
**Note**: You must specify it when starting a standalone dolphinscheduler server, such as `master-server`, `worker-server`, `api-server`, `alert-server`.
||||
|
||||
**`POSTGRESQL_PASSWORD`** |
||||
|
||||
This environment variable sets the password for PostgreSQL. The default value is `root`. |
||||
|
||||
**Note**: You must specify it when starting a standalone dolphinscheduler server, such as `master-server`, `worker-server`, `api-server`, `alert-server`.
||||
|
||||
**`POSTGRESQL_DATABASE`** |
||||
|
||||
This environment variable sets the database for PostgreSQL. The default value is `dolphinscheduler`. |
||||
|
||||
**Note**: You must specify it when starting a standalone dolphinscheduler server, such as `master-server`, `worker-server`, `api-server`, `alert-server`.
||||
|
||||
**`DOLPHINSCHEDULER_ENV_PATH`** |
||||
|
||||
This environment variable sets the runtime environment for task. The default value is `/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh`. |
||||
|
||||
**`DOLPHINSCHEDULER_DATA_BASEDIR_PATH`** |
||||
|
||||
User data directory path, self configuration, please make sure the directory exists and have read write permissions. The default value is `/tmp/dolphinscheduler` |
||||
|
||||
**`ZOOKEEPER_QUORUM`** |
||||
|
||||
This environment variable sets zookeeper quorum for `master-server` and `worker-server`. The default value is `127.0.0.1:2181`.
||||
|
||||
**Note**: You must specify it when starting a standalone dolphinscheduler server, such as `master-server`, `worker-server`.
||||
|
||||
**`MASTER_EXEC_THREADS`** |
||||
|
||||
This environment variable sets exec thread num for `master-server`. The default value is `100`. |
||||
|
||||
**`MASTER_EXEC_TASK_NUM`** |
||||
|
||||
This environment variable sets exec task num for `master-server`. The default value is `20`. |
||||
|
||||
**`MASTER_HEARTBEAT_INTERVAL`** |
||||
|
||||
This environment variable sets heartbeat interval for `master-server`. The default value is `10`. |
||||
|
||||
**`MASTER_TASK_COMMIT_RETRYTIMES`** |
||||
|
||||
This environment variable sets task commit retry times for `master-server`. The default value is `5`. |
||||
|
||||
**`MASTER_TASK_COMMIT_INTERVAL`** |
||||
|
||||
This environment variable sets task commit interval for `master-server`. The default value is `1000`. |
||||
|
||||
**`MASTER_MAX_CPULOAD_AVG`** |
||||
|
||||
This environment variable sets max cpu load avg for `master-server`. The default value is `100`. |
||||
|
||||
**`MASTER_RESERVED_MEMORY`** |
||||
|
||||
This environment variable sets reserved memory for `master-server`. The default value is `0.1`. |
||||
|
||||
**`MASTER_LISTEN_PORT`** |
||||
|
||||
This environment variable sets port for `master-server`. The default value is `5678`. |
||||
|
||||
**`WORKER_EXEC_THREADS`** |
||||
|
||||
This environment variable sets exec thread num for `worker-server`. The default value is `100`. |
||||
|
||||
**`WORKER_HEARTBEAT_INTERVAL`** |
||||
|
||||
This environment variable sets heartbeat interval for `worker-server`. The default value is `10`. |
||||
|
||||
**`WORKER_FETCH_TASK_NUM`** |
||||
|
||||
This environment variable sets fetch task num for `worker-server`. The default value is `3`. |
||||
|
||||
**`WORKER_MAX_CPULOAD_AVG`** |
||||
|
||||
This environment variable sets max cpu load avg for `worker-server`. The default value is `100`. |
||||
|
||||
**`WORKER_RESERVED_MEMORY`** |
||||
|
||||
This environment variable sets reserved memory for `worker-server`. The default value is `0.1`. |
||||
|
||||
**`WORKER_LISTEN_PORT`** |
||||
|
||||
This environment variable sets port for `worker-server`. The default value is `1234`. |
||||
|
||||
**`WORKER_GROUP`** |
||||
|
||||
This environment variable sets group for `worker-server`. The default value is `default`. |
||||
|
||||
**`XLS_FILE_PATH`** |
||||
|
||||
This environment variable sets xls file path for `alert-server`. The default value is `/tmp/xls`. |
||||
|
||||
**`MAIL_SERVER_HOST`** |
||||
|
||||
This environment variable sets mail server host for `alert-server`. The default value is empty. |
||||
|
||||
**`MAIL_SERVER_PORT`** |
||||
|
||||
This environment variable sets mail server port for `alert-server`. The default value is empty. |
||||
|
||||
**`MAIL_SENDER`** |
||||
|
||||
This environment variable sets mail sender for `alert-server`. The default value is empty. |
||||
|
||||
**`MAIL_USER`**
||||
|
||||
This environment variable sets mail user for `alert-server`. The default value is empty. |
||||
|
||||
**`MAIL_PASSWD`** |
||||
|
||||
This environment variable sets mail password for `alert-server`. The default value is empty. |
||||
|
||||
**`MAIL_SMTP_STARTTLS_ENABLE`** |
||||
|
||||
This environment variable sets SMTP tls for `alert-server`. The default value is `true`. |
||||
|
||||
**`MAIL_SMTP_SSL_ENABLE`** |
||||
|
||||
This environment variable sets SMTP ssl for `alert-server`. The default value is `false`. |
||||
|
||||
**`MAIL_SMTP_SSL_TRUST`** |
||||
|
||||
This environment variable sets SMTP ssl truest for `alert-server`. The default value is empty. |
||||
|
||||
**`ENTERPRISE_WECHAT_ENABLE`** |
||||
|
||||
This environment variable sets enterprise wechat enable for `alert-server`. The default value is `false`. |
||||
|
||||
**`ENTERPRISE_WECHAT_CORP_ID`** |
||||
|
||||
This environment variable sets enterprise wechat corp id for `alert-server`. The default value is empty. |
||||
|
||||
**`ENTERPRISE_WECHAT_SECRET`** |
||||
|
||||
This environment variable sets enterprise wechat secret for `alert-server`. The default value is empty. |
||||
|
||||
**`ENTERPRISE_WECHAT_AGENT_ID`** |
||||
|
||||
This environment variable sets enterprise wechat agent id for `alert-server`. The default value is empty. |
||||
|
||||
**`ENTERPRISE_WECHAT_USERS`** |
||||
|
||||
This environment variable sets enterprise wechat users for `alert-server`. The default value is empty. |
||||
|
||||
**`FRONTEND_API_SERVER_HOST`** |
||||
|
||||
This environment variable sets api server host for `frontend`. The default value is `127.0.0.1`. |
||||
|
||||
**Note**: You must specify it when starting a standalone dolphinscheduler server, such as `api-server`.
||||
|
||||
**`FRONTEND_API_SERVER_PORT`** |
||||
|
||||
This environment variable sets api server port for `frontend`. The default value is `12345`.
||||
|
||||
**Note**: You must specify it when starting a standalone dolphinscheduler server, such as `api-server`.
||||
|
||||
## Initialization scripts |
||||
|
||||
If you would like to do additional initialization in an image derived from this one, add one or more environment variable under `/root/start-init-conf.sh`, and modify template files in `/opt/dolphinscheduler/conf/*.tpl`. |
||||
|
||||
For example, to add an environment variable `API_SERVER_PORT` in `/root/start-init-conf.sh`: |
||||
|
||||
``` |
||||
export API_SERVER_PORT=5555 |
||||
``` |
||||
|
||||
and to modify `/opt/dolphinscheduler/conf/application-api.properties.tpl` template file, add server port: |
||||
``` |
||||
server.port=${API_SERVER_PORT} |
||||
``` |
||||
|
||||
`/root/start-init-conf.sh` will dynamically generate config file: |
||||
|
||||
```sh |
||||
echo "generate app config" |
||||
ls ${DOLPHINSCHEDULER_HOME}/conf/ | grep ".tpl" | while read line; do |
||||
eval "cat << EOF |
||||
$(cat ${DOLPHINSCHEDULER_HOME}/conf/${line}) |
||||
EOF |
||||
" > ${DOLPHINSCHEDULER_HOME}/conf/${line%.*} |
||||
done |
||||
|
||||
echo "generate nginx config" |
||||
sed -i "s/FRONTEND_API_SERVER_HOST/${FRONTEND_API_SERVER_HOST}/g" /etc/nginx/conf.d/dolphinscheduler.conf |
||||
sed -i "s/FRONTEND_API_SERVER_PORT/${FRONTEND_API_SERVER_PORT}/g" /etc/nginx/conf.d/dolphinscheduler.conf |
||||
``` |
@ -1,328 +0,0 @@
|
||||
## Dolphin Scheduler是什么? |
||||
|
||||
一个分布式易扩展的可视化DAG工作流任务调度系统。致力于解决数据处理流程中错综复杂的依赖关系,使调度系统在数据处理流程中`开箱即用`。 |
||||
|
||||
Github URL: https://github.com/apache/incubator-dolphinscheduler |
||||
|
||||
Official Website: https://dolphinscheduler.apache.org |
||||
|
||||
![Dolphin Scheduler](https://dolphinscheduler.apache.org/img/hlogo_colorful.svg) |
||||
|
||||
[![EN doc](https://img.shields.io/badge/document-English-blue.svg)](README.md) |
||||
[![CN doc](https://img.shields.io/badge/文档-中文版-blue.svg)](README_zh_CN.md) |
||||
|
||||
## 如何使用docker镜像 |
||||
|
||||
#### 你可以运行一个dolphinscheduler实例 |
||||
``` |
||||
$ docker run -dit --name dolphinscheduler \ |
||||
-e POSTGRESQL_USERNAME=test -e POSTGRESQL_PASSWORD=test -e POSTGRESQL_DATABASE=dolphinscheduler \ |
||||
-p 8888:8888 \ |
||||
dolphinscheduler all |
||||
``` |
||||
|
||||
在`startup.sh`脚本中,默认的创建`Postgres`的用户、密码和数据库,默认值分别为:`root`、`root`、`dolphinscheduler`。 |
||||
|
||||
同时,默认的`Zookeeper`也会在`startup.sh`脚本中被创建。 |
||||
|
||||
#### 或者通过环境变量 **`POSTGRESQL_HOST`** **`POSTGRESQL_PORT`** **`ZOOKEEPER_QUORUM`** 使用已存在的服务 |
||||
|
||||
你可以指定一个已经存在的 **`Postgres`** 服务. 如下: |
||||
|
||||
``` |
||||
$ docker run -dit --name dolphinscheduler \ |
||||
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \ |
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \ |
||||
-p 8888:8888 \ |
||||
dolphinscheduler all |
||||
``` |
||||
|
||||
你也可以指定一个已经存在的 **Zookeeper** 服务. 如下: |
||||
|
||||
``` |
||||
$ docker run -dit --name dolphinscheduler \ |
||||
-e ZOOKEEPER_QUORUM="192.168.x.x:2181" \
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" -e POSTGRESQL_DATABASE="dolphinscheduler" \ |
||||
-p 8888:8888 \ |
||||
dolphinscheduler all |
||||
``` |
||||
|
||||
#### 或者运行dolphinscheduler中的部分服务 |
||||
|
||||
你能够运行dolphinscheduler中的部分服务。 |
||||
|
||||
* 启动一个 **master server**, 如下: |
||||
|
||||
``` |
||||
$ docker run -dit --name dolphinscheduler \ |
||||
-e ZOOKEEPER_QUORUM="192.168.x.x:2181" \
||||
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \ |
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \ |
||||
dolphinscheduler master-server |
||||
``` |
||||
|
||||
* 启动一个 **worker server**, 如下: |
||||
|
||||
``` |
||||
$ docker run -dit --name dolphinscheduler \ |
||||
-e ZOOKEEPER_QUORUM="192.168.x.x:2181" \
||||
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \ |
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \ |
||||
dolphinscheduler worker-server |
||||
``` |
||||
|
||||
* 启动一个 **api server**, 如下: |
||||
|
||||
``` |
||||
$ docker run -dit --name dolphinscheduler \ |
||||
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \ |
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \ |
||||
-p 12345:12345 \ |
||||
dolphinscheduler api-server |
||||
``` |
||||
|
||||
* 启动一个 **alert server**, 如下: |
||||
|
||||
``` |
||||
$ docker run -dit --name dolphinscheduler \ |
||||
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \ |
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \ |
||||
dolphinscheduler alert-server |
||||
``` |
||||
|
||||
* 启动一个 **frontend**, 如下: |
||||
|
||||
``` |
||||
$ docker run -dit --name dolphinscheduler \ |
||||
-e FRONTEND_API_SERVER_HOST="192.168.x.x" -e FRONTEND_API_SERVER_PORT="12345" \ |
||||
-p 8888:8888 \ |
||||
dolphinscheduler frontend |
||||
``` |
||||
|
||||
**注意**: 当你运行dolphinscheduler中的部分服务时,你必须指定这些环境变量 `POSTGRESQL_HOST` `POSTGRESQL_PORT` `POSTGRESQL_DATABASE` `POSTGRESQL_USERNAME` `POSTGRESQL_PASSWORD` `ZOOKEEPER_QUORUM`。 |
||||
|
||||
## 如何构建一个docker镜像 |
||||
|
||||
你能够在类Unix系统和Windows系统中构建一个docker镜像。 |
||||
|
||||
类Unix系统, 如下: |
||||
|
||||
```bash |
||||
$ cd path/incubator-dolphinscheduler |
||||
$ sh ./dockerfile/hooks/build |
||||
``` |
||||
|
||||
Windows系统, 如下: |
||||
|
||||
```bat |
||||
c:\incubator-dolphinscheduler>.\dockerfile\hooks\build.bat |
||||
``` |
||||
|
||||
如果你不理解这些脚本 `./dockerfile/hooks/build` `./dockerfile/hooks/build.bat`,请阅读里面的内容。 |
||||
|
||||
## 环境变量 |
||||
|
||||
Dolphin Scheduler映像使用了几个容易遗漏的环境变量。虽然这些变量不是必须的,但是可以帮助你更容易配置镜像并根据你的需求定义相应的服务配置。 |
||||
|
||||
**`POSTGRESQL_HOST`** |
||||
|
||||
配置`PostgreSQL`的`HOST`, 默认值 `127.0.0.1`。 |
||||
|
||||
**注意**: 当运行`dolphinscheduler`中`master-server`、`worker-server`、`api-server`、`alert-server`这些服务时,必须指定这个环境变量,以便于你更好的搭建分布式服务。 |
||||
|
||||
**`POSTGRESQL_PORT`** |
||||
|
||||
配置`PostgreSQL`的`PORT`, 默认值 `5432`。 |
||||
|
||||
**注意**: 当运行`dolphinscheduler`中`master-server`、`worker-server`、`api-server`、`alert-server`这些服务时,必须指定这个环境变量,以便于你更好的搭建分布式服务。 |
||||
|
||||
**`POSTGRESQL_USERNAME`** |
||||
|
||||
配置`PostgreSQL`的`USERNAME`, 默认值 `root`。 |
||||
|
||||
**注意**: 当运行`dolphinscheduler`中`master-server`、`worker-server`、`api-server`、`alert-server`这些服务时,必须指定这个环境变量,以便于你更好的搭建分布式服务。 |
||||
|
||||
**`POSTGRESQL_PASSWORD`** |
||||
|
||||
配置`PostgreSQL`的`PASSWORD`, 默认值 `root`。 |
||||
|
||||
**注意**: 当运行`dolphinscheduler`中`master-server`、`worker-server`、`api-server`、`alert-server`这些服务时,必须指定这个环境变量,以便于你更好的搭建分布式服务。 |
||||
|
||||
**`POSTGRESQL_DATABASE`** |
||||
|
||||
配置`PostgreSQL`的`DATABASE`, 默认值 `dolphinscheduler`。 |
||||
|
||||
**注意**: 当运行`dolphinscheduler`中`master-server`、`worker-server`、`api-server`、`alert-server`这些服务时,必须指定这个环境变量,以便于你更好的搭建分布式服务。 |
||||
|
||||
**`DOLPHINSCHEDULER_ENV_PATH`** |
||||
|
||||
任务执行时的环境变量配置文件, 默认值 `/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh`。 |
||||
|
||||
**`DOLPHINSCHEDULER_DATA_BASEDIR_PATH`** |
||||
|
||||
用户数据目录, 用户自己配置, 请确保这个目录存在并且用户读写权限, 默认值 `/tmp/dolphinscheduler`。 |
||||
|
||||
**`ZOOKEEPER_QUORUM`** |
||||
|
||||
配置`master-server`和`worker-server`的`Zookeeper`地址, 默认值 `127.0.0.1:2181`。
||||
|
||||
**注意**: 当运行`dolphinscheduler`中`master-server`、`worker-server`这些服务时,必须指定这个环境变量,以便于你更好的搭建分布式服务。 |
||||
|
||||
**`MASTER_EXEC_THREADS`** |
||||
|
||||
配置`master-server`中的执行线程数量,默认值 `100`。 |
||||
|
||||
**`MASTER_EXEC_TASK_NUM`** |
||||
|
||||
配置`master-server`中的执行任务数量,默认值 `20`。 |
||||
|
||||
**`MASTER_HEARTBEAT_INTERVAL`** |
||||
|
||||
配置`master-server`中的心跳交互时间,默认值 `10`。 |
||||
|
||||
**`MASTER_TASK_COMMIT_RETRYTIMES`** |
||||
|
||||
配置`master-server`中的任务提交重试次数,默认值 `5`。 |
||||
|
||||
**`MASTER_TASK_COMMIT_INTERVAL`** |
||||
|
||||
配置`master-server`中的任务提交交互时间,默认值 `1000`。 |
||||
|
||||
**`MASTER_MAX_CPULOAD_AVG`** |
||||
|
||||
配置`master-server`中的CPU中的`load average`值,默认值 `100`。 |
||||
|
||||
**`MASTER_RESERVED_MEMORY`** |
||||
|
||||
配置`master-server`的保留内存,默认值 `0.1`。 |
||||
|
||||
**`MASTER_LISTEN_PORT`** |
||||
|
||||
配置`master-server`的端口,默认值 `5678`。 |
||||
|
||||
**`WORKER_EXEC_THREADS`** |
||||
|
||||
配置`worker-server`中的执行线程数量,默认值 `100`。 |
||||
|
||||
**`WORKER_HEARTBEAT_INTERVAL`** |
||||
|
||||
配置`worker-server`中的心跳交互时间,默认值 `10`。 |
||||
|
||||
**`WORKER_FETCH_TASK_NUM`** |
||||
|
||||
配置`worker-server`中的获取任务的数量,默认值 `3`。 |
||||
|
||||
**`WORKER_MAX_CPULOAD_AVG`** |
||||
|
||||
配置`worker-server`中的CPU中的最大`load average`值,默认值 `100`。 |
||||
|
||||
**`WORKER_RESERVED_MEMORY`** |
||||
|
||||
配置`worker-server`的保留内存,默认值 `0.1`。 |
||||
|
||||
**`WORKER_LISTEN_PORT`** |
||||
|
||||
配置`worker-server`的端口,默认值 `1234`。 |
||||
|
||||
**`WORKER_GROUP`** |
||||
|
||||
配置`worker-server`的分组,默认值 `default`。 |
||||
|
||||
**`XLS_FILE_PATH`** |
||||
|
||||
配置`alert-server`的`XLS`文件的存储路径,默认值 `/tmp/xls`。 |
||||
|
||||
**`MAIL_SERVER_HOST`** |
||||
|
||||
配置`alert-server`的邮件服务地址,默认值 `空`。 |
||||
|
||||
**`MAIL_SERVER_PORT`** |
||||
|
||||
配置`alert-server`的邮件服务端口,默认值 `空`。 |
||||
|
||||
**`MAIL_SENDER`** |
||||
|
||||
配置`alert-server`的邮件发送人,默认值 `空`。 |
||||
|
||||
**`MAIL_USER`**
||||
|
||||
配置`alert-server`的邮件服务用户名,默认值 `空`。 |
||||
|
||||
**`MAIL_PASSWD`** |
||||
|
||||
配置`alert-server`的邮件服务用户密码,默认值 `空`。 |
||||
|
||||
**`MAIL_SMTP_STARTTLS_ENABLE`** |
||||
|
||||
配置`alert-server`的邮件服务是否启用TLS,默认值 `true`。 |
||||
|
||||
**`MAIL_SMTP_SSL_ENABLE`** |
||||
|
||||
配置`alert-server`的邮件服务是否启用SSL,默认值 `false`。 |
||||
|
||||
**`MAIL_SMTP_SSL_TRUST`** |
||||
|
||||
配置`alert-server`的邮件服务SSL的信任地址,默认值 `空`。 |
||||
|
||||
**`ENTERPRISE_WECHAT_ENABLE`** |
||||
|
||||
配置`alert-server`的邮件服务是否启用企业微信,默认值 `false`。 |
||||
|
||||
**`ENTERPRISE_WECHAT_CORP_ID`** |
||||
|
||||
配置`alert-server`的邮件服务企业微信`ID`,默认值 `空`。 |
||||
|
||||
**`ENTERPRISE_WECHAT_SECRET`** |
||||
|
||||
配置`alert-server`的邮件服务企业微信`SECRET`,默认值 `空`。 |
||||
|
||||
**`ENTERPRISE_WECHAT_AGENT_ID`** |
||||
|
||||
配置`alert-server`的邮件服务企业微信`AGENT_ID`,默认值 `空`。 |
||||
|
||||
**`ENTERPRISE_WECHAT_USERS`** |
||||
|
||||
配置`alert-server`的邮件服务企业微信`USERS`,默认值 `空`。 |
||||
|
||||
**`FRONTEND_API_SERVER_HOST`** |
||||
|
||||
配置`frontend`的连接`api-server`的地址,默认值 `127.0.0.1`。 |
||||
|
||||
**Note**: 当单独运行`api-server`时,你应该指定`api-server`这个值。 |
||||
|
||||
**`FRONTEND_API_SERVER_PORT`** |
||||
|
||||
配置`frontend`的连接`api-server`的端口,默认值 `12345`。 |
||||
|
||||
**Note**: 当单独运行`api-server`时,你应该指定`api-server`这个值。 |
||||
|
||||
## 初始化脚本 |
||||
|
||||
如果你想在编译的时候或者运行的时候附加一些其它的操作及新增一些环境变量,你可以在`/root/start-init-conf.sh`文件中进行修改,同时如果涉及到配置文件的修改,请在`/opt/dolphinscheduler/conf/*.tpl`中修改相应的配置文件 |
||||
|
||||
例如,在`/root/start-init-conf.sh`添加一个环境变量`API_SERVER_PORT`: |
||||
|
||||
``` |
||||
export API_SERVER_PORT=5555 |
||||
``` |
||||
|
||||
当添加以上环境变量后,你应该在相应的模板文件`/opt/dolphinscheduler/conf/application-api.properties.tpl`中添加这个环境变量配置: |
||||
``` |
||||
server.port=${API_SERVER_PORT} |
||||
``` |
||||
|
||||
`/root/start-init-conf.sh`将根据模板文件动态的生成配置文件: |
||||
|
||||
```sh |
||||
echo "generate app config" |
||||
ls ${DOLPHINSCHEDULER_HOME}/conf/ | grep ".tpl" | while read line; do |
||||
eval "cat << EOF |
||||
$(cat ${DOLPHINSCHEDULER_HOME}/conf/${line}) |
||||
EOF |
||||
" > ${DOLPHINSCHEDULER_HOME}/conf/${line%.*} |
||||
done |
||||
|
||||
echo "generate nginx config" |
||||
sed -i "s/FRONTEND_API_SERVER_HOST/${FRONTEND_API_SERVER_HOST}/g" /etc/nginx/conf.d/dolphinscheduler.conf |
||||
sed -i "s/FRONTEND_API_SERVER_PORT/${FRONTEND_API_SERVER_PORT}/g" /etc/nginx/conf.d/dolphinscheduler.conf |
||||
``` |
@ -1,27 +0,0 @@
|
||||
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Health-check helper: verify that a java process whose command line
# contains $1 (e.g. "master-server") is running.
# Exit status: 0 when the process exists, 1 otherwise.
set -e

# "$1" is quoted to prevent word-splitting/globbing of the grep pattern;
# "grep -v grep" excludes this pipeline's own grep processes from the count.
if [ "$(ps -ef | grep java | grep -v grep | grep -c "$1")" -eq 0 ]; then
    echo "[ERROR] $1 process does not exist."
    exit 1
else
    echo "[INFO] $1 process exists."
    exit 0
fi
@ -1,50 +0,0 @@
|
||||
# |
||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
||||
# contributor license agreements. See the NOTICE file distributed with |
||||
# this work for additional information regarding copyright ownership. |
||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
# (the "License"); you may not use this file except in compliance with |
||||
# the License. You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
# |
||||
#alert type is EMAIL/SMS |
||||
alert.type=EMAIL |
||||
|
||||
# alert msg template, default is html template
||||
#alert.template=html |
||||
# mail server configuration |
||||
mail.protocol=SMTP |
||||
mail.server.host=${MAIL_SERVER_HOST} |
||||
mail.server.port=${MAIL_SERVER_PORT} |
||||
mail.sender=${MAIL_SENDER} |
||||
mail.user=${MAIL_USER} |
||||
mail.passwd=${MAIL_PASSWD} |
||||
# TLS |
||||
mail.smtp.starttls.enable=${MAIL_SMTP_STARTTLS_ENABLE} |
||||
# SSL |
||||
mail.smtp.ssl.enable=${MAIL_SMTP_SSL_ENABLE} |
||||
mail.smtp.ssl.trust=${MAIL_SMTP_SSL_TRUST} |
||||
|
||||
#xls file path, create it first if it does not exist
||||
xls.file.path=${XLS_FILE_PATH} |
||||
|
||||
# Enterprise WeChat configuration |
||||
enterprise.wechat.enable=${ENTERPRISE_WECHAT_ENABLE} |
||||
enterprise.wechat.corp.id=${ENTERPRISE_WECHAT_CORP_ID} |
||||
enterprise.wechat.secret=${ENTERPRISE_WECHAT_SECRET} |
||||
enterprise.wechat.agent.id=${ENTERPRISE_WECHAT_AGENT_ID} |
||||
enterprise.wechat.users=${ENTERPRISE_WECHAT_USERS} |
||||
enterprise.wechat.token.url=https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret |
||||
enterprise.wechat.push.url=https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token |
||||
enterprise.wechat.team.send.msg={\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"} |
||||
enterprise.wechat.user.send.msg={\"touser\":\"$toUser\",\"agentid\":\"$agentId\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"$msg\"}} |
||||
|
||||
|
||||
|
@ -1,45 +0,0 @@
|
||||
# |
||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
||||
# contributor license agreements. See the NOTICE file distributed with |
||||
# this work for additional information regarding copyright ownership. |
||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
# (the "License"); you may not use this file except in compliance with |
||||
# the License. You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
# |
||||
|
||||
# server port |
||||
server.port=12345 |
||||
|
||||
# session config |
||||
server.servlet.session.timeout=7200 |
||||
|
||||
# servlet config |
||||
server.servlet.context-path=/dolphinscheduler/ |
||||
|
||||
# file size limit for upload |
||||
spring.servlet.multipart.max-file-size=1024MB |
||||
spring.servlet.multipart.max-request-size=1024MB |
||||
|
||||
# post content |
||||
server.jetty.max-http-post-size=5000000 |
||||
|
||||
# i18n |
||||
spring.messages.encoding=UTF-8 |
||||
|
||||
#i18n classpath folder , file prefix messages, if have many files, use "," seperator |
||||
spring.messages.basename=i18n/messages |
||||
|
||||
# Authentication types (supported types: PASSWORD) |
||||
security.authentication.type=PASSWORD |
||||
|
||||
|
||||
|
||||
|
@ -1,78 +0,0 @@
|
||||
# |
||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
||||
# contributor license agreements. See the NOTICE file distributed with |
||||
# this work for additional information regarding copyright ownership. |
||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
# (the "License"); you may not use this file except in compliance with |
||||
# the License. You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
# |
||||
|
||||
#============================================================================ |
||||
# System |
||||
#============================================================================ |
||||
# system env path. self configuration, please make sure the directory and file exists and have read write execute permissions |
||||
dolphinscheduler.env.path=${DOLPHINSCHEDULER_ENV_PATH} |
||||
|
||||
# user data directory path, self configuration, please make sure the directory exists and have read write permissions |
||||
data.basedir.path=${DOLPHINSCHEDULER_DATA_BASEDIR_PATH} |
||||
|
||||
# resource upload startup type : HDFS,S3,NONE |
||||
resource.storage.type=NONE |
||||
|
||||
#============================================================================ |
||||
# HDFS |
||||
#============================================================================ |
||||
# resource store on HDFS/S3 path, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions。"/dolphinscheduler" is recommended |
||||
#resource.upload.path=/dolphinscheduler |
||||
|
||||
# whether kerberos starts |
||||
#hadoop.security.authentication.startup.state=false |
||||
|
||||
# java.security.krb5.conf path |
||||
#java.security.krb5.conf.path=/opt/krb5.conf |
||||
|
||||
# loginUserFromKeytab user |
||||
#login.user.keytab.username=hdfs-mycluster@ESZ.COM |
||||
|
||||
# loginUserFromKeytab path |
||||
#login.user.keytab.path=/opt/hdfs.headless.keytab |
||||
|
||||
#resource.view.suffixs |
||||
#resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml,properties |
||||
|
||||
# if resource.storage.type=HDFS, the user need to have permission to create directories under the HDFS root path |
||||
hdfs.root.user=hdfs |
||||
|
||||
# kerberos expire time |
||||
kerberos.expire.time=7 |
||||
|
||||
#============================================================================ |
||||
# S3 |
||||
#============================================================================ |
||||
# if resource.storage.type=S3,the value like: s3a://dolphinscheduler ; if resource.storage.type=HDFS, When namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml to conf dir |
||||
fs.defaultFS=hdfs://mycluster:8020 |
||||
|
||||
# if resource.storage.type=S3,s3 endpoint |
||||
#fs.s3a.endpoint=http://192.168.199.91:9010 |
||||
|
||||
# if resource.storage.type=S3,s3 access key |
||||
#fs.s3a.access.key=A3DXS30FO22544RE |
||||
|
||||
# if resource.storage.type=S3,s3 secret key |
||||
#fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK |
||||
|
||||
# if not use hadoop resourcemanager, please keep default value; if resourcemanager HA enable, please type the HA ips ; if resourcemanager is single, make this value empty TODO |
||||
yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx |
||||
|
||||
# If resourcemanager HA enable or not use resourcemanager, please keep the default value; If resourcemanager is single, you only need to replace ark1 to actual resourcemanager hostname. |
||||
yarn.application.status.address=http://ark1:8088/ws/v1/cluster/apps/%s |
||||
|
||||
|
@ -1,71 +0,0 @@
|
||||
# |
||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
||||
# contributor license agreements. See the NOTICE file distributed with |
||||
# this work for additional information regarding copyright ownership. |
||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
# (the "License"); you may not use this file except in compliance with |
||||
# the License. You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
# |
||||
|
||||
|
||||
# mysql |
||||
#spring.datasource.driver-class-name=com.mysql.jdbc.Driver |
||||
#spring.datasource.url=jdbc:mysql://192.168.xx.xx:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8 |
||||
|
||||
# postgre |
||||
spring.datasource.driver-class-name=org.postgresql.Driver |
||||
spring.datasource.url=jdbc:postgresql://${POSTGRESQL_HOST}:${POSTGRESQL_PORT}/${POSTGRESQL_DATABASE}?characterEncoding=utf8 |
||||
spring.datasource.username=${POSTGRESQL_USERNAME} |
||||
spring.datasource.password=${POSTGRESQL_PASSWORD} |
||||
|
||||
## base spring data source configuration todo need to remove |
||||
#spring.datasource.type=com.alibaba.druid.pool.DruidDataSource |
||||
|
||||
# connection configuration |
||||
#spring.datasource.initialSize=5 |
||||
# min connection number |
||||
#spring.datasource.minIdle=5 |
||||
# max connection number |
||||
#spring.datasource.maxActive=50 |
||||
|
||||
# max wait time for get a connection in milliseconds. if configuring maxWait, fair locks are enabled by default and concurrency efficiency decreases. |
||||
# If necessary, unfair locks can be used by configuring the useUnfairLock attribute to true. |
||||
#spring.datasource.maxWait=60000 |
||||
|
||||
# milliseconds for check to close free connections |
||||
#spring.datasource.timeBetweenEvictionRunsMillis=60000 |
||||
|
||||
# the Destroy thread detects the connection interval and closes the physical connection in milliseconds if the connection idle time is greater than or equal to minEvictableIdleTimeMillis. |
||||
#spring.datasource.timeBetweenConnectErrorMillis=60000 |
||||
|
||||
# the longest time a connection remains idle without being evicted, in milliseconds |
||||
#spring.datasource.minEvictableIdleTimeMillis=300000 |
||||
|
||||
#the SQL used to check whether the connection is valid requires a query statement. If validation Query is null, testOnBorrow, testOnReturn, and testWhileIdle will not work. |
||||
#spring.datasource.validationQuery=SELECT 1 |
||||
|
||||
#check whether the connection is valid for timeout, in seconds |
||||
#spring.datasource.validationQueryTimeout=3 |
||||
|
||||
# when applying for a connection, if it is detected that the connection is idle longer than time Between Eviction Runs Millis, |
||||
# validation Query is performed to check whether the connection is valid |
||||
#spring.datasource.testWhileIdle=true |
||||
|
||||
#execute validation to check if the connection is valid when applying for a connection |
||||
#spring.datasource.testOnBorrow=true |
||||
#execute validation to check if the connection is valid when the connection is returned |
||||
#spring.datasource.testOnReturn=false |
||||
#spring.datasource.defaultAutoCommit=true |
||||
#spring.datasource.keepAlive=true |
||||
|
||||
# open PSCache, specify count PSCache for every connection |
||||
#spring.datasource.poolPreparedStatements=true |
||||
#spring.datasource.maxPoolPreparedStatementPerConnectionSize=20 |
@ -1,26 +0,0 @@
|
||||
# |
||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
||||
# contributor license agreements. See the NOTICE file distributed with |
||||
# this work for additional information regarding copyright ownership. |
||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
# (the "License"); you may not use this file except in compliance with |
||||
# the License. You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
# |
||||
|
||||
export HADOOP_HOME=/opt/soft/hadoop |
||||
export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop |
||||
export SPARK_HOME1=/opt/soft/spark1 |
||||
export SPARK_HOME2=/opt/soft/spark2 |
||||
export PYTHON_HOME=/opt/soft/python |
||||
export JAVA_HOME=/opt/soft/java |
||||
export HIVE_HOME=/opt/soft/hive |
||||
export FLINK_HOME=/opt/soft/flink |
||||
export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$FLINK_HOME/bin:$PATH |
@ -1,40 +0,0 @@
|
||||
# |
||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
||||
# contributor license agreements. See the NOTICE file distributed with |
||||
# this work for additional information regarding copyright ownership. |
||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
# (the "License"); you may not use this file except in compliance with |
||||
# the License. You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
# |
||||
|
||||
# master execute thread num |
||||
master.exec.threads=${MASTER_EXEC_THREADS} |
||||
|
||||
# master execute task number in parallel |
||||
master.exec.task.num=${MASTER_EXEC_TASK_NUM} |
||||
|
||||
# master heartbeat interval |
||||
master.heartbeat.interval=${MASTER_HEARTBEAT_INTERVAL} |
||||
|
||||
# master commit task retry times |
||||
master.task.commit.retryTimes=${MASTER_TASK_COMMIT_RETRYTIMES} |
||||
|
||||
# master commit task interval |
||||
master.task.commit.interval=${MASTER_TASK_COMMIT_INTERVAL} |
||||
|
||||
# only less than cpu avg load, master server can work. default value : the number of cpu cores * 2 |
||||
master.max.cpuload.avg=${MASTER_MAX_CPULOAD_AVG} |
||||
|
||||
# only larger than reserved memory, master server can work. default value : physical memory * 1/10, unit is G. |
||||
master.reserved.memory=${MASTER_RESERVED_MEMORY} |
||||
|
||||
# master listen port |
||||
#master.listen.port=${MASTER_LISTEN_PORT} |
@ -1,54 +0,0 @@
|
||||
# |
||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
||||
# contributor license agreements. See the NOTICE file distributed with |
||||
# this work for additional information regarding copyright ownership. |
||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
# (the "License"); you may not use this file except in compliance with |
||||
# the License. You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
# |
||||
|
||||
#============================================================================ |
||||
# Configure Main Scheduler Properties |
||||
#============================================================================ |
||||
#org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.StdJDBCDelegate |
||||
#org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.PostgreSQLDelegate |
||||
|
||||
#org.quartz.scheduler.instanceName = DolphinScheduler |
||||
#org.quartz.scheduler.instanceId = AUTO |
||||
#org.quartz.scheduler.makeSchedulerThreadDaemon = true |
||||
#org.quartz.jobStore.useProperties = false |
||||
|
||||
#============================================================================ |
||||
# Configure ThreadPool |
||||
#============================================================================ |
||||
|
||||
#org.quartz.threadPool.class = org.quartz.simpl.SimpleThreadPool |
||||
#org.quartz.threadPool.makeThreadsDaemons = true |
||||
#org.quartz.threadPool.threadCount = 25 |
||||
#org.quartz.threadPool.threadPriority = 5 |
||||
|
||||
#============================================================================ |
||||
# Configure JobStore |
||||
#============================================================================ |
||||
|
||||
#org.quartz.jobStore.class = org.quartz.impl.jdbcjobstore.JobStoreTX |
||||
|
||||
#org.quartz.jobStore.tablePrefix = QRTZ_ |
||||
#org.quartz.jobStore.isClustered = true |
||||
#org.quartz.jobStore.misfireThreshold = 60000 |
||||
#org.quartz.jobStore.clusterCheckinInterval = 5000 |
||||
#org.quartz.jobStore.acquireTriggersWithinLock=true |
||||
#org.quartz.jobStore.dataSource = myDs |
||||
|
||||
#============================================================================ |
||||
# Configure Datasources |
||||
#============================================================================ |
||||
#org.quartz.dataSource.myDs.connectionProvider.class = org.apache.dolphinscheduler.service.quartz.DruidConnectionProvider |
@ -1,37 +0,0 @@
|
||||
# |
||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
||||
# contributor license agreements. See the NOTICE file distributed with |
||||
# this work for additional information regarding copyright ownership. |
||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
# (the "License"); you may not use this file except in compliance with |
||||
# the License. You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
# |
||||
|
||||
# worker execute thread num |
||||
worker.exec.threads=${WORKER_EXEC_THREADS} |
||||
|
||||
# worker heartbeat interval |
||||
worker.heartbeat.interval=${WORKER_HEARTBEAT_INTERVAL} |
||||
|
||||
# submit the number of tasks at a time |
||||
worker.fetch.task.num=${WORKER_FETCH_TASK_NUM} |
||||
|
||||
# only less than cpu avg load, worker server can work. default value : the number of cpu cores * 2 |
||||
worker.max.cpuload.avg=${WORKER_MAX_CPULOAD_AVG} |
||||
|
||||
# only larger than reserved memory, worker server can work. default value : physical memory * 1/6, unit is G. |
||||
worker.reserved.memory=${WORKER_RESERVED_MEMORY} |
||||
|
||||
# worker listener port |
||||
#worker.listen.port=${WORKER_LISTEN_PORT} |
||||
|
||||
# default worker group |
||||
#worker.group=${WORKER_GROUP} |
@ -1,29 +0,0 @@
|
||||
# |
||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
||||
# contributor license agreements. See the NOTICE file distributed with |
||||
# this work for additional information regarding copyright ownership. |
||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
# (the "License"); you may not use this file except in compliance with |
||||
# the License. You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
# |
||||
|
||||
# zookeeper cluster. multiple are separated by commas. eg. 192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181 |
||||
zookeeper.quorum=${ZOOKEEPER_QUORUM} |
||||
|
||||
# dolphinscheduler root directory |
||||
#zookeeper.dolphinscheduler.root=/dolphinscheduler |
||||
|
||||
# dolphinscheduler failover directory |
||||
#zookeeper.session.timeout=300 |
||||
#zookeeper.connection.timeout=300 |
||||
#zookeeper.retry.base.sleep=100 |
||||
#zookeeper.retry.max.sleep=30000 |
||||
#zookeeper.retry.maxtime=5 |
@ -1,48 +0,0 @@
|
||||
# |
||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
||||
# contributor license agreements. See the NOTICE file distributed with |
||||
# this work for additional information regarding copyright ownership. |
||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
# (the "License"); you may not use this file except in compliance with |
||||
# the License. You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
# |
||||
|
||||
server { |
||||
listen 8888; |
||||
server_name localhost; |
||||
#charset koi8-r; |
||||
#access_log /var/log/nginx/host.access.log main; |
||||
location / { |
||||
root /opt/dolphinscheduler/ui; |
||||
index index.html index.html; |
||||
} |
||||
location /dolphinscheduler { |
||||
proxy_pass http://FRONTEND_API_SERVER_HOST:FRONTEND_API_SERVER_PORT; |
||||
proxy_set_header Host $host; |
||||
proxy_set_header X-Real-IP $remote_addr; |
||||
proxy_set_header x_real_ipP $remote_addr; |
||||
proxy_set_header remote_addr $remote_addr; |
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; |
||||
proxy_http_version 1.1; |
||||
proxy_connect_timeout 300s; |
||||
proxy_read_timeout 300s; |
||||
proxy_send_timeout 300s; |
||||
proxy_set_header Upgrade $http_upgrade; |
||||
proxy_set_header Connection "upgrade"; |
||||
} |
||||
#error_page 404 /404.html; |
||||
# redirect server error pages to the static page /50x.html |
||||
# |
||||
error_page 500 502 503 504 /50x.html; |
||||
location = /50x.html { |
||||
root /usr/share/nginx/html; |
||||
} |
||||
} |
@ -1,45 +0,0 @@
|
||||
# |
||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
||||
# contributor license agreements. See the NOTICE file distributed with |
||||
# this work for additional information regarding copyright ownership. |
||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
# (the "License"); you may not use this file except in compliance with |
||||
# the License. You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
# |
||||
|
||||
# The number of milliseconds of each tick |
||||
tickTime=2000 |
||||
# The number of ticks that the initial |
||||
# synchronization phase can take |
||||
initLimit=10 |
||||
# The number of ticks that can pass between |
||||
# sending a request and getting an acknowledgement |
||||
syncLimit=5 |
||||
# the directory where the snapshot is stored. |
||||
# do not use /tmp for storage, /tmp here is just |
||||
# example sakes. |
||||
dataDir=/tmp/zookeeper |
||||
# the port at which the clients will connect |
||||
clientPort=2181 |
||||
# the maximum number of client connections. |
||||
# increase this if you need to handle more clients |
||||
#maxClientCnxns=60 |
||||
# |
||||
# Be sure to read the maintenance section of the |
||||
# administrator guide before turning on autopurge. |
||||
# |
||||
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance |
||||
# |
||||
# The number of snapshots to retain in dataDir |
||||
#autopurge.snapRetainCount=3 |
||||
# Purge task interval in hours |
||||
# Set to "0" to disable auto purge feature |
||||
#autopurge.purgeInterval=1 |
@ -1,53 +0,0 @@
|
||||
#!/bin/bash |
||||
# |
||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
||||
# contributor license agreements. See the NOTICE file distributed with |
||||
# this work for additional information regarding copyright ownership. |
||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
# (the "License"); you may not use this file except in compliance with |
||||
# the License. You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
# |
||||
|
||||
set -e |
||||
|
||||
echo "------ dolphinscheduler start - build -------" |
||||
printenv |
||||
|
||||
if [ -z "${VERSION}" ] |
||||
then |
||||
echo "set default environment variable [VERSION]" |
||||
VERSION=$(cat $(pwd)/sql/soft_version) |
||||
fi |
||||
|
||||
if [ "${DOCKER_REPO}x" = "x" ] |
||||
then |
||||
echo "set default environment variable [DOCKER_REPO]" |
||||
DOCKER_REPO='dolphinscheduler' |
||||
fi |
||||
|
||||
echo "Version: $VERSION" |
||||
echo "Repo: $DOCKER_REPO" |
||||
|
||||
echo -e "Current Directory is $(pwd)\n" |
||||
|
||||
# maven package(Project Directory) |
||||
echo -e "mvn -B clean compile package -Prelease -Dmaven.test.skip=true" |
||||
mvn -B clean compile package -Prelease -Dmaven.test.skip=true |
||||
|
||||
# mv dolphinscheduler-bin.tar.gz file to dockerfile directory |
||||
echo -e "mv $(pwd)/dolphinscheduler-dist/target/apache-dolphinscheduler-incubating-${VERSION}-SNAPSHOT-dolphinscheduler-bin.tar.gz $(pwd)/dockerfile/\n" |
||||
mv $(pwd)/dolphinscheduler-dist/target/apache-dolphinscheduler-incubating-${VERSION}-SNAPSHOT-dolphinscheduler-bin.tar.gz $(pwd)/dockerfile/ |
||||
|
||||
# docker build |
||||
echo -e "docker build --build-arg VERSION=${VERSION} -t $DOCKER_REPO:${VERSION} $(pwd)/dockerfile/\n" |
||||
docker build --build-arg VERSION=${VERSION} -t $DOCKER_REPO:${VERSION} $(pwd)/dockerfile/ |
||||
|
||||
echo "------ dolphinscheduler end - build -------" |
@ -1,56 +0,0 @@
|
||||
:: Licensed to the Apache Software Foundation (ASF) under one or more |
||||
:: contributor license agreements. See the NOTICE file distributed with |
||||
:: this work for additional information regarding copyright ownership. |
||||
:: The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
:: (the "License"); you may not use this file except in compliance with |
||||
:: the License. You may obtain a copy of the License at |
||||
:: |
||||
:: http://www.apache.org/licenses/LICENSE-2.0 |
||||
:: |
||||
:: Unless required by applicable law or agreed to in writing, software |
||||
:: distributed under the License is distributed on an "AS IS" BASIS, |
||||
:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
:: See the License for the specific language governing permissions and |
||||
:: limitations under the License. |
||||
:: |
||||
@echo off |
||||
|
||||
echo "------ dolphinscheduler start - build -------" |
||||
set |
||||
|
||||
if not defined VERSION ( |
||||
echo "set environment variable [VERSION]" |
||||
for /f %%l in (%cd%\sql\soft_version) do (set VERSION=%%l) |
||||
) |
||||
|
||||
if not defined DOCKER_REPO ( |
||||
echo "set environment variable [DOCKER_REPO]" |
||||
set DOCKER_REPO='dolphinscheduler' |
||||
) |
||||
|
||||
echo "Version: %VERSION%" |
||||
echo "Repo: %DOCKER_REPO%" |
||||
|
||||
echo "Current Directory is %cd%" |
||||
|
||||
:: maven package(Project Directory) |
||||
echo "call mvn clean compile package -Prelease" |
||||
call mvn clean compile package -Prelease -DskipTests=true |
||||
if "%errorlevel%"=="1" goto :mvnFailed |
||||
|
||||
:: move dolphinscheduler-bin.tar.gz file to dockerfile directory |
||||
echo "move %cd%\dolphinscheduler-dist\target\apache-dolphinscheduler-incubating-%VERSION%-SNAPSHOT-dolphinscheduler-bin.tar.gz %cd%\dockerfile\" |
||||
move %cd%\dolphinscheduler-dist\target\apache-dolphinscheduler-incubating-%VERSION%-SNAPSHOT-dolphinscheduler-bin.tar.gz %cd%\dockerfile\ |
||||
|
||||
:: docker build |
||||
echo "docker build --build-arg VERSION=%VERSION% -t %DOCKER_REPO%:%VERSION% %cd%\dockerfile\" |
||||
docker build --build-arg VERSION=%VERSION% -t %DOCKER_REPO%:%VERSION% %cd%\dockerfile\ |
||||
if "%errorlevel%"=="1" goto :dockerBuildFailed |
||||
|
||||
echo "------ dolphinscheduler end - build -------" |
||||
|
||||
:mvnFailed |
||||
echo "MAVEN PACKAGE FAILED!" |
||||
|
||||
:dockerBuildFailed |
||||
echo "DOCKER BUILD FAILED!" |
@ -1,35 +0,0 @@
|
||||
#!/bin/bash |
||||
# |
||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
||||
# contributor license agreements. See the NOTICE file distributed with |
||||
# this work for additional information regarding copyright ownership. |
||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
# (the "License"); you may not use this file except in compliance with |
||||
# the License. You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
# |
||||
echo "------ dolphinscheduler check - server - status -------" |
||||
sleep 60 |
||||
server_num=$(docker top `docker container list | grep '/sbin/tini' | awk '{print $1}'`| grep java | grep "dolphinscheduler" | awk -F 'classpath ' '{print $2}' | awk '{print $2}' | sort | uniq -c | wc -l) |
||||
if [ $server_num -eq 5 ] |
||||
then |
||||
echo "Server all start successfully" |
||||
else |
||||
echo "Server start failed "$server_num |
||||
exit 1 |
||||
fi |
||||
ready=`curl http://127.0.0.1:8888/dolphinscheduler/login -d 'userName=admin&userPassword=dolphinscheduler123' -v | grep "login success" | wc -l` |
||||
if [ $ready -eq 1 ] |
||||
then |
||||
echo "Servers is ready" |
||||
else |
||||
echo "Servers is not ready" |
||||
exit 1 |
||||
fi |
@ -1,24 +0,0 @@
|
||||
#!/bin/bash |
||||
# |
||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
||||
# contributor license agreements. See the NOTICE file distributed with |
||||
# this work for additional information regarding copyright ownership. |
||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
# (the "License"); you may not use this file except in compliance with |
||||
# the License. You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
# |
||||
|
||||
echo "------ push start -------" |
||||
printenv |
||||
|
||||
docker push $DOCKER_REPO:${VERSION} |
||||
|
||||
echo "------ push end -------" |
@ -1,23 +0,0 @@
|
||||
:: Licensed to the Apache Software Foundation (ASF) under one or more |
||||
:: contributor license agreements. See the NOTICE file distributed with |
||||
:: this work for additional information regarding copyright ownership. |
||||
:: The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
:: (the "License"); you may not use this file except in compliance with |
||||
:: the License. You may obtain a copy of the License at |
||||
:: |
||||
:: http://www.apache.org/licenses/LICENSE-2.0 |
||||
:: |
||||
:: Unless required by applicable law or agreed to in writing, software |
||||
:: distributed under the License is distributed on an "AS IS" BASIS, |
||||
:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
:: See the License for the specific language governing permissions and |
||||
:: limitations under the License. |
||||
:: |
||||
@echo off |
||||
|
||||
echo "------ push start -------" |
||||
set |
||||
|
||||
docker push %DOCKER_REPO%:%VERSION% |
||||
|
||||
echo "------ push end -------" |
@ -1,104 +0,0 @@
|
||||
#!/bin/bash |
||||
# |
||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
||||
# contributor license agreements. See the NOTICE file distributed with |
||||
# this work for additional information regarding copyright ownership. |
||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
# (the "License"); you may not use this file except in compliance with |
||||
# the License. You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
# |
||||
|
||||
set -e |
||||
|
||||
echo "init env variables" |
||||
|
||||
# Define parameters default value. |
||||
#============================================================================ |
||||
# Database Source |
||||
#============================================================================ |
||||
export POSTGRESQL_HOST=${POSTGRESQL_HOST:-"127.0.0.1"} |
||||
export POSTGRESQL_PORT=${POSTGRESQL_PORT:-"5432"} |
||||
export POSTGRESQL_USERNAME=${POSTGRESQL_USERNAME:-"root"} |
||||
export POSTGRESQL_PASSWORD=${POSTGRESQL_PASSWORD:-"root"} |
||||
export POSTGRESQL_DATABASE=${POSTGRESQL_DATABASE:-"dolphinscheduler"} |
||||
|
||||
#============================================================================ |
||||
# System |
||||
#============================================================================ |
||||
export DOLPHINSCHEDULER_ENV_PATH=${DOLPHINSCHEDULER_ENV_PATH:-"/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh"} |
||||
export DOLPHINSCHEDULER_DATA_BASEDIR_PATH=${DOLPHINSCHEDULER_DATA_BASEDIR_PATH:-"/tmp/dolphinscheduler"} |
||||
|
||||
#============================================================================ |
||||
# Zookeeper |
||||
#============================================================================ |
||||
export ZOOKEEPER_QUORUM=${ZOOKEEPER_QUORUM:-"127.0.0.1:2181"} |
||||
|
||||
#============================================================================ |
||||
# Master Server |
||||
#============================================================================ |
||||
export MASTER_EXEC_THREADS=${MASTER_EXEC_THREADS:-"100"} |
||||
export MASTER_EXEC_TASK_NUM=${MASTER_EXEC_TASK_NUM:-"20"} |
||||
export MASTER_HEARTBEAT_INTERVAL=${MASTER_HEARTBEAT_INTERVAL:-"10"} |
||||
export MASTER_TASK_COMMIT_RETRYTIMES=${MASTER_TASK_COMMIT_RETRYTIMES:-"5"} |
||||
export MASTER_TASK_COMMIT_INTERVAL=${MASTER_TASK_COMMIT_INTERVAL:-"1000"} |
||||
export MASTER_MAX_CPULOAD_AVG=${MASTER_MAX_CPULOAD_AVG:-"100"} |
||||
export MASTER_RESERVED_MEMORY=${MASTER_RESERVED_MEMORY:-"0.1"} |
||||
export MASTER_LISTEN_PORT=${MASTER_LISTEN_PORT:-"5678"} |
||||
|
||||
#============================================================================ |
||||
# Worker Server |
||||
#============================================================================ |
||||
export WORKER_EXEC_THREADS=${WORKER_EXEC_THREADS:-"100"} |
||||
export WORKER_HEARTBEAT_INTERVAL=${WORKER_HEARTBEAT_INTERVAL:-"10"} |
||||
export WORKER_FETCH_TASK_NUM=${WORKER_FETCH_TASK_NUM:-"3"} |
||||
export WORKER_MAX_CPULOAD_AVG=${WORKER_MAX_CPULOAD_AVG:-"100"} |
||||
export WORKER_RESERVED_MEMORY=${WORKER_RESERVED_MEMORY:-"0.1"} |
||||
export WORKER_LISTEN_PORT=${WORKER_LISTEN_PORT:-"1234"} |
||||
export WORKER_GROUP=${WORKER_GROUP:-"default"} |
||||
|
||||
#============================================================================ |
||||
# Alert Server |
||||
#============================================================================ |
||||
# XLS FILE |
||||
export XLS_FILE_PATH=${XLS_FILE_PATH:-"/tmp/xls"} |
||||
# mail |
||||
export MAIL_SERVER_HOST=${MAIL_SERVER_HOST:-""} |
||||
export MAIL_SERVER_PORT=${MAIL_SERVER_PORT:-""} |
||||
export MAIL_SENDER=${MAIL_SENDER:-""} |
||||
export MAIL_USER=${MAIL_USER:-""} |
||||
export MAIL_PASSWD=${MAIL_PASSWD:-""} |
||||
export MAIL_SMTP_STARTTLS_ENABLE=${MAIL_SMTP_STARTTLS_ENABLE:-"true"} |
||||
export MAIL_SMTP_SSL_ENABLE=${MAIL_SMTP_SSL_ENABLE:-"false"} |
||||
export MAIL_SMTP_SSL_TRUST=${MAIL_SMTP_SSL_TRUST:-""} |
||||
# wechat |
||||
export ENTERPRISE_WECHAT_ENABLE=${ENTERPRISE_WECHAT_ENABLE:-"false"} |
||||
export ENTERPRISE_WECHAT_CORP_ID=${ENTERPRISE_WECHAT_CORP_ID:-""} |
||||
export ENTERPRISE_WECHAT_SECRET=${ENTERPRISE_WECHAT_SECRET:-""} |
||||
export ENTERPRISE_WECHAT_AGENT_ID=${ENTERPRISE_WECHAT_AGENT_ID:-""} |
||||
export ENTERPRISE_WECHAT_USERS=${ENTERPRISE_WECHAT_USERS:-""} |
||||
|
||||
#============================================================================ |
||||
# Frontend |
||||
#============================================================================ |
||||
export FRONTEND_API_SERVER_HOST=${FRONTEND_API_SERVER_HOST:-"127.0.0.1"} |
||||
export FRONTEND_API_SERVER_PORT=${FRONTEND_API_SERVER_PORT:-"12345"} |
||||
|
||||
echo "generate app config" |
||||
ls ${DOLPHINSCHEDULER_HOME}/conf/ | grep ".tpl" | while read line; do |
||||
eval "cat << EOF |
||||
$(cat ${DOLPHINSCHEDULER_HOME}/conf/${line}) |
||||
EOF |
||||
" > ${DOLPHINSCHEDULER_HOME}/conf/${line%.*} |
||||
done |
||||
|
||||
echo "generate nginx config" |
||||
sed -i "s/FRONTEND_API_SERVER_HOST/${FRONTEND_API_SERVER_HOST}/g" /etc/nginx/conf.d/dolphinscheduler.conf |
||||
sed -i "s/FRONTEND_API_SERVER_PORT/${FRONTEND_API_SERVER_PORT}/g" /etc/nginx/conf.d/dolphinscheduler.conf |
@ -1,196 +0,0 @@
|
||||
#!/bin/bash |
||||
# |
||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
||||
# contributor license agreements. See the NOTICE file distributed with |
||||
# this work for additional information regarding copyright ownership. |
||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
# (the "License"); you may not use this file except in compliance with |
||||
# the License. You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
# |
||||
|
||||
set -e |
||||
|
||||
DOLPHINSCHEDULER_BIN=${DOLPHINSCHEDULER_HOME}/bin |
||||
DOLPHINSCHEDULER_SCRIPT=${DOLPHINSCHEDULER_HOME}/script |
||||
DOLPHINSCHEDULER_LOGS=${DOLPHINSCHEDULER_HOME}/logs |
||||
|
||||
# start postgresql |
||||
initPostgreSQL() { |
||||
echo "checking postgresql" |
||||
if [ -n "$(ifconfig | grep ${POSTGRESQL_HOST})" ]; then |
||||
echo "start postgresql service" |
||||
rc-service postgresql restart |
||||
|
||||
# role if not exists, create |
||||
flag=$(sudo -u postgres psql -tAc "SELECT 1 FROM pg_roles WHERE rolname='${POSTGRESQL_USERNAME}'") |
||||
if [ -z "${flag}" ]; then |
||||
echo "create user" |
||||
sudo -u postgres psql -tAc "create user ${POSTGRESQL_USERNAME} with password '${POSTGRESQL_PASSWORD}'" |
||||
fi |
||||
|
||||
# database if not exists, create |
||||
flag=$(sudo -u postgres psql -tAc "select 1 from pg_database where datname='dolphinscheduler'") |
||||
if [ -z "${flag}" ]; then |
||||
echo "init db" |
||||
sudo -u postgres psql -tAc "create database dolphinscheduler owner ${POSTGRESQL_USERNAME}" |
||||
fi |
||||
|
||||
# grant |
||||
sudo -u postgres psql -tAc "grant all privileges on database dolphinscheduler to ${POSTGRESQL_USERNAME}" |
||||
fi |
||||
|
||||
echo "connect postgresql service" |
||||
v=$(sudo -u postgres PGPASSWORD=${POSTGRESQL_PASSWORD} psql -h ${POSTGRESQL_HOST} -U ${POSTGRESQL_USERNAME} -d dolphinscheduler -tAc "select 1") |
||||
if [ "$(echo '${v}' | grep 'FATAL' | wc -l)" -eq 1 ]; then |
||||
echo "Can't connect to database...${v}" |
||||
exit 1 |
||||
fi |
||||
|
||||
echo "import sql data" |
||||
${DOLPHINSCHEDULER_SCRIPT}/create-dolphinscheduler.sh |
||||
} |
||||
|
||||
# start zk |
||||
initZK() { |
||||
echo -e "checking zookeeper" |
||||
if [[ "${ZOOKEEPER_QUORUM}" = "127.0.0.1:2181" || "${ZOOKEEPER_QUORUM}" = "localhost:2181" ]]; then |
||||
echo "start local zookeeper" |
||||
/opt/zookeeper/bin/zkServer.sh restart |
||||
else |
||||
echo "connect remote zookeeper" |
||||
echo "${ZOOKEEPER_QUORUM}" | awk -F ',' 'BEGIN{ i=1 }{ while( i <= NF ){ print $i; i++ } }' | while read line; do |
||||
while ! nc -z ${line%:*} ${line#*:}; do |
||||
counter=$((counter+1)) |
||||
if [ $counter == 30 ]; then |
||||
echo "Error: Couldn't connect to zookeeper." |
||||
exit 1 |
||||
fi |
||||
echo "Trying to connect to zookeeper at ${line}. Attempt $counter." |
||||
sleep 5 |
||||
done |
||||
done |
||||
fi |
||||
} |
||||
|
||||
# start nginx |
||||
initNginx() { |
||||
echo "start nginx" |
||||
nginx & |
||||
} |
||||
|
||||
# start master-server |
||||
initMasterServer() { |
||||
echo "start master-server" |
||||
${DOLPHINSCHEDULER_BIN}/dolphinscheduler-daemon.sh stop master-server |
||||
${DOLPHINSCHEDULER_BIN}/dolphinscheduler-daemon.sh start master-server |
||||
} |
||||
|
||||
# start worker-server |
||||
initWorkerServer() { |
||||
echo "start worker-server" |
||||
${DOLPHINSCHEDULER_BIN}/dolphinscheduler-daemon.sh stop worker-server |
||||
${DOLPHINSCHEDULER_BIN}/dolphinscheduler-daemon.sh start worker-server |
||||
} |
||||
|
||||
# start api-server |
||||
initApiServer() { |
||||
echo "start api-server" |
||||
${DOLPHINSCHEDULER_BIN}/dolphinscheduler-daemon.sh stop api-server |
||||
${DOLPHINSCHEDULER_BIN}/dolphinscheduler-daemon.sh start api-server |
||||
} |
||||
|
||||
# start logger-server |
||||
initLoggerServer() { |
||||
echo "start logger-server" |
||||
${DOLPHINSCHEDULER_BIN}/dolphinscheduler-daemon.sh stop logger-server |
||||
${DOLPHINSCHEDULER_BIN}/dolphinscheduler-daemon.sh start logger-server |
||||
} |
||||
|
||||
# start alert-server |
||||
initAlertServer() { |
||||
echo "start alert-server" |
||||
${DOLPHINSCHEDULER_BIN}/dolphinscheduler-daemon.sh stop alert-server |
||||
${DOLPHINSCHEDULER_BIN}/dolphinscheduler-daemon.sh start alert-server |
||||
} |
||||
|
||||
# print usage |
||||
printUsage() { |
||||
echo -e "Dolphin Scheduler is a distributed and easy-to-expand visual DAG workflow scheduling system," |
||||
echo -e "dedicated to solving the complex dependencies in data processing, making the scheduling system out of the box for data processing.\n" |
||||
echo -e "Usage: [ all | master-server | worker-server | api-server | alert-server | frontend ]\n" |
||||
printf "%-13s: %s\n" "all" "Run master-server, worker-server, api-server, alert-server and frontend." |
||||
printf "%-13s: %s\n" "master-server" "MasterServer is mainly responsible for DAG task split, task submission monitoring." |
||||
printf "%-13s: %s\n" "worker-server" "WorkerServer is mainly responsible for task execution and providing log services.." |
||||
printf "%-13s: %s\n" "api-server" "ApiServer is mainly responsible for processing requests from the front-end UI layer." |
||||
printf "%-13s: %s\n" "alert-server" "AlertServer mainly include Alarms." |
||||
printf "%-13s: %s\n" "frontend" "Frontend mainly provides various visual operation interfaces of the system." |
||||
} |
||||
|
||||
# init config file |
||||
source /root/startup-init-conf.sh |
||||
|
||||
LOGFILE=/var/log/nginx/access.log |
||||
case "$1" in |
||||
(all) |
||||
initZK |
||||
initPostgreSQL |
||||
initMasterServer |
||||
initWorkerServer |
||||
initApiServer |
||||
initAlertServer |
||||
initLoggerServer |
||||
initNginx |
||||
LOGFILE=/var/log/nginx/access.log |
||||
;; |
||||
(master-server) |
||||
initZK |
||||
initPostgreSQL |
||||
initMasterServer |
||||
LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-master.log |
||||
;; |
||||
(worker-server) |
||||
initZK |
||||
initPostgreSQL |
||||
initWorkerServer |
||||
initLoggerServer |
||||
LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-worker.log |
||||
;; |
||||
(api-server) |
||||
initZK |
||||
initPostgreSQL |
||||
initApiServer |
||||
LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-api-server.log |
||||
;; |
||||
(alert-server) |
||||
initPostgreSQL |
||||
initAlertServer |
||||
LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-alert.log |
||||
;; |
||||
(frontend) |
||||
initNginx |
||||
LOGFILE=/var/log/nginx/access.log |
||||
;; |
||||
(help) |
||||
printUsage |
||||
exit 1 |
||||
;; |
||||
(*) |
||||
printUsage |
||||
exit 1 |
||||
;; |
||||
esac |
||||
|
||||
# init directories and log files |
||||
mkdir -p ${DOLPHINSCHEDULER_LOGS} && mkdir -p /var/log/nginx/ && cat /dev/null >> ${LOGFILE} |
||||
|
||||
echo "tail begin" |
||||
exec bash -c "tail -n 1 -f ${LOGFILE}" |
||||
|
@ -1,67 +0,0 @@
|
||||
# |
||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
||||
# contributor license agreements. See the NOTICE file distributed with |
||||
# this work for additional information regarding copyright ownership. |
||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
# (the "License"); you may not use this file except in compliance with |
||||
# the License. You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
# |
||||
|
||||
# For unit test |
||||
|
||||
#alert type is EMAIL/SMS |
||||
alert.type=EMAIL |
||||
|
||||
# mail server configuration |
||||
mail.protocol=SMTP |
||||
mail.server.host=xxx.xxx.test |
||||
mail.server.port=25 |
||||
mail.sender=xxx@xxx.com |
||||
mail.user=xxx@xxx.com |
||||
mail.passwd=111111 |
||||
|
||||
# Test double |
||||
test.server.factor=3.0 |
||||
|
||||
|
||||
# Test NumberFormat |
||||
test.server.testnumber=abc |
||||
|
||||
# Test array |
||||
test.server.list=xxx.xxx.test1,xxx.xxx.test2,xxx.xxx.test3 |
||||
|
||||
# Test enum |
||||
test.server.enum1=MASTER |
||||
test.server.enum2=DEAD_SERVER |
||||
test.server.enum3=abc |
||||
|
||||
# TLS |
||||
mail.smtp.starttls.enable=true |
||||
# SSL |
||||
mail.smtp.ssl.enable=false |
||||
mail.smtp.ssl.trust=xxx.xxx.com |
||||
|
||||
#xls file path,need create if not exist |
||||
xls.file.path=/tmp/xls |
||||
|
||||
# Enterprise WeChat configuration |
||||
enterprise.wechat.enable=false |
||||
enterprise.wechat.corp.id=xxxxxxx |
||||
enterprise.wechat.secret=xxxxxxx |
||||
enterprise.wechat.agent.id=xxxxxxx |
||||
enterprise.wechat.users=xxxxxxx |
||||
enterprise.wechat.token.url=https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret |
||||
enterprise.wechat.push.url=https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token |
||||
enterprise.wechat.team.send.msg={\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"} |
||||
enterprise.wechat.user.send.msg={\"touser\":\"$toUser\",\"agentid\":\"$agentId\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"$msg\"}} |
||||
|
||||
|
||||
|
File diff suppressed because it is too large
Load Diff
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue