
[Improvement][Docker] Sync docker conf templates to the latest conf properties (#4772)

Shiwen Cheng, 4 years ago, committed by GitHub
commit 149b0dab4d
Changed files (number of changed lines in parentheses):

  1. docker/README.md (1)
  2. docker/build/Dockerfile (2)
  3. docker/build/conf/dolphinscheduler/alert.properties.tpl (46)
  4. docker/build/conf/dolphinscheduler/application-api.properties.tpl (25)
  5. docker/build/conf/dolphinscheduler/common.properties.tpl (66)
  6. docker/build/conf/dolphinscheduler/datasource.properties.tpl (15)
  7. docker/build/conf/dolphinscheduler/logback/logback-api.xml (4)
  8. docker/build/conf/dolphinscheduler/logback/logback-master.xml (4)
  9. docker/build/conf/dolphinscheduler/logback/logback-worker.xml (9)
  10. docker/build/conf/dolphinscheduler/master.properties.tpl (5)
  11. docker/build/conf/dolphinscheduler/worker.properties.tpl (14)
  12. docker/build/conf/dolphinscheduler/zookeeper.properties.tpl (1)
  13. docker/build/startup-init-conf.sh (27)
  14. docker/build/startup.sh (38)
  15. docker/docker-swarm/docker-compose.yml (28)
  16. docker/docker-swarm/docker-stack.yml (28)
  17. dolphinscheduler-alert/src/main/resources/alert.properties (2)
  18. dolphinscheduler-api/src/main/resources/application-api.properties (6)
  19. dolphinscheduler-api/src/main/resources/logback-api.xml (4)
  20. dolphinscheduler-common/src/main/resources/common.properties (9)
  21. dolphinscheduler-server/src/main/resources/master.properties (2)
  22. dolphinscheduler-service/src/main/resources/logback-zookeeper.xml (2)

docker/README.md

@@ -0,0 +1 @@
+# Dolphin Scheduler for Docker

docker/build/Dockerfile

@@ -57,6 +57,6 @@ RUN dos2unix /root/checkpoint.sh && \
     echo "Set disable_coredump false" >> /etc/sudo.conf
 # 4. expose port
-EXPOSE 5678 1234 12345 50051
+EXPOSE 5678 1234 12345 50051 50052
 ENTRYPOINT ["/sbin/tini", "--", "/root/startup.sh"]
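The Dockerfile change only adds 50052 to the EXPOSE list (presumably the alert server's listen port, given that the alert-server service previously published it in the compose files). To confirm which ports an image declares, you can inspect its metadata; a small sketch, assuming the image was built or pulled as apache/dolphinscheduler:latest:

# List the ports declared via EXPOSE in the image metadata.
docker inspect --format '{{json .Config.ExposedPorts}}' apache/dolphinscheduler:latest
# expected output (approximately): {"1234/tcp":{},"12345/tcp":{},"50051/tcp":{},"50052/tcp":{},"5678/tcp":{}}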

docker/build/conf/dolphinscheduler/alert.properties.tpl

@@ -14,43 +14,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-#alert type is EMAIL/SMS
-alert.type=EMAIL
-# alter msg template, default is html template
-#alert.template=html
-# mail server configuration
-mail.protocol=SMTP
-mail.server.host=${MAIL_SERVER_HOST}
-mail.server.port=${MAIL_SERVER_PORT}
-mail.sender=${MAIL_SENDER}
-mail.user=${MAIL_USER}
-mail.passwd=${MAIL_PASSWD}
-# TLS
-mail.smtp.starttls.enable=${MAIL_SMTP_STARTTLS_ENABLE}
-# SSL
-mail.smtp.ssl.enable=${MAIL_SMTP_SSL_ENABLE}
-mail.smtp.ssl.trust=${MAIL_SMTP_SSL_TRUST}
-#xls file path,need create if not exist
-xls.file.path=${XLS_FILE_PATH}
-# plugins dir
-plugin.dir=${ALERT_PLUGIN_DIR}
-# Enterprise WeChat configuration
-enterprise.wechat.enable=${ENTERPRISE_WECHAT_ENABLE}
-enterprise.wechat.corp.id=${ENTERPRISE_WECHAT_CORP_ID}
-enterprise.wechat.secret=${ENTERPRISE_WECHAT_SECRET}
-enterprise.wechat.agent.id=${ENTERPRISE_WECHAT_AGENT_ID}
-enterprise.wechat.users=${ENTERPRISE_WECHAT_USERS}
-enterprise.wechat.token.url=https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid={corpId}&corpsecret={secret}
-enterprise.wechat.push.url=https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token={token}
-enterprise.wechat.team.send.msg={\"toparty\":\"{toParty}\",\"agentid\":\"{agentId}\",\"msgtype\":\"text\",\"text\":{\"content\":\"{msg}\"},\"safe\":\"0\"}
-enterprise.wechat.user.send.msg={\"touser\":\"{toUser}\",\"agentid\":\"{agentId}\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"{msg}\"}}
+#This configuration file configures the configuration parameters related to the AlertServer.
+#These parameters are only related to the AlertServer, and it has nothing to do with the specific Alert Plugin.
+#eg : max retry num.
+#eg : Alert Server Listener port
+#alert.plugin.dir config the Alert Plugin dir . AlertServer while find and load the Alert Plugin Jar from this dir when deploy and start AlertServer on the server .
+alert.plugin.dir=${ALERT_PLUGIN_DIR}
+#maven.local.repository=/Users/gaojun/Documents/jianguoyun/localRepository
+#alert.plugin.binding config the Alert Plugin need be load when development and run in IDE
+#alert.plugin.binding=\
+# ./dolphinscheduler-alert-plugin/dolphinscheduler-alert-email/pom.xml

docker/build/conf/dolphinscheduler/application-api.properties.tpl

@@ -24,12 +24,19 @@ server.servlet.session.timeout=7200
 # servlet config
 server.servlet.context-path=/dolphinscheduler/
+# time zone
+spring.jackson.time-zone=GMT+8
 # file size limit for upload
 spring.servlet.multipart.max-file-size=1024MB
 spring.servlet.multipart.max-request-size=1024MB
+# enable response compression
+server.compression.enabled=true
+server.compression.mime-types=text/html,text/xml,text/plain,text/css,text/javascript,application/javascript,application/json,application/xml
 # post content
-server.jetty.max-http-post-size=5000000
+server.jetty.max-http-form-post-size=5000000
 # i18n
 spring.messages.encoding=UTF-8
@@ -40,6 +47,16 @@ spring.messages.basename=i18n/messages
 # Authentication types (supported types: PASSWORD)
 security.authentication.type=PASSWORD
+#============================================================================
+# LDAP Config
+# mock ldap server from https://www.forumsys.com/tutorials/integration-how-to/ldap/online-ldap-test-server/
+#============================================================================
+# admin userId
+#security.authentication.ldap.user.admin=read-only-admin
+# ldap server config
+#ldap.urls=ldap://ldap.forumsys.com:389/
+#ldap.base.dn=dc=example,dc=com
+#ldap.username=cn=read-only-admin,dc=example,dc=com
+#ldap.password=password
+#ldap.user.identity.attribute=uid
+#ldap.user.email.attribute=mail

docker/build/conf/dolphinscheduler/common.properties.tpl

@@ -15,64 +15,64 @@
 # limitations under the License.
 #
-#============================================================================
-# System
-#============================================================================
-# system env path. self configuration, please make sure the directory and file exists and have read write execute permissions
-dolphinscheduler.env.path=${DOLPHINSCHEDULER_ENV_PATH}
-# user data directory path, self configuration, please make sure the directory exists and have read write permissions
-data.basedir.path=${DOLPHINSCHEDULER_DATA_BASEDIR_PATH}
-# resource upload startup type : HDFS,S3,NONE
+# resource storage type : HDFS, S3, NONE
 resource.storage.type=${RESOURCE_STORAGE_TYPE}
-#============================================================================
-# HDFS
-#============================================================================
 # resource store on HDFS/S3 path, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions。"/dolphinscheduler" is recommended
 resource.upload.path=${RESOURCE_UPLOAD_PATH}
+# user data local directory path, please make sure the directory exists and have read write permissions
+data.basedir.path=${DOLPHINSCHEDULER_DATA_BASEDIR_PATH}
 # whether kerberos starts
-#hadoop.security.authentication.startup.state=false
+hadoop.security.authentication.startup.state=false
 # java.security.krb5.conf path
-#java.security.krb5.conf.path=/opt/krb5.conf
+java.security.krb5.conf.path=/opt/krb5.conf
-# loginUserFromKeytab user
+# login user from keytab username
-#login.user.keytab.username=hdfs-mycluster@ESZ.COM
+login.user.keytab.username=hdfs-mycluster@ESZ.COM
-# loginUserFromKeytab path
+# login user from keytab path
-#login.user.keytab.path=/opt/hdfs.headless.keytab
+login.user.keytab.path=/opt/hdfs.headless.keytab
 #resource.view.suffixs
-#resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml,properties
+#resource.view.suffixs=txt,log,sh,bat,conf,cfg,py,java,sql,xml,hql,properties,json,yml,yaml,ini,js
 # if resource.storage.type=HDFS, the user need to have permission to create directories under the HDFS root path
 hdfs.root.user=hdfs
-# kerberos expire time
-kerberos.expire.time=7
-#============================================================================
-# S3
-#============================================================================
-# if resource.storage.type=S3,the value like: s3a://dolphinscheduler ; if resource.storage.type=HDFS, When namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml to conf dir
+# if resource.storage.type=S3, the value like: s3a://dolphinscheduler; if resource.storage.type=HDFS, When namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml to conf dir
 fs.defaultFS=${FS_DEFAULT_FS}
-# if resource.storage.type=S3s3 endpoint
+# if resource.storage.type=S3, s3 endpoint
 fs.s3a.endpoint=${FS_S3A_ENDPOINT}
-# if resource.storage.type=S3s3 access key
+# if resource.storage.type=S3, s3 access key
 fs.s3a.access.key=${FS_S3A_ACCESS_KEY}
-# if resource.storage.type=S3s3 secret key
+# if resource.storage.type=S3, s3 secret key
 fs.s3a.secret.key=${FS_S3A_SECRET_KEY}
-# if not use hadoop resourcemanager, please keep default value; if resourcemanager HA enable, please type the HA ips ; if resourcemanager is single, make this value empty TODO
+# if resourcemanager HA enable, please type the HA ips ; if resourcemanager is single, make this value empty
 yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx
-# If resourcemanager HA enable or not use resourcemanager, please keep the default value; If resourcemanager is single, you only need to replace ark1 to actual resourcemanager hostname.
+# if resourcemanager HA enable or not use resourcemanager, please keep the default value; If resourcemanager is single, you only need to replace ds1 to actual resourcemanager hostname.
-yarn.application.status.address=http://ark1:8088/ws/v1/cluster/apps/%s
+yarn.application.status.address=http://ds1:8088/ws/v1/cluster/apps/%s
+# job history status url when application number threshold is reached(default 10000,maybe it was set to 1000)
+yarn.job.history.status.address=http://ds1:19888/ws/v1/history/mapreduce/jobs/%s
+# system env path, If you want to set your own path, you need to set this env file to an absolute path
+dolphinscheduler.env.path=${DOLPHINSCHEDULER_ENV_PATH}
+development.state=false
+# kerberos tgt expire time, unit is hours
+kerberos.expire.time=2
+# datasource encryption salt
+datasource.encryption.enable=false
+datasource.encryption.salt=!@#$%^&*
+# Network IP gets priority, default inner outer
+#dolphin.scheduler.network.priority.strategy=default

docker/build/conf/dolphinscheduler/datasource.properties.tpl

@@ -17,12 +17,21 @@
 # db
 spring.datasource.driver-class-name=${DATABASE_DRIVER}
-spring.datasource.url=jdbc:${DATABASE_TYPE}://${DATABASE_HOST}:${DATABASE_PORT}/${DATABASE_DATABASE}?${DATABASE_PARAMS}
+spring.datasource.url=jdbc:${DATABASE_TYPE}://${DATABASE_HOST}:${DATABASE_PORT}/${DATABASE_DATABASE}${DATABASE_PARAMS:+?${DATABASE_PARAMS}}
 spring.datasource.username=${DATABASE_USERNAME}
 spring.datasource.password=${DATABASE_PASSWORD}
-## base spring data source configuration todo need to remove
-#spring.datasource.type=com.alibaba.druid.pool.DruidDataSource
+# postgresql
+#spring.datasource.driver-class-name=org.postgresql.Driver
+#spring.datasource.url=jdbc:postgresql://127.0.0.1:5432/dolphinscheduler
+#spring.datasource.username=test
+#spring.datasource.password=test
+# mysql
+#spring.datasource.driver-class-name=com.mysql.jdbc.Driver
+#spring.datasource.url=jdbc:mysql://127.0.0.1:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8
+#spring.datasource.username=xxxx
+#spring.datasource.password=xxxx
 # connection configuration
 #spring.datasource.initialSize=5
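The new spring.datasource.url template relies on the shell's ${VAR:+...} expansion, so the "?" separator and the query string are only emitted when DATABASE_PARAMS is set and non-empty (the .tpl files appear to be rendered by the shell; see startup-init-conf.sh below). A minimal sketch of that behavior, with illustrative values:

#!/bin/bash
# Illustrative values; only the ${DATABASE_PARAMS:+...} expansion is the point here.
DATABASE_TYPE=postgresql
DATABASE_HOST=db
DATABASE_PORT=5432
DATABASE_DATABASE=dolphinscheduler

DATABASE_PARAMS="characterEncoding=utf8"
echo "jdbc:${DATABASE_TYPE}://${DATABASE_HOST}:${DATABASE_PORT}/${DATABASE_DATABASE}${DATABASE_PARAMS:+?${DATABASE_PARAMS}}"
# -> jdbc:postgresql://db:5432/dolphinscheduler?characterEncoding=utf8

DATABASE_PARAMS=""
echo "jdbc:${DATABASE_TYPE}://${DATABASE_HOST}:${DATABASE_PORT}/${DATABASE_DATABASE}${DATABASE_PARAMS:+?${DATABASE_PARAMS}}"
# -> jdbc:postgresql://db:5432/dolphinscheduler  (no trailing "?")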

docker/build/conf/dolphinscheduler/logback/logback-api.xml

@@ -23,12 +23,12 @@
     <!-- api server logback config start -->
     <appender name="APILOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
-        <file>${log.base}/dolphinscheduler-api-server.log</file>
+        <file>${log.base}/dolphinscheduler-api.log</file>
         <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
             <level>INFO</level>
         </filter>
         <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
-            <fileNamePattern>${log.base}/dolphinscheduler-api-server.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
+            <fileNamePattern>${log.base}/dolphinscheduler-api.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
             <maxHistory>168</maxHistory>
             <maxFileSize>64MB</maxFileSize>
         </rollingPolicy>

docker/build/conf/dolphinscheduler/logback/logback-master.xml

@@ -24,9 +24,9 @@
     <conversionRule conversionWord="messsage"
                     converterClass="org.apache.dolphinscheduler.server.log.SensitiveDataConverter"/>
     <appender name="TASKLOGFILE" class="ch.qos.logback.classic.sift.SiftingAppender">
-        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+        <!-- <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
             <level>INFO</level>
-        </filter>
+        </filter> -->
         <filter class="org.apache.dolphinscheduler.server.log.TaskLogFilter"/>
         <Discriminator class="org.apache.dolphinscheduler.server.log.TaskLogDiscriminator">
             <key>taskAppId</key>

docker/build/conf/dolphinscheduler/logback/logback-worker.xml

@@ -25,9 +25,9 @@
     <conversionRule conversionWord="messsage"
                     converterClass="org.apache.dolphinscheduler.server.log.SensitiveDataConverter"/>
     <appender name="TASKLOGFILE" class="ch.qos.logback.classic.sift.SiftingAppender">
-        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+        <!-- <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
             <level>INFO</level>
-        </filter>
+        </filter> -->
         <filter class="org.apache.dolphinscheduler.server.log.TaskLogFilter"/>
         <Discriminator class="org.apache.dolphinscheduler.server.log.TaskLogDiscriminator">
             <key>taskAppId</key>
@@ -48,10 +48,9 @@
     </appender>
     <appender name="WORKERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
         <file>${log.base}/dolphinscheduler-worker.log</file>
-        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+        <!-- <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
             <level>INFO</level>
-        </filter>
+        </filter> -->
-        <filter class="org.apache.dolphinscheduler.server.log.WorkerLogFilter"/>
         <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
             <fileNamePattern>${log.base}/dolphinscheduler-worker.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
             <maxHistory>168</maxHistory>

docker/build/conf/dolphinscheduler/master.properties.tpl

@@ -21,6 +21,9 @@ master.exec.threads=${MASTER_EXEC_THREADS}
 # master execute task number in parallel
 master.exec.task.num=${MASTER_EXEC_TASK_NUM}
+# master dispatch task number
+#master.dispatch.task.num=3
 # master heartbeat interval
 master.heartbeat.interval=${MASTER_HEARTBEAT_INTERVAL}
@@ -37,4 +40,4 @@ master.max.cpuload.avg=${MASTER_MAX_CPULOAD_AVG}
 master.reserved.memory=${MASTER_RESERVED_MEMORY}
 # master listen port
-#master.listen.port=${MASTER_LISTEN_PORT}
+master.listen.port=${MASTER_LISTEN_PORT}

docker/build/conf/dolphinscheduler/worker.properties.tpl

@@ -21,20 +21,20 @@ worker.exec.threads=${WORKER_EXEC_THREADS}
 # worker heartbeat interval
 worker.heartbeat.interval=${WORKER_HEARTBEAT_INTERVAL}
-# submit the number of tasks at a time
-worker.fetch.task.num=${WORKER_FETCH_TASK_NUM}
-# only less than cpu avg load, worker server can work. default value : the number of cpu cores * 2
+# only less than cpu avg load, worker server can work. default value -1: the number of cpu cores * 2
 worker.max.cpuload.avg=${WORKER_MAX_CPULOAD_AVG}
 # only larger than reserved memory, worker server can work. default value : physical memory * 1/6, unit is G.
 worker.reserved.memory=${WORKER_RESERVED_MEMORY}
 # worker listener port
-#worker.listen.port=${WORKER_LISTEN_PORT}
+worker.listen.port=${WORKER_LISTEN_PORT}
 # default worker group
-#worker.groups=${WORKER_GROUP}
+worker.groups=${WORKER_GROUP}
 # default worker weight
-#worker.weight=${WORKER_WEIGHT}
+worker.weight=${WORKER_WEIGHT}
+# alert server listener host
+alert.listen.host=${ALERT_LISTEN_HOST}

docker/build/conf/dolphinscheduler/zookeeper.properties.tpl

@@ -27,3 +27,4 @@ zookeeper.dolphinscheduler.root=${ZOOKEEPER_ROOT}
 #zookeeper.retry.base.sleep=100
 #zookeeper.retry.max.sleep=30000
 #zookeeper.retry.maxtime=10
+#zookeeper.max.wait.time=10000

docker/build/startup-init-conf.sh

@@ -20,7 +20,8 @@ set -e
 echo "init env variables"
-# Define parameters default value.
+# Define parameters default value
 #============================================================================
 # Database Source
 #============================================================================
@@ -34,7 +35,7 @@ export DATABASE_DRIVER=${DATABASE_DRIVER:-"org.postgresql.Driver"}
 export DATABASE_PARAMS=${DATABASE_PARAMS:-"characterEncoding=utf8"}
 #============================================================================
-# System
+# Common
 #============================================================================
 export DOLPHINSCHEDULER_ENV_PATH=${DOLPHINSCHEDULER_ENV_PATH:-"/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh"}
 export DOLPHINSCHEDULER_DATA_BASEDIR_PATH=${DOLPHINSCHEDULER_DATA_BASEDIR_PATH:-"/tmp/dolphinscheduler"}
@@ -69,35 +70,17 @@ export MASTER_LISTEN_PORT=${MASTER_LISTEN_PORT:-"5678"}
 #============================================================================
 export WORKER_EXEC_THREADS=${WORKER_EXEC_THREADS:-"100"}
 export WORKER_HEARTBEAT_INTERVAL=${WORKER_HEARTBEAT_INTERVAL:-"10"}
-export WORKER_FETCH_TASK_NUM=${WORKER_FETCH_TASK_NUM:-"3"}
 export WORKER_MAX_CPULOAD_AVG=${WORKER_MAX_CPULOAD_AVG:-"100"}
 export WORKER_RESERVED_MEMORY=${WORKER_RESERVED_MEMORY:-"0.1"}
 export WORKER_LISTEN_PORT=${WORKER_LISTEN_PORT:-"1234"}
 export WORKER_GROUP=${WORKER_GROUP:-"default"}
 export WORKER_WEIGHT=${WORKER_WEIGHT:-"100"}
+export ALERT_LISTEN_HOST=${ALERT_LISTEN_HOST:-"127.0.0.1"}
 #============================================================================
 # Alert Server
 #============================================================================
-# alert plugin dir
-export ALERT_PLUGIN_DIR=${ALERT_PLUGIN_DIR:-"/opt/dolphinscheduler"}
-# xls file
-export XLS_FILE_PATH=${XLS_FILE_PATH:-"/tmp/xls"}
-# mail
-export MAIL_SERVER_HOST=${MAIL_SERVER_HOST:-""}
-export MAIL_SERVER_PORT=${MAIL_SERVER_PORT:-""}
-export MAIL_SENDER=${MAIL_SENDER:-""}
-export MAIL_USER=${MAIL_USER:-""}
-export MAIL_PASSWD=${MAIL_PASSWD:-""}
-export MAIL_SMTP_STARTTLS_ENABLE=${MAIL_SMTP_STARTTLS_ENABLE:-"true"}
-export MAIL_SMTP_SSL_ENABLE=${MAIL_SMTP_SSL_ENABLE:-"false"}
-export MAIL_SMTP_SSL_TRUST=${MAIL_SMTP_SSL_TRUST:-""}
-# wechat
-export ENTERPRISE_WECHAT_ENABLE=${ENTERPRISE_WECHAT_ENABLE:-"false"}
-export ENTERPRISE_WECHAT_CORP_ID=${ENTERPRISE_WECHAT_CORP_ID:-""}
-export ENTERPRISE_WECHAT_SECRET=${ENTERPRISE_WECHAT_SECRET:-""}
-export ENTERPRISE_WECHAT_AGENT_ID=${ENTERPRISE_WECHAT_AGENT_ID:-""}
-export ENTERPRISE_WECHAT_USERS=${ENTERPRISE_WECHAT_USERS:-""}
+export ALERT_PLUGIN_DIR=${ALERT_PLUGIN_DIR:-"lib/plugin/alert"}
 echo "generate app config"
 ls ${DOLPHINSCHEDULER_HOME}/conf/ | grep ".tpl" | while read line; do
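The loop at the end of startup-init-conf.sh turns every conf/*.tpl file into its final config file by expanding the exported variables. A minimal sketch of one common way to do that expansion with an eval'd here-document; this is an assumption about the rendering step, not a quote of the script:

# Hypothetical rendering step: expand ${VAR} placeholders in each *.tpl using the exported env vars.
ls ${DOLPHINSCHEDULER_HOME}/conf/ | grep ".tpl" | while read line; do
eval "cat << EOF
$(cat ${DOLPHINSCHEDULER_HOME}/conf/${line})
EOF
" > ${DOLPHINSCHEDULER_HOME}/conf/${line%.*}
done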

docker/build/startup.sh

@@ -26,7 +26,7 @@ DOLPHINSCHEDULER_LOGS=${DOLPHINSCHEDULER_HOME}/logs
 waitDatabase() {
     echo "test ${DATABASE_TYPE} service"
     while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do
-        counter=$((counter+1))
+        local counter=$((counter+1))
         if [ $counter == 30 ]; then
             echo "Error: Couldn't connect to ${DATABASE_TYPE}."
             exit 1
@@ -57,12 +57,41 @@
     ${DOLPHINSCHEDULER_SCRIPT}/create-dolphinscheduler.sh
 }
+# check ds version
+checkDSVersion() {
+    if [ ${DATABASE_TYPE} = "mysql" ]; then
+        v=$(mysql -h${DATABASE_HOST} -P${DATABASE_PORT} -u${DATABASE_USERNAME} --password=${DATABASE_PASSWORD} -D ${DATABASE_DATABASE} -e "SELECT * FROM public.t_ds_version" 2>/dev/null)
+    else
+        v=$(PGPASSWORD=${DATABASE_PASSWORD} psql -h ${DATABASE_HOST} -p ${DATABASE_PORT} -U ${DATABASE_USERNAME} -d ${DATABASE_DATABASE} -tAc "SELECT * FROM public.t_ds_version" 2>/dev/null)
+    fi
+    if [ -n "$v" ]; then
+        echo "ds version: $v"
+        return 0
+    else
+        return 1
+    fi
+}
+# check init database
+checkInitDatabase() {
+    echo "check init database"
+    while ! checkDSVersion; do
+        local counter=$((counter+1))
+        if [ $counter == 30 ]; then
+            echo "Error: Couldn't check init database."
+            exit 1
+        fi
+        echo "Trying to check init database. Attempt $counter."
+        sleep 5
+    done
+}
 # wait zk
 waitZK() {
     echo "connect remote zookeeper"
     echo "${ZOOKEEPER_QUORUM}" | awk -F ',' 'BEGIN{ i=1 }{ while( i <= NF ){ print $i; i++ } }' | while read line; do
         while ! nc -z ${line%:*} ${line#*:}; do
-            counter=$((counter+1))
+            local counter=$((counter+1))
             if [ $counter == 30 ]; then
                 echo "Error: Couldn't connect to zookeeper."
                 exit 1
@@ -133,7 +162,7 @@ case "$1" in
     initApiServer
     initAlertServer
     initLoggerServer
-    LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-api-server.log
+    LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-api.log
     ;;
   (master-server)
     waitZK
@@ -153,10 +182,11 @@
     waitDatabase
     initDatabase
     initApiServer
-    LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-api-server.log
+    LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-api.log
     ;;
   (alert-server)
     waitDatabase
+    checkInitDatabase
     initAlertServer
     LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-alert.log
     ;;
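The retry counters in waitDatabase/waitZK are now declared local, so each wait function counts its own attempts instead of sharing one global counter across calls; without local, a later wait would start near the 30-attempt limit if an earlier one had already consumed retries. A tiny sketch of the underlying shell semantics, with made-up function names (not code from the script):

#!/bin/bash
# Without 'local', the counter value leaks out of the function:
wait_plain() { counter=$((counter+1)); }
wait_plain; wait_plain
echo "$counter"                # prints 2: the second call kept counting from the first

# With 'local', each call starts from an unset counter and nothing leaks outside:
unset counter
wait_local() { local counter=$((counter+1)); echo "inside: $counter"; }
wait_local; wait_local         # prints "inside: 1" both times
echo "${counter:-unset}"       # prints "unset"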

docker/docker-swarm/docker-compose.yml

@@ -87,22 +87,11 @@ services:
     image: apache/dolphinscheduler:latest
     container_name: dolphinscheduler-alert
     command: alert-server
-    ports:
-      - 50052:50052
     environment:
       TZ: Asia/Shanghai
-      XLS_FILE_PATH: "/tmp/xls"
-      MAIL_SERVER_HOST: ""
-      MAIL_SERVER_PORT: ""
-      MAIL_SENDER: ""
-      MAIL_USER: ""
-      MAIL_PASSWD: ""
-      MAIL_SMTP_STARTTLS_ENABLE: "false"
-      MAIL_SMTP_SSL_ENABLE: "false"
-      MAIL_SMTP_SSL_TRUST: ""
-      ENTERPRISE_WECHAT_ENABLE: "false"
-      ENTERPRISE_WECHAT_CORP_ID: ""
-      ENTERPRISE_WECHAT_SECRET: ""
-      ENTERPRISE_WECHAT_AGENT_ID: ""
-      ENTERPRISE_WECHAT_USERS: ""
+      ALERT_PLUGIN_DIR: lib/plugin/alert
       DATABASE_HOST: dolphinscheduler-postgresql
       DATABASE_PORT: 5432
       DATABASE_USERNAME: root
@@ -169,21 +158,12 @@ services:
       TZ: Asia/Shanghai
       WORKER_EXEC_THREADS: "100"
       WORKER_HEARTBEAT_INTERVAL: "10"
-      WORKER_FETCH_TASK_NUM: "3"
       WORKER_MAX_CPULOAD_AVG: "100"
       WORKER_RESERVED_MEMORY: "0.1"
       WORKER_GROUP: "default"
       WORKER_WEIGHT: "100"
       DOLPHINSCHEDULER_DATA_BASEDIR_PATH: /tmp/dolphinscheduler
-      XLS_FILE_PATH: "/tmp/xls"
-      MAIL_SERVER_HOST: ""
-      MAIL_SERVER_PORT: ""
-      MAIL_SENDER: ""
-      MAIL_USER: ""
-      MAIL_PASSWD: ""
-      MAIL_SMTP_STARTTLS_ENABLE: "false"
-      MAIL_SMTP_SSL_ENABLE: "false"
-      MAIL_SMTP_SSL_TRUST: ""
+      ALERT_LISTEN_HOST: dolphinscheduler-alert
       DATABASE_HOST: dolphinscheduler-postgresql
       DATABASE_PORT: 5432
       DATABASE_USERNAME: root
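With the mail and WeChat variables gone from the compose files, the worker only needs to know where the alert server listens (ALERT_LISTEN_HOST), and the alert server itself just points at its plugin directory. A minimal sketch of wiring the same two services by hand outside Compose; the network name is illustrative, "worker-server" follows the command naming seen for alert-server/master-server in startup.sh, and the database/ZooKeeper settings the containers also require are omitted for brevity:

# Illustrative only: the worker resolves the alert server by container name on a shared network.
docker network create dolphinscheduler

docker run -d --network dolphinscheduler --name dolphinscheduler-alert \
  -e ALERT_PLUGIN_DIR=lib/plugin/alert \
  apache/dolphinscheduler:latest alert-server

docker run -d --network dolphinscheduler --name dolphinscheduler-worker \
  -e ALERT_LISTEN_HOST=dolphinscheduler-alert \
  apache/dolphinscheduler:latest worker-server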

docker/docker-swarm/docker-stack.yml

@@ -84,22 +84,11 @@ services:
   dolphinscheduler-alert:
     image: apache/dolphinscheduler:latest
     command: alert-server
-    ports:
-      - 50052:50052
     environment:
       TZ: Asia/Shanghai
-      XLS_FILE_PATH: "/tmp/xls"
-      MAIL_SERVER_HOST: ""
-      MAIL_SERVER_PORT: ""
-      MAIL_SENDER: ""
-      MAIL_USER: ""
-      MAIL_PASSWD: ""
-      MAIL_SMTP_STARTTLS_ENABLE: "false"
-      MAIL_SMTP_SSL_ENABLE: "false"
-      MAIL_SMTP_SSL_TRUST: ""
-      ENTERPRISE_WECHAT_ENABLE: "false"
-      ENTERPRISE_WECHAT_CORP_ID: ""
-      ENTERPRISE_WECHAT_SECRET: ""
-      ENTERPRISE_WECHAT_AGENT_ID: ""
-      ENTERPRISE_WECHAT_USERS: ""
+      ALERT_PLUGIN_DIR: lib/plugin/alert
       DATABASE_HOST: dolphinscheduler-postgresql
       DATABASE_PORT: 5432
       DATABASE_USERNAME: root
@@ -163,21 +152,12 @@ services:
       TZ: Asia/Shanghai
       WORKER_EXEC_THREADS: "100"
       WORKER_HEARTBEAT_INTERVAL: "10"
-      WORKER_FETCH_TASK_NUM: "3"
       WORKER_MAX_CPULOAD_AVG: "100"
       WORKER_RESERVED_MEMORY: "0.1"
       WORKER_GROUP: "default"
       WORKER_WEIGHT: "100"
       DOLPHINSCHEDULER_DATA_BASEDIR_PATH: /tmp/dolphinscheduler
-      XLS_FILE_PATH: "/tmp/xls"
-      MAIL_SERVER_HOST: ""
-      MAIL_SERVER_PORT: ""
-      MAIL_SENDER: ""
-      MAIL_USER: ""
-      MAIL_PASSWD: ""
-      MAIL_SMTP_STARTTLS_ENABLE: "false"
-      MAIL_SMTP_SSL_ENABLE: "false"
-      MAIL_SMTP_SSL_TRUST: ""
+      ALERT_LISTEN_HOST: dolphinscheduler-alert
       DATABASE_HOST: dolphinscheduler-postgresql
       DATABASE_PORT: 5432
       DATABASE_USERNAME: root

dolphinscheduler-alert/src/main/resources/alert.properties

@@ -28,5 +28,3 @@ alert.plugin.dir=./lib/plugin/alert
 #alert.plugin.binding config the Alert Plugin need be load when development and run in IDE
 #alert.plugin.binding=\
 # ./dolphinscheduler-alert-plugin/dolphinscheduler-alert-email/pom.xml

dolphinscheduler-api/src/main/resources/application-api.properties

@@ -21,9 +21,10 @@ server.port=12345
 # session config
 server.servlet.session.timeout=7200
+# servlet config
 server.servlet.context-path=/dolphinscheduler/
-# Set time zone
+# time zone
 spring.jackson.time-zone=GMT+8
 # file size limit for upload
@@ -37,6 +38,7 @@ server.compression.mime-types=text/html,text/xml,text/plain,text/css,text/javascript,application/javascript,application/json,application/xml
 # post content
 server.jetty.max-http-form-post-size=5000000
+# i18n
 spring.messages.encoding=UTF-8
 # i18n classpath folder , file prefix messages, if have many files, use "," seperator
@@ -58,5 +60,3 @@ security.authentication.type=PASSWORD
 #ldap.password=password
 #ldap.user.identity.attribute=uid
 #ldap.user.email.attribute=mail

dolphinscheduler-api/src/main/resources/logback-api.xml

@@ -31,12 +31,12 @@
     <!-- api server logback config start -->
     <appender name="APILOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
-        <file>${log.base}/dolphinscheduler-api-server.log</file>
+        <file>${log.base}/dolphinscheduler-api.log</file>
         <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
             <level>INFO</level>
         </filter>
         <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
-            <fileNamePattern>${log.base}/dolphinscheduler-api-server.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
+            <fileNamePattern>${log.base}/dolphinscheduler-api.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
             <maxHistory>168</maxHistory>
             <maxFileSize>64MB</maxFileSize>
         </rollingPolicy>

dolphinscheduler-common/src/main/resources/common.properties

@@ -33,7 +33,7 @@ java.security.krb5.conf.path=/opt/krb5.conf
 # login user from keytab username
 login.user.keytab.username=hdfs-mycluster@ESZ.COM
-# loginUserFromKeytab path
+# login user from keytab path
 login.user.keytab.path=/opt/hdfs.headless.keytab
 #resource.view.suffixs
@@ -45,13 +45,13 @@ hdfs.root.user=hdfs
 # if resource.storage.type=S3, the value like: s3a://dolphinscheduler; if resource.storage.type=HDFS, When namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml to conf dir
 fs.defaultFS=hdfs://mycluster:8020
-# if resource.storage.type=S3s3 endpoint
+# if resource.storage.type=S3, s3 endpoint
 fs.s3a.endpoint=http://192.168.xx.xx:9010
-# if resource.storage.type=S3s3 access key
+# if resource.storage.type=S3, s3 access key
 fs.s3a.access.key=A3DXS30FO22544RE
-# if resource.storage.type=S3s3 secret key
+# if resource.storage.type=S3, s3 secret key
 fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK
 # if resourcemanager HA enable, please type the HA ips ; if resourcemanager is single, make this value empty
@@ -59,6 +59,7 @@ yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx
 # if resourcemanager HA enable or not use resourcemanager, please keep the default value; If resourcemanager is single, you only need to replace ds1 to actual resourcemanager hostname.
 yarn.application.status.address=http://ds1:8088/ws/v1/cluster/apps/%s
 # job history status url when application number threshold is reached(default 10000,maybe it was set to 1000)
 yarn.job.history.status.address=http://ds1:19888/ws/v1/history/mapreduce/jobs/%s

dolphinscheduler-server/src/main/resources/master.properties

@@ -21,7 +21,6 @@
 # master execute task number in parallel
 #master.exec.task.num=20
 # master dispatch task number
 #master.dispatch.task.num=3
@@ -34,7 +33,6 @@
 # master commit task interval
 #master.task.commit.interval=1000
 # only less than cpu avg load, master server can work. default value -1 : the number of cpu cores * 2
 #master.max.cpuload.avg=-1

dolphinscheduler-service/src/main/resources/logback-zookeeper.xml

@@ -32,7 +32,7 @@
     <appender name="LOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
         <file>${log.base}/dolphinscheduler-zookeeper.log</file>
         <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
-            <fileNamePattern>${log.base}/dolphinscheduler-alert.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
+            <fileNamePattern>${log.base}/dolphinscheduler-zookeeper.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
             <maxHistory>20</maxHistory>
             <maxFileSize>64MB</maxFileSize>
         </rollingPolicy>
