#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
|
|
#task queue implementation, default "zookeeper"
dolphinscheduler.queue.impl=${TASK_QUEUE}
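# illustrative example only: with no TASK_QUEUE environment variable supplied, the
# usual choice is the zookeeper-backed queue named as the default above, e.g.
# dolphinscheduler.queue.impl=zookeeper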
|
|
|
#zookeeper cluster. multiple are separated by commas. eg. 192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181
zookeeper.quorum=${ZOOKEEPER_QUORUM}
#dolphinscheduler root directory
zookeeper.dolphinscheduler.root=/dolphinscheduler
#zookeeper session timeout, connection timeout and retry settings
zookeeper.session.timeout=300
zookeeper.connection.timeout=300
zookeeper.retry.base.sleep=100
zookeeper.retry.max.sleep=30000
zookeeper.retry.maxtime=5
|
|
|
#============================================================================
# System
#============================================================================
# system env path. self configuration, please make sure the directory and file exist and have read, write and execute permissions
dolphinscheduler.env.path=${DOLPHINSCHEDULER_ENV_PATH}
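# illustrative example only (the path is an assumption, adjust to your installation):
# this typically points at the dolphinscheduler_env.sh shipped with the distribution, e.g.
# dolphinscheduler.env.path=/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh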
|
#resource.view.suffixs
resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml,properties
# development state, default "false"
development.state=true
# user data directory path, self configuration, please make sure the directory exists and has read and write permissions
data.basedir.path=${DOLPHINSCHEDULER_DATA_BASEDIR_PATH}
# directory path for user data download, self configuration, please make sure the directory exists and has read and write permissions
data.download.basedir.path=${DOLPHINSCHEDULER_DATA_DOWNLOAD_BASEDIR_PATH}
# process execute directory, self configuration, please make sure the directory exists and has read and write permissions
process.exec.basepath=${DOLPHINSCHEDULER_PROCESS_EXEC_BASEPATH}
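# illustrative example only (paths are assumptions, not values set by this file): one
# common layout keeps all three working directories under a single base, e.g.
# data.basedir.path=/tmp/dolphinscheduler
# data.download.basedir.path=/tmp/dolphinscheduler/download
# process.exec.basepath=/tmp/dolphinscheduler/exec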
|
|
|
# resource upload startup type : HDFS, S3, NONE
res.upload.startup.type=NONE
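# illustrative example only: to keep resource files on HDFS rather than disabling
# uploads, switch the type and fill in the HDFS section below, e.g.
# res.upload.startup.type=HDFS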
|
|
|
#============================================================================
# HDFS
#============================================================================
# users who have permission to create directories under the HDFS root path
hdfs.root.user=hdfs
# data base dir, resource files will be stored under this hdfs path, self configuration, please make sure the directory exists on hdfs and has read and write permissions. "/dolphinscheduler" is recommended
data.store2hdfs.basepath=/dolphinscheduler
# whether kerberos authentication is enabled
hadoop.security.authentication.startup.state=false
# java.security.krb5.conf path
java.security.krb5.conf.path=/opt/krb5.conf
# loginUserFromKeytab user
login.user.keytab.username=hdfs-mycluster@ESZ.COM
# loginUserFromKeytab path
login.user.keytab.path=/opt/hdfs.headless.keytab
|
|
|
#============================================================================
# S3
#============================================================================
# ha or single namenode. If namenode ha, core-site.xml and hdfs-site.xml need to be copied
# to the conf directory. s3 is also supported, for example: s3a://dolphinscheduler
fs.defaultFS=hdfs://mycluster:8020
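# illustrative example only: when storing resources on S3 instead of HDFS, fs.defaultFS
# points at an s3a bucket (the bucket name here is just an assumption), e.g.
# fs.defaultFS=s3a://dolphinscheduler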
|
# required for s3: s3 endpoint
fs.s3a.endpoint=http://192.168.199.91:9010
# required for s3: s3 access key
fs.s3a.access.key=A3DXS30FO22544RE
# required for s3: s3 secret key
fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK
# resourcemanager HA: specify the resourcemanager IPs separated by commas; leave this empty for a single resourcemanager
yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx
|
# If it is a single resourcemanager, you only need to configure one host name. If it is resourcemanager HA, the default configuration is fine
yarn.application.status.address=http://ark1:8088/ws/v1/cluster/apps/%s
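# illustrative example only: for a single resourcemanager, replace ark1 above with that
# host name (single-rm-host below is a placeholder, not a real default), e.g.
# yarn.application.status.address=http://single-rm-host:8088/ws/v1/cluster/apps/%s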
|
|
|
|
|