DolphinScheduler/docker/build/conf/dolphinscheduler/common.properties.tpl
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#============================================================================
# System
#============================================================================
# system env path. self-configured: please make sure the directory and file exist and have read/write/execute permissions
dolphinscheduler.env.path=${DOLPHINSCHEDULER_ENV_PATH}
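
# note: the ${...} placeholders in this .tpl file are presumably substituted from
# container environment variables when the Docker image starts; e.g. a hypothetical
# rendered value could look like:
#   dolphinscheduler.env.path=/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh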

# user data directory path. self-configured: please make sure the directory exists and has read/write permissions
data.basedir.path=${DOLPHINSCHEDULER_DATA_BASEDIR_PATH}

# resource upload storage type: HDFS, S3, or NONE
resource.storage.type=NONE
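
# illustrative example (not part of the shipped template): to store resources on
# HDFS rather than disabling uploads, set the storage type here together with the
# upload path from the HDFS section below, for instance:
#   resource.storage.type=HDFS
#   resource.upload.path=/dolphinscheduler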

#============================================================================
# HDFS
#============================================================================
# resource store path on HDFS/S3: resource files will be stored under this hadoop hdfs path. self-configured: please make sure the directory exists on hdfs and has read/write permissions. "/dolphinscheduler" is recommended
#resource.upload.path=/dolphinscheduler

# whether kerberos authentication starts
#hadoop.security.authentication.startup.state=false

# java.security.krb5.conf path
#java.security.krb5.conf.path=/opt/krb5.conf

# loginUserFromKeytab user
#login.user.keytab.username=hdfs-mycluster@ESZ.COM

# loginUserFromKeytab path
#login.user.keytab.path=/opt/hdfs.headless.keytab
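
# illustrative example (not part of the shipped template): enabling kerberos means
# turning on the startup state and uncommenting the related settings above together,
# e.g.:
#   hadoop.security.authentication.startup.state=true
#   java.security.krb5.conf.path=/opt/krb5.conf
#   login.user.keytab.username=hdfs-mycluster@ESZ.COM
#   login.user.keytab.path=/opt/hdfs.headless.keytab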

#resource.view.suffixs
#resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml,properties

# if resource.storage.type=HDFS, the user needs permission to create directories under the HDFS root path
hdfs.root.user=hdfs

# kerberos expire time
kerberos.expire.time=7

#============================================================================
# S3
#============================================================================
# if resource.storage.type=S3, the value should look like s3a://dolphinscheduler; if resource.storage.type=HDFS and namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml to the conf dir
fs.defaultFS=hdfs://mycluster:8020

# if resource.storage.type=S3, the s3 endpoint
#fs.s3a.endpoint=http://192.168.199.91:9010

# if resource.storage.type=S3, the s3 access key
#fs.s3a.access.key=A3DXS30FO22544RE

# if resource.storage.type=S3, the s3 secret key
#fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK
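
# illustrative example (not part of the shipped template): a complete S3 setup
# changes the storage type and points fs.defaultFS at the bucket, together with the
# endpoint and credentials above, e.g. (placeholder values):
#   resource.storage.type=S3
#   fs.defaultFS=s3a://dolphinscheduler
#   fs.s3a.endpoint=http://192.168.199.91:9010
#   fs.s3a.access.key=<access-key>
#   fs.s3a.secret.key=<secret-key>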

# if you do not use the hadoop resourcemanager, keep the default value; if resourcemanager HA is enabled, list the HA IPs; if the resourcemanager is single, make this value empty
yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx

# If resourcemanager HA is enabled, or you do not use the resourcemanager, keep the default value; if the resourcemanager is single, just replace ark1 with the actual resourcemanager hostname
yarn.application.status.address=http://ark1:8088/ws/v1/cluster/apps/%s
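
# illustrative example (not part of the shipped template): for a single, non-HA
# resourcemanager on a host named, say, "rm-host" (hypothetical hostname), the two
# settings above would become:
#   yarn.resourcemanager.ha.rm.ids=
#   yarn.application.status.address=http://rm-host:8088/ws/v1/cluster/apps/%s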
|
|