
improve install_config.conf description (#6681)

* improve install_config.conf description

* Update install_config.conf

update according to review ideas
3.0.0/version-upgrade
xuhhui 3 years ago committed by GitHub
parent commit 2b90d90873

dolphinscheduler-server/src/main/resources/config/install_config.conf (30 changed lines)

@@ -21,7 +21,7 @@
 # A comma separated list of machine hostname or IP would be installed DolphinScheduler,
 # including master, worker, api, alert. If you want to deploy in pseudo-distributed
 # mode, just write a pseudo-distributed hostname
-# Example for hostnames: ips="ds1,ds2,ds3,ds4,ds5", Example for IP: ips="192.168.8.1,192.168.8.2,192.168.8.3,192.168.8.4,192.168.8.5"
+# Example for hostnames: ips="ds1,ds2,ds3,ds4,ds5", Example for IPs: ips="192.168.8.1,192.168.8.2,192.168.8.3,192.168.8.4,192.168.8.5"
 ips="ds1,ds2,ds3,ds4,ds5"
 
 # Port of SSH protocol, default value is 22. For now we only support same port in all `ips` machine
@@ -30,29 +30,29 @@ sshPort="22"
 
 # A comma separated list of machine hostname or IP would be installed Master server, it
 # must be a subset of configuration `ips`.
-# Example for hostnames: ips="ds1,ds2", Example for IP: ips="192.168.8.1,192.168.8.2"
+# Example for hostnames: masters="ds1,ds2", Example for IPs: masters="192.168.8.1,192.168.8.2"
 masters="ds1,ds2"
 
 # A comma separated list of machine <hostname>:<workerGroup> or <IP>:<workerGroup>.All hostname or IP must be a
 # subset of configuration `ips`, And workerGroup have default value as `default`, but we recommend you declare behind the hosts
-# Example for hostnames: ips="ds1:default,ds2:default,ds3:default", Example for IP: ips="192.168.8.1:default,192.168.8.2:default,192.168.8.3:default"
+# Example for hostnames: workers="ds1:default,ds2:default,ds3:default", Example for IPs: workers="192.168.8.1:default,192.168.8.2:default,192.168.8.3:default"
 workers="ds1:default,ds2:default,ds3:default,ds4:default,ds5:default"
 
 # A comma separated list of machine hostname or IP would be installed Alert server, it
 # must be a subset of configuration `ips`.
-# Example for hostnames: ips="ds3", Example for IP: ips="192.168.8.3"
+# Example for hostname: alertServer="ds3", Example for IP: alertServer="192.168.8.3"
 alertServer="ds3"
 
 # A comma separated list of machine hostname or IP would be installed API server, it
 # must be a subset of configuration `ips`.
-# Example for hostnames: ips="ds1", Example for IP: ips="192.168.8.1"
+# Example for hostname: apiServers="ds1", Example for IP: apiServers="192.168.8.1"
 apiServers="ds1"
 
-# The directory to install DolphinScheduler for all machine we config above. It will automatically created by `install.sh` script if not exists.
-# **DO NOT** set this configuration same as the current path (pwd)
+# The directory to install DolphinScheduler for all machine we config above. It will automatically be created by `install.sh` script if not exists.
+# Do not set this configuration same as the current path (pwd)
 installPath="/data1_1T/dolphinscheduler"
 
-# The user to deploy DolphinScheduler for all machine we config above. For now user must create by yourself before run `install.sh`
+# The user to deploy DolphinScheduler for all machine we config above. For now user must create by yourself before running `install.sh`
 # script. The user needs to have sudo privileges and permissions to operate hdfs. If hdfs is enabled than the root directory needs
 # to be created by this user
 deployUser="dolphinscheduler"
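
To make the host-role relationships in the hunk above concrete, here is a minimal sketch of these settings filled in for the pseudo-distributed (single-machine) mode the comments mention; the hostname ds1, the install path, and the comments are placeholders for illustration, not part of this commit:

    # hypothetical pseudo-distributed example: every role runs on the one host in `ips`,
    # so each role list is the same one-element subset of `ips`
    ips="ds1"
    sshPort="22"
    masters="ds1"
    workers="ds1:default"                 # worker group left at its default value `default`
    alertServer="ds1"
    apiServers="ds1"
    installPath="/opt/dolphinscheduler"   # must not be the directory you run install.sh from
    deployUser="dolphinscheduler"         # must already exist before running install.sh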
@@ -67,7 +67,7 @@ dataBasedirPath="/tmp/dolphinscheduler"
 # and this configuration only support one parameter so far.
 javaHome="/your/java/home/here"
 
-# DolphinScheduler API service port, also this your DolphinScheduler UI component's URL port, default values is 12345
+# DolphinScheduler API service port, also this is your DolphinScheduler UI component's URL port, default value is 12345
 apiServerPort="12345"
 
 # ---------------------------------------------------------
@@ -122,12 +122,12 @@ taskPluginDir="lib/plugin/task"
 # resource storage type: HDFS, S3, NONE
 resourceStorageType="NONE"
 
-# resource store on HDFS/S3 path, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions. "/dolphinscheduler" is recommended
+# resource store on HDFS/S3 path, resource file will store to this hdfs path, self configuration, please make sure the directory exists on hdfs and has read write permissions. "/dolphinscheduler" is recommended
 resourceUploadPath="/dolphinscheduler"
 
-# if resourceStorageType is HDFS,defaultFS write namenode address,HA you need to put core-site.xml and hdfs-site.xml in the conf directory.
+# if resourceStorageType is HDFS,defaultFS write namenode address,HA, you need to put core-site.xml and hdfs-site.xml in the conf directory.
 # if S3,write S3 address,HA,for example :s3a://dolphinscheduler,
-# Note,s3 be sure to create the root directory /dolphinscheduler
+# Note,S3 be sure to create the root directory /dolphinscheduler
 defaultFS="hdfs://mycluster:8020"
 
 # if resourceStorageType is S3, the following three configuration is required, otherwise please ignore
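
As a hedged illustration of the storage choices this hunk describes (all addresses are placeholders, not part of this commit):

    # HDFS with a single namenode:
    #   defaultFS="hdfs://namenode-host:8020"
    # HDFS HA: keep the nameservice address and put core-site.xml and hdfs-site.xml in the conf directory:
    #   defaultFS="hdfs://mycluster:8020"
    # S3: point at the bucket and create the root directory /dolphinscheduler beforehand:
    #   defaultFS="s3a://dolphinscheduler"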
@@ -138,13 +138,13 @@ s3SecretKey="xxxxxxxxxx"
 
 # resourcemanager port, the default value is 8088 if not specified
 resourceManagerHttpAddressPort="8088"
 
-# if resourcemanager HA is enabled, please set the HA IPs; if resourcemanager is single, keep this value empty
+# if resourcemanager HA is enabled, please set the HA IPs; if resourcemanager is single node, keep this value empty
 yarnHaIps="192.168.xx.xx,192.168.xx.xx"
 
-# if resourcemanager HA is enabled or not use resourcemanager, please keep the default value; If resourcemanager is single, you only need to replace ds1 to actual resourcemanager hostname
+# if resourcemanager HA is enabled or not use resourcemanager, please keep the default value; If resourcemanager is single node, you only need to replace 'yarnIp1' to actual resourcemanager hostname
 singleYarnIp="yarnIp1"
 
-# who have permissions to create directory under HDFS/S3 root path
+# who has permission to create directory under HDFS/S3 root path
 # Note: if kerberos is enabled, please config hdfsRootUser=
 hdfsRootUser="hdfs"
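
And a short sketch of the two resourcemanager cases this last hunk distinguishes (the IPs and the rm-host hostname are placeholders for illustration):

    # HA resourcemanager: set the HA IPs, keep singleYarnIp at its default
    #   yarnHaIps="192.168.1.10,192.168.1.11"
    #   singleYarnIp="yarnIp1"
    # single-node resourcemanager: leave yarnHaIps empty, replace 'yarnIp1' with the real hostname
    #   yarnHaIps=""
    #   singleYarnIp="rm-host"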
