Wenjun Ruan authored 1 year ago, committed by GitHub
23 changed files with 729 additions and 292 deletions
IPropertyDelegate.java
@@ -0,0 +1,91 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.common.config;

import java.util.Optional;
import java.util.Set;
import java.util.function.Function;

/**
 * Read-only property accessor. Implementations supply the raw string lookups;
 * the default methods derive the typed and Optional-based accessors from them.
 */
public interface IPropertyDelegate {

    String get(String key);

    String get(String key, String defaultValue);

    Set<String> getPropertyKeys();

    default Optional<String> getOptional(String key) {
        return getOptional(key, Function.identity());
    }

    default Integer getInt(String key) {
        return get(key, Integer::parseInt);
    }

    default Integer getInt(String key, Integer defaultValue) {
        return get(key, Integer::parseInt, defaultValue);
    }

    default Long getLong(String key) {
        return get(key, Long::parseLong);
    }

    default Long getLong(String key, Long defaultValue) {
        return get(key, Long::parseLong, defaultValue);
    }

    default Double getDouble(String key) {
        return get(key, Double::parseDouble);
    }

    default Double getDouble(String key, Double defaultValue) {
        return get(key, Double::parseDouble, defaultValue);
    }

    default Boolean getBoolean(String key) {
        return get(key, Boolean::parseBoolean);
    }

    default Boolean getBoolean(String key, Boolean defaultValue) {
        return get(key, Boolean::parseBoolean, defaultValue);
    }

    default <T> T get(String key, Function<String, T> transformFunction) {
        String value = get(key);
        if (value == null) {
            return null;
        }
        return transformFunction.apply(value);
    }

    default <T> T get(String key, Function<String, T> transformFunction, T defaultValue) {
        String value = get(key);
        if (value == null) {
            return defaultValue;
        }
        try {
            return transformFunction.apply(value);
        } catch (Exception ignored) {
            // an unparsable value falls back to the default rather than propagating
            return defaultValue;
        }
    }

    default <T> Optional<T> getOptional(String key, Function<String, T> transformFunction) {
        return Optional.ofNullable(get(key, transformFunction));
    }
}
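The default methods above mean a concrete delegate only has to provide the two raw get variants and getPropertyKeys; every typed accessor is derived from them. A minimal map-backed sketch, for illustration only (MapPropertyDelegate is not part of this commit):

import java.util.Map;
import java.util.Set;

public class MapPropertyDelegate implements IPropertyDelegate {

    private final Map<String, String> values;

    public MapPropertyDelegate(Map<String, String> values) {
        this.values = values;
    }

    @Override
    public String get(String key) {
        return values.get(key);
    }

    @Override
    public String get(String key, String defaultValue) {
        return values.getOrDefault(key, defaultValue);
    }

    @Override
    public Set<String> getPropertyKeys() {
        return values.keySet();
    }
}

With java.util.Collections.singletonMap("server.port", "8080") as the backing map (a hypothetical key), getInt("server.port") yields 8080 through get(key, Integer::parseInt), getInt("missing.port", 22) falls back to 22 because the key is absent, and a malformed value also falls back since the three-argument get swallows the NumberFormatException.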
ImmutablePriorityPropertyDelegate.java
@@ -0,0 +1,137 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.common.config;

import java.util.HashSet;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.extern.slf4j.Slf4j;

/**
 * This class will get the property by the priority of the following: env > jvm > properties.
 */
@Slf4j
public class ImmutablePriorityPropertyDelegate extends ImmutablePropertyDelegate {

    private static final Map<String, Optional<ConfigValue<String>>> configValueMap = new ConcurrentHashMap<>();

    public ImmutablePriorityPropertyDelegate(String propertyAbsolutePath) {
        super(propertyAbsolutePath);
    }

    @Override
    public String get(String key) {
        Optional<ConfigValue<String>> configValue = configValueMap.computeIfAbsent(key, k -> {
            Optional<ConfigValue<String>> value = getConfigValueFromEnv(k);
            if (value.isPresent()) {
                log.debug("Override config value from env, key: {} actualKey: {}, value: {}",
                        k, value.get().getActualKey(), value.get().getValue());
                return value;
            }
            value = getConfigValueFromJvm(k);
            if (value.isPresent()) {
                log.debug("Override config value from jvm, key: {} actualKey: {}, value: {}",
                        k, value.get().getActualKey(), value.get().getValue());
                return value;
            }
            value = getConfigValueFromProperties(k);
            value.ifPresent(
                    stringConfigValue -> log.debug("Get config value from properties, key: {} actualKey: {}, value: {}",
                            k, stringConfigValue.getActualKey(), stringConfigValue.getValue()));
            return value;
        });
        return configValue.map(ConfigValue::getValue).orElse(null);
    }

    @Override
    public String get(String key, String defaultValue) {
        String value = get(key);
        if (value == null) {
            return defaultValue;
        }
        return value;
    }

    @Override
    public Set<String> getPropertyKeys() {
        Set<String> propertyKeys = new HashSet<>();
        propertyKeys.addAll(super.getPropertyKeys());
        propertyKeys.addAll(System.getProperties().stringPropertyNames());
        propertyKeys.addAll(System.getenv().keySet());
        return propertyKeys;
    }

    private Optional<ConfigValue<String>> getConfigValueFromEnv(String key) {
        String value = System.getenv(key);
        if (value != null) {
            return Optional.of(ConfigValue.fromEnv(key, value));
        }
        // fall back to the conventional env var form: '.' and '-' become '_', upper-cased
        String envVarKey = key.replaceAll("[.-]", "_").toUpperCase();
        String envVarVal = System.getenv(envVarKey);
        if (envVarVal != null) {
            // record the env var name that actually matched as the actual key
            return Optional.of(ConfigValue.fromEnv(envVarKey, envVarVal));
        }
        return Optional.empty();
    }

    private Optional<ConfigValue<String>> getConfigValueFromJvm(String key) {
        String value = System.getProperty(key);
        if (value != null) {
            return Optional.of(ConfigValue.fromJvm(key, value));
        }
        return Optional.empty();
    }

    private Optional<ConfigValue<String>> getConfigValueFromProperties(String key) {
        String value = super.get(key);
        if (value != null) {
            return Optional.of(ConfigValue.fromProperties(key, value));
        }
        return Optional.empty();
    }

    @Data
    @AllArgsConstructor
    public static final class ConfigValue<T> {

        private String actualKey;
        private T value;
        private boolean fromProperties;
        private boolean fromJvm;
        private boolean fromEnv;

        public static <T> ConfigValue<T> fromProperties(String actualKey, T value) {
            return new ConfigValue<>(actualKey, value, true, false, false);
        }

        public static <T> ConfigValue<T> fromJvm(String actualKey, T value) {
            return new ConfigValue<>(actualKey, value, false, true, false);
        }

        public static <T> ConfigValue<T> fromEnv(String actualKey, T value) {
            return new ConfigValue<>(actualKey, value, false, false, true);
        }
    }

}
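A sketch of how the override chain behaves at a call site (illustrative, not part of the patch; the "/common.properties" path mirrors the default used by ImmutablePropertyDelegate):

ImmutablePriorityPropertyDelegate delegate =
        new ImmutablePriorityPropertyDelegate("/common.properties");

// Resolution order for delegate.get("resource.storage.type"):
//   1. System.getenv("resource.storage.type")        literal env key
//   2. System.getenv("RESOURCE_STORAGE_TYPE")        '.' and '-' replaced by '_', upper-cased
//   3. System.getProperty("resource.storage.type")   JVM -D property
//   4. the value loaded from the classpath properties file
String storageType = delegate.get("resource.storage.type");

Note that configValueMap is static and populated via computeIfAbsent, so the first resolution of a key wins for the lifetime of the JVM; env or -D changes made afterwards are not observed for that key.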
ImmutablePropertyDelegate.java
@@ -0,0 +1,80 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.common.config;

import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;
import java.util.Set;

import lombok.extern.slf4j.Slf4j;

/**
 * This class is used to get the properties from the classpath.
 */
@Slf4j
public class ImmutablePropertyDelegate implements IPropertyDelegate {

    private static final String COMMON_PROPERTIES_NAME = "/common.properties";

    private final Properties properties;

    public ImmutablePropertyDelegate() {
        this(COMMON_PROPERTIES_NAME);
    }

    public ImmutablePropertyDelegate(String... propertyAbsolutePath) {
        properties = new Properties();
        // read the property files from the classpath
        for (String fileName : propertyAbsolutePath) {
            try (InputStream fis = getClass().getResourceAsStream(fileName)) {
                if (fis == null) {
                    // getResourceAsStream returns null rather than throwing when the file is missing
                    throw new IOException("Property file not found: " + fileName);
                }
                Properties subProperties = new Properties();
                subProperties.load(fis);
                properties.putAll(subProperties);
            } catch (IOException e) {
                log.error("Load property: {} error, please check if the file exists under the classpath",
                        fileName, e);
                System.exit(1);
            }
        }
        printProperties();
    }

    public ImmutablePropertyDelegate(Properties properties) {
        this.properties = properties;
    }

    @Override
    public String get(String key) {
        return properties.getProperty(key);
    }

    @Override
    public String get(String key, String defaultValue) {
        return properties.getProperty(key, defaultValue);
    }

    @Override
    public Set<String> getPropertyKeys() {
        return properties.stringPropertyNames();
    }

    private void printProperties() {
        properties.forEach((k, v) -> log.debug("Get property {} -> {}", k, v));
    }
}
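Usage sketch, assuming a common.properties with the keys shown later in this commit is on the classpath:

ImmutablePropertyDelegate properties = new ImmutablePropertyDelegate("/common.properties");

// raw and typed reads; the defaults are returned when a key is absent or unparsable
String basedir = properties.get("data.basedir.path", "/tmp/dolphinscheduler");
Integer queryInterval = properties.getInt("resource.query.interval", 10000);
Boolean sudoEnabled = properties.getBoolean("sudo.enable", true);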
ImmutablePriorityPropertyDelegateTest.java
@@ -0,0 +1,40 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.common.config;

import static com.github.stefanbirkner.systemlambda.SystemLambda.withEnvironmentVariable;
import static org.apache.dolphinscheduler.common.constants.Constants.COMMON_PROPERTIES_PATH;

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;

class ImmutablePriorityPropertyDelegateTest {

    private final ImmutablePriorityPropertyDelegate immutablePriorityPropertyDelegate =
            new ImmutablePriorityPropertyDelegate(COMMON_PROPERTIES_PATH);

    @Test
    void getOverrideFromEnv() throws Exception {
        withEnvironmentVariable("string.property", "valueFromEnv")
                .execute(() -> {
                    String value = immutablePriorityPropertyDelegate.get("string.property");
                    Assertions.assertEquals("valueFromEnv", value);
                });
    }

}
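Only the env branch is covered here; a companion test for the JVM-property branch could look like the following sketch (not part of the commit; jvm.only.property is a hypothetical key, chosen so the static resolution cache is not already holding a value for it):

    @Test
    void getOverrideFromJvm() {
        try {
            System.setProperty("jvm.only.property", "valueFromJvm");
            // env lookup misses, so the JVM -D property is the first hit in the chain
            Assertions.assertEquals("valueFromJvm",
                    immutablePriorityPropertyDelegate.get("jvm.only.property"));
        } finally {
            System.clearProperty("jvm.only.property");
        }
    }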
ImmutablePropertyDelegateTest.java
@@ -0,0 +1,87 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.common.config;

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;

class ImmutablePropertyDelegateTest {

    private static final ImmutablePropertyDelegate immutablePropertyDelegate = new ImmutablePropertyDelegate();

    @Test
    void get() {
        Assertions.assertNull(immutablePropertyDelegate.get("null"));
    }

    @Test
    void testGetDefaultValue() {
        Assertions.assertEquals("default", immutablePropertyDelegate.get("null", "default"));
    }

    @Test
    void getPropertyKeys() {
        Assertions.assertNotNull(immutablePropertyDelegate.getPropertyKeys());
    }

    @Test
    void getOptional() {
        Assertions.assertFalse(immutablePropertyDelegate.getOptional("null").isPresent());
    }

    @Test
    void getInt() {
        Assertions.assertEquals(1, immutablePropertyDelegate.getInt("int.property"));
    }

    @Test
    void getIntDefault() {
        Assertions.assertEquals(2, immutablePropertyDelegate.getInt("int2.property", 2));
    }

    @Test
    void getLong() {
        Assertions.assertEquals(1, immutablePropertyDelegate.getLong("long.property"));
    }

    @Test
    void getLongDefault() {
        Assertions.assertEquals(2, immutablePropertyDelegate.getLong("long2.property", 2L));
    }

    @Test
    void getDouble() {
        Assertions.assertEquals(1.1, immutablePropertyDelegate.getDouble("double.property"));
    }

    @Test
    void getDoubleDefault() {
        Assertions.assertEquals(2.2, immutablePropertyDelegate.getDouble("double2.property", 2.2d));
    }

    @Test
    void getBoolean() {
        Assertions.assertEquals(true, immutablePropertyDelegate.getBoolean("boolean.property"));
    }

    @Test
    void getBooleanDefault() {
        Assertions.assertEquals(false, immutablePropertyDelegate.getBoolean("boolean2.property", false));
    }

}
common.properties (test resource)
@@ -0,0 +1,199 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

string.property=string
int.property=1
long.property=1
double.property=1.1
boolean.property=true

# user data local directory path, please make sure the directory exists and has read/write permissions
data.basedir.path=/tmp/dolphinscheduler

# resource view suffixes
#resource.view.suffixs=txt,log,sh,bat,conf,cfg,py,java,sql,xml,hql,properties,json,yml,yaml,ini,js

# resource storage type: LOCAL, HDFS, S3, OSS, GCS, ABS, NONE. LOCAL is a special case of HDFS with the "resource.hdfs.fs.defaultFS=file:///" configuration.
# please note that LOCAL mode does not support reading and writing in distributed mode, which means you can only use your resources on one machine,
# unless you use a shared file mount point
resource.storage.type=LOCAL
# resource store on HDFS/S3 path; resource files will be stored under this base path. Please make sure the directory exists on hdfs and has read/write permissions. "/dolphinscheduler" is recommended
resource.storage.upload.base.path=/dolphinscheduler

# The Azure client ID (Azure Application (client) ID)
resource.azure.client.id=minioadmin
# The Azure client secret in the Azure application
resource.azure.client.secret=minioadmin
# The Azure data factory subscription ID
resource.azure.subId=minioadmin
# The Azure tenant ID in the Azure Active Directory
resource.azure.tenant.id=minioadmin
# The query interval
resource.query.interval=10000

# The AWS access key. Required if resource.storage.type=S3 or the EMR task is used
resource.aws.access.key.id=minioadmin
# The AWS secret access key. Required if resource.storage.type=S3 or the EMR task is used
resource.aws.secret.access.key=minioadmin
# The AWS Region to use. Required if resource.storage.type=S3 or the EMR task is used
resource.aws.region=cn-north-1
# The name of the bucket. You need to create it yourself, otherwise the system cannot start. All buckets in Amazon S3 share a single namespace; make sure the bucket name is unique.
resource.aws.s3.bucket.name=dolphinscheduler
# You need to set this parameter when using a private cloud S3. If S3 is on a public cloud, you only need to set resource.aws.region, or set this to the public cloud endpoint such as s3.cn-north-1.amazonaws.com.cn
resource.aws.s3.endpoint=http://localhost:9000

# alibaba cloud access key id, required if you set resource.storage.type=OSS
resource.alibaba.cloud.access.key.id=<your-access-key-id>
# alibaba cloud access key secret, required if you set resource.storage.type=OSS
resource.alibaba.cloud.access.key.secret=<your-access-key-secret>
# alibaba cloud region, required if you set resource.storage.type=OSS
resource.alibaba.cloud.region=cn-hangzhou
# oss bucket name, required if you set resource.storage.type=OSS
resource.alibaba.cloud.oss.bucket.name=dolphinscheduler
# oss bucket endpoint, required if you set resource.storage.type=OSS
resource.alibaba.cloud.oss.endpoint=https://oss-cn-hangzhou.aliyuncs.com

# the location of the google cloud credential, required if you set resource.storage.type=GCS
resource.google.cloud.storage.credential=/path/to/credential
# gcs bucket name, required if you set resource.storage.type=GCS
resource.google.cloud.storage.bucket.name=<your-bucket>

# abs container name, required if you set resource.storage.type=ABS
resource.azure.blob.storage.container.name=<your-container>
# abs account name, required if you set resource.storage.type=ABS
resource.azure.blob.storage.account.name=<your-account-name>
# abs connection string, required if you set resource.storage.type=ABS
resource.azure.blob.storage.connection.string=<your-connection-string>

# if resource.storage.type=HDFS, the user must have permission to create directories under the HDFS root path
resource.hdfs.root.user=hdfs
# if resource.storage.type=S3, the value is like: s3a://dolphinscheduler; if resource.storage.type=HDFS and namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml to the conf dir
resource.hdfs.fs.defaultFS=hdfs://mycluster:8020

# whether to start up kerberos
hadoop.security.authentication.startup.state=false

# java.security.krb5.conf path
java.security.krb5.conf.path=/opt/krb5.conf

# login user from keytab username
login.user.keytab.username=hdfs-mycluster@ESZ.COM

# login user from keytab path
login.user.keytab.path=/opt/hdfs.headless.keytab

# kerberos expire time, the unit is hour
kerberos.expire.time=2

# resourcemanager port, the default value is 8088 if not specified
resource.manager.httpaddress.port=8088
# if resourcemanager HA is enabled, please set the HA IPs; if resourcemanager is single, keep this value empty
yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx
# if resourcemanager HA is enabled or resourcemanager is not used, keep the default value; if resourcemanager is single, you only need to replace ds1 with the actual resourcemanager hostname
yarn.application.status.address=http://ds1:%s/ws/v1/cluster/apps/%s
# job history status url when the application number threshold is reached (default 10000, it may have been set to 1000)
yarn.job.history.status.address=http://ds1:19888/ws/v1/history/mapreduce/jobs/%s

# datasource encryption enable
datasource.encryption.enable=false

# datasource encryption salt
datasource.encryption.salt=!@#$%^&*

# data quality option
data-quality.jar.name=dolphinscheduler-data-quality-dev-SNAPSHOT.jar

#data-quality.error.output.path=/tmp/data-quality-error-data

# Whether hive SQL is executed in the same session
support.hive.oneSession=false

# use sudo or not; if set to true, the executing user is the tenant user and the deploy user needs sudo permissions; if set to false, the executing user is the deploy user and no sudo permissions are needed
sudo.enable=true

# network interface preferred, like eth0, default: empty
#dolphin.scheduler.network.interface.preferred=

# network interface restrict, like docker0,docker1, default: docker0
dolphin.scheduler.network.interface.restrict=docker0

# network IP gets priority, default: inner outer
#dolphin.scheduler.network.priority.strategy=default

# system env path
#dolphinscheduler.env.path=dolphinscheduler_env.sh

# development state
development.state=false

# rpc port
alert.rpc.port=50052

# set path of conda.sh
conda.path=/opt/anaconda3/etc/profile.d/conda.sh

# Task resource limit state
task.resource.limit.state=false

# mlflow task plugin preset repository
ml.mlflow.preset_repository=https://github.com/apache/dolphinscheduler-mlflow
# mlflow task plugin preset repository version
ml.mlflow.preset_repository_version="main"

# way to collect applicationId: log (original regex match), aop
appId.collect=log

# The default env list that will be loaded by the Shell task, e.g. /etc/profile,~/.bash_profile
shell.env_source_list=
# The interceptor type of the Shell task, e.g. bash, sh, cmd
shell.interceptor.type=bash

# Whether to enable remote logging
remote.logging.enable=false
# if remote.logging.enable = true, set the target of remote logging
remote.logging.target=OSS
# if remote.logging.enable = true, set the log base directory
remote.logging.base.dir=logs
# if remote.logging.enable = true, set the number of threads used to send logs to remote storage
remote.logging.thread.pool.size=10
# oss access key id, required if you set remote.logging.target=OSS
remote.logging.oss.access.key.id=<access.key.id>
# oss access key secret, required if you set remote.logging.target=OSS
remote.logging.oss.access.key.secret=<access.key.secret>
# oss bucket name, required if you set remote.logging.target=OSS
remote.logging.oss.bucket.name=<bucket.name>
# oss endpoint, required if you set remote.logging.target=OSS
remote.logging.oss.endpoint=<endpoint>
# s3 access key id, required if you set remote.logging.target=S3
remote.logging.s3.access.key.id=<access.key.id>
# s3 access key secret, required if you set remote.logging.target=S3
remote.logging.s3.access.key.secret=<access.key.secret>
# s3 bucket name, required if you set remote.logging.target=S3
remote.logging.s3.bucket.name=<bucket.name>
# s3 endpoint, required if you set remote.logging.target=S3
remote.logging.s3.endpoint=<endpoint>
# s3 region, required if you set remote.logging.target=S3
remote.logging.s3.region=<region>
# the location of the google cloud credential, required if you set remote.logging.target=GCS
remote.logging.google.cloud.storage.credential=/path/to/credential
# gcs bucket name, required if you set remote.logging.target=GCS
remote.logging.google.cloud.storage.bucket.name=<your-bucket>
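Because ImmutablePriorityPropertyDelegate normalizes dots and dashes to underscores, any key in this file can be overridden without editing it. A hedged sketch using the same system-lambda helper this commit already uses in its tests (assumes the static imports from ImmutablePriorityPropertyDelegateTest and a method that may throw Exception):

ImmutablePriorityPropertyDelegate delegate =
        new ImmutablePriorityPropertyDelegate("/common.properties");
withEnvironmentVariable("RESOURCE_STORAGE_TYPE", "S3")
        .execute(() -> {
            // RESOURCE_STORAGE_TYPE is the normalized form of resource.storage.type,
            // so the env value shadows LOCAL from this file (assuming the key is not cached yet)
            Assertions.assertEquals("S3", delegate.get("resource.storage.type"));
        });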