#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
spring:
  profiles:
    active: h2
  jackson:
    time-zone: UTC
    date-format: "yyyy-MM-dd HH:mm:ss"
  banner:
    charset: UTF-8
  sql:
    init:
      schema-locations: classpath:sql/dolphinscheduler_h2.sql
  datasource:
    driver-class-name: org.h2.Driver
    url: jdbc:h2:mem:dolphinscheduler;MODE=MySQL;DB_CLOSE_DELAY=-1;DATABASE_TO_LOWER=true
    username: sa
    password: ""
  quartz:
    job-store-type: jdbc
    jdbc:
      initialize-schema: never
    properties:
      org.quartz.threadPool.threadPriority: 5
      org.quartz.jobStore.isClustered: true
      org.quartz.jobStore.class: org.springframework.scheduling.quartz.LocalDataSourceJobStore
      org.quartz.scheduler.instanceId: AUTO
      org.quartz.jobStore.tablePrefix: QRTZ_
      org.quartz.jobStore.acquireTriggersWithinLock: true
      org.quartz.scheduler.instanceName: DolphinScheduler
      org.quartz.threadPool.class: org.quartz.simpl.SimpleThreadPool
      org.quartz.jobStore.useProperties: false
      org.quartz.threadPool.makeThreadsDaemons: true
      org.quartz.threadPool.threadCount: 25
      org.quartz.jobStore.misfireThreshold: 60000
      org.quartz.scheduler.makeSchedulerThreadDaemon: true
      org.quartz.jobStore.driverDelegateClass: org.quartz.impl.jdbcjobstore.StdJDBCDelegate
      org.quartz.jobStore.clusterCheckinInterval: 5000
      org.quartz.scheduler.batchTriggerAcquisitionMaxCount: 1
  servlet:
    multipart:
      max-file-size: 1024MB
      max-request-size: 1024MB
  messages:
    basename: i18n/messages
  jpa:
    hibernate:
      ddl-auto: none
  mvc:
    pathmatch:
      matching-strategy: ANT_PATH_MATCHER
  cloud.discovery.client.composite-indicator.enabled: false
mybatis-plus:
  mapper-locations: classpath:org/apache/dolphinscheduler/dao/mapper/*Mapper.xml
  type-aliases-package: org.apache.dolphinscheduler.dao.entity
  configuration:
    cache-enabled: false
    call-setters-on-nulls: true
    map-underscore-to-camel-case: true
    jdbc-type-for-null: NULL
  global-config:
    db-config:
      id-type: auto
    banner: false
registry:
  type: zookeeper
  zookeeper:
    namespace: dolphinscheduler
    connect-string: localhost:2181
    retry-policy:
      base-sleep-time: 1s
      max-sleep: 3s
      max-retries: 5
    session-timeout: 60s
    connection-timeout: 15s
    block-until-connected: 15s
    digest: ~
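    # `~` (null) leaves ZooKeeper digest authentication disabled. To enable digest ACLs you would
    # typically supply a `user:password` pair here, for example (placeholder credentials only):
    # digest: "dolphinscheduler:changeit"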
security:
  authentication:
    # Authentication types (supported types: PASSWORD, LDAP, CASDOOR_SSO)
    type: PASSWORD
    # If you set the type to `LDAP`, the config below takes effect
    ldap:
      # LDAP server config
      urls: ldap://ldap.forumsys.com:389/
      base-dn: dc=example,dc=com
      username: cn=read-only-admin,dc=example,dc=com
      password: password
      user:
        # admin user id when you log in via LDAP
        admin: read-only-admin
        identity-attribute: uid
        email-attribute: mail
        # action when the LDAP user does not exist (supported types: CREATE, DENY)
        not-exist-action: CREATE
      ssl:
        enable: false
        # absolute path and password of the JKS trust store
        trust-store: "/ldapkeystore.jks"
        trust-store-password: ""
    casdoor:
      user:
        admin: admin
    oauth2:
      enable: false
      provider:
        github:
          authorizationUri: "https://github.com/login/oauth/authorize"
          redirectUri: "http://localhost:12345/dolphinscheduler/redirect/login/oauth2"
          clientId: ""
          clientSecret: ""
          tokenUri: "https://github.com/login/oauth/access_token"
          userInfoUri: "https://api.github.com/user"
          callbackUrl: "http://localhost:5173/login"
          iconUri: ""
          provider: github
        gitee:
          authorizationUri: "https://gitee.com/oauth/authorize"
          redirectUri: "http://127.0.0.1:12345/dolphinscheduler/redirect/login/oauth2"
          clientId: ""
          clientSecret: ""
          tokenUri: "https://gitee.com/oauth/token?grant_type=authorization_code"
          userInfoUri: "https://gitee.com/api/v5/user"
          callbackUrl: "http://127.0.0.1:5173/login"
          iconUri: ""
          provider: gitee
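      # A general OAuth2 note (not specific to this file): to show an OAuth2 login entry on the UI,
      # set `enable: true` above and fill in the clientId/clientSecret issued by the chosen provider;
      # the redirectUri must match the callback URL registered in that provider's OAuth application
      # settings.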
casdoor:
  # Your Casdoor server URL
  endpoint: http://localhost:8000
  client-id: ""
  client-secret: ""
  # The certificate may span multiple lines; you can use `|-` for readability
  certificate: ""
  # Your organization name added in Casdoor
  organization-name: built-in
  # Your application name added in Casdoor
  application-name: dolphinscheduler
  # DolphinScheduler login URL
  redirect-url: http://localhost:5173/login
master:
  listen-port: 5678
  # number of master pre-execute threads, limiting how many commands are prepared in parallel
  pre-exec-threads: 10
  # number of master execute threads, limiting how many process instances run in parallel
  exec-threads: 10
  # number of tasks the master dispatches per batch
  dispatch-task-number: 3
  # master host selector used to pick a suitable worker, default value: LowerWeight. Optional values: random, round_robin, lower_weight
  host-selector: lower_weight
  # master heartbeat interval
  max-heartbeat-interval: 10s
  # number of retries when the master commits a task
  task-commit-retry-times: 5
  # interval between master task-commit retries
  task-commit-interval: 1s
  state-wheel-interval: 5s
  server-load-protection:
    enabled: true
    # Max system CPU usage; the master executes workflows only while its system CPU usage is lower than this value.
    max-system-cpu-usage-percentage-thresholds: 0.9
    # Max JVM CPU usage; the master executes workflows only while its JVM CPU usage is lower than this value.
    max-jvm-cpu-usage-percentage-thresholds: 0.9
    # Max system memory usage; the master executes workflows only while its system memory usage is lower than this value.
    max-system-memory-usage-percentage-thresholds: 0.9
    # Max disk usage; the master executes workflows only while its disk usage is lower than this value.
    max-disk-usage-percentage-thresholds: 0.9
  # failover interval
  failover-interval: 10m
  # kill the yarn/k8s application when failing over a task instance, default true
  kill-application-when-task-failover: true
  worker-group-refresh-interval: 10s
  command-fetch-strategy:
    type: ID_SLOT_BASED
    config:
      # the incremental id step
      id-step: 1
      # number of commands the master fetches per batch
      fetch-size: 10
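    # A note on ID_SLOT_BASED (as generally described for DolphinScheduler): each registered master
    # owns a slot and only fetches commands whose id maps to that slot (roughly, the command id
    # modulo the number of masters, with id-step accounting for the database auto-increment step),
    # so several masters can poll the command table in parallel without claiming the same command.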
worker:
  # worker listen port
  listen-port: 1234
  # number of worker execute threads, limiting how many task instances run in parallel
  exec-threads: 10
  # worker heartbeat interval
  max-heartbeat-interval: 10s
  # worker host weight used when dispatching tasks, default value 100
  host-weight: 100
  server-load-protection:
    enabled: true
    # Max system CPU usage; tasks are dispatched to the worker only while its system CPU usage is lower than this value.
    max-system-cpu-usage-percentage-thresholds: 1
    # Max JVM CPU usage; tasks are dispatched to the worker only while its JVM CPU usage is lower than this value.
    max-jvm-cpu-usage-percentage-thresholds: 0.9
    # Max system memory usage; tasks are dispatched to the worker only while its system memory usage is lower than this value.
    max-system-memory-usage-percentage-thresholds: 0.9
    # Max disk usage; tasks are dispatched to the worker only while its disk usage is lower than this value.
    max-disk-usage-percentage-thresholds: 0.9
  task-execute-threads-full-policy: REJECT
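  # (REJECT above: when all worker execute threads are busy, a newly dispatched task is rejected
  # rather than queued on the worker.)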
  tenant-config:
    # The tenant corresponds to a user of the operating system, which the worker uses to submit jobs.
    # If the system does not have this user, it is created automatically when this parameter is true.
    auto-create-tenant-enabled: true
    # Intended for distributed users, e.g. users created by FreeIPA and stored in LDAP. This parameter
    # only applies to Linux. When it is true, tenants are not created automatically and the
    # auto-create setting above has no effect.
    distributed-tenant: false
    # If set to true, the worker bootstrap user is used as the tenant to execute tasks when the tenant is `default`.
    default-tenant-enabled: true
alert:
  port: 50052
  # Mark an alert as failed if the alert server has not completed it within this many milliseconds.
  # A value of 0 means infinite: the alert server keeps waiting for the alert result.
  wait-timeout: 0
  max-heartbeat-interval: 60s
  # The maximum number of alerts that can be processed in parallel
  sender-parallelism: 5
api:
  audit-enable: false
  # Traffic control: if you turn this on, the maximum number of requests per second is limited,
  # both globally (max-global-qps-rate) and per tenant (default-tenant-qps-rate).
  traffic-control:
    global-switch: false
    max-global-qps-rate: 300
    tenant-switch: false
    default-tenant-qps-rate: 10
    #customize-tenant-qps-rate:
    #  eg.
    #  tenant1: 11
    #  tenant2: 20
python-gateway:
  # Whether to enable the Python gateway server. The default value is true.
  enabled: true
  # Authentication token for connections from the Python API to the Python gateway server. Change
  # the default value when you deploy on a public network.
  auth-token: jwUDzpLsNKEFER4*a8gruBH_GsAurNxU7A@Xc
  # The address the Python gateway server binds to. Set it to `0.0.0.0` if your Python API runs on a
  # different host than the Python gateway server; otherwise it can be a specific address such as
  # `127.0.0.1` or `localhost`.
  gateway-server-address: 0.0.0.0
  # The port the Python gateway server listens on, i.e. the port the Python API connects to.
  gateway-server-port: 25333
  # The address of the Python callback client.
  python-address: 127.0.0.1
  # The port of the Python callback client.
  python-port: 25334
  # Close a socket server connection if no further request is accepted within this many milliseconds.
  # A value of 0 means infinite: the connection is never closed, even if no requests arrive.
  connect-timeout: 0
  # Close an active socket server connection if the Python program has been inactive for this many
  # milliseconds. A value of 0 means infinite: connections are never closed for inactivity.
  read-timeout: 0
server:
  port: 12345
  servlet:
    session:
      timeout: 120m
    context-path: /dolphinscheduler/
  compression:
    enabled: true
    mime-types: text/html,text/xml,text/plain,text/css,text/javascript,application/javascript,application/json,application/xml
  jetty:
    max-http-form-post-size: 5000000
    accesslog:
      enabled: true
      custom-format: '%{client}a - %u %t "%r" %s %O %{ms}Tms'

management:
  endpoints:
    web:
      exposure:
        include: health,metrics,prometheus
  endpoint:
    health:
      enabled: true
      show-details: always
  health:
    db:
      enabled: true
    defaults:
      enabled: false
  metrics:
    tags:
      application: ${spring.application.name}

metrics:
  enabled: true
# Override by profile
---
spring:
  config:
    activate:
      on-profile: postgresql
  quartz:
    properties:
      org.quartz.jobStore.driverDelegateClass: org.quartz.impl.jdbcjobstore.PostgreSQLDelegate
  datasource:
    driver-class-name: org.postgresql.Driver
    url: jdbc:postgresql://127.0.0.1:5432/dolphinscheduler
    username: root
    password: root
---
spring:
  config:
    activate:
      on-profile: mysql
  sql:
    init:
      schema-locations: classpath:sql/dolphinscheduler_mysql.sql
  datasource:
    driver-class-name: com.mysql.cj.jdbc.Driver
    url: jdbc:mysql://127.0.0.1:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8
    username: root
    password: root
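# To use one of the profiles above instead of the default in-memory h2 database, override
# `spring.profiles.active` at startup, e.g. via the environment variable SPRING_PROFILES_ACTIVE=mysql
# (or =postgresql), and make sure the matching JDBC driver is on the classpath and that the url,
# username and password point at your database.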