qiaozhanwei authored 5 years ago, committed by bao liang
2283 changed files with 143763 additions and 175812 deletions
@@ -1,32 +0,0 @@
## *Tips*
- *Thanks very much for contributing to Apache DolphinScheduler.*
- *Please review https://dolphinscheduler.apache.org/en-us/community/index.html before opening a pull request.*

## What is the purpose of the pull request

*(For example: This pull request adds a checkstyle plugin.)*

## Brief change log

*(for example:)*
- *Add maven-checkstyle-plugin to root pom.xml*

## Verify this pull request

*(Please pick either of the following options)*

This pull request is code cleanup without any test coverage.

*(or)*

This pull request is already covered by existing tests, such as *(please describe tests)*.

*(or)*

This change added tests and can be verified as follows:

*(example:)*
- *Added dolphinscheduler-dao tests for end-to-end.*
- *Added CronUtilsTest to verify the change.*
- *Manually verified the change by testing locally.*
@@ -1,64 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

name: Backend

on:
  push:
    paths:
      - '.github/workflows/ci_backend.yml'
      - 'package.xml'
      - 'pom.xml'
      - 'dolphinscheduler-alert/**'
      - 'dolphinscheduler-api/**'
      - 'dolphinscheduler-common/**'
      - 'dolphinscheduler-dao/**'
      - 'dolphinscheduler-rpc/**'
      - 'dolphinscheduler-server/**'
  pull_request:
    paths:
      - '.github/workflows/ci_backend.yml'
      - 'package.xml'
      - 'pom.xml'
      - 'dolphinscheduler-alert/**'
      - 'dolphinscheduler-api/**'
      - 'dolphinscheduler-common/**'
      - 'dolphinscheduler-dao/**'
      - 'dolphinscheduler-rpc/**'
      - 'dolphinscheduler-server/**'

jobs:
  Compile-check:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v1
      - name: Set up JDK 1.8
        uses: actions/setup-java@v1
        with:
          java-version: 1.8
      - name: Compile
        run: mvn -U -B -T 1C clean install -Prelease -Dmaven.compile.fork=true -Dmaven.test.skip=true
  License-check:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v1
      - name: Set up JDK 1.8
        uses: actions/setup-java@v1
        with:
          java-version: 1.8
      - name: Check
        run: mvn -B apache-rat:check
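For local verification, the two jobs in the deleted workflow above reduce to the same Maven invocations; a minimal sketch, with the commands copied verbatim from the workflow (JDK 1.8 on the PATH is assumed):

```
# Compile-check: full build without tests, as the CI job ran it.
mvn -U -B -T 1C clean install -Prelease -Dmaven.compile.fork=true -Dmaven.test.skip=true
# License-check: Apache RAT header audit.
mvn -B apache-rat:check
```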
@@ -1,58 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

name: Frontend

on:
  push:
    paths:
      - '.github/workflows/ci_frontend.yml'
      - 'dolphinscheduler-ui/**'
  pull_request:
    paths:
      - '.github/workflows/ci_frontend.yml'
      - 'dolphinscheduler-ui/**'

jobs:
  Compile-check:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest]
    steps:
      - uses: actions/checkout@v1
      - name: Set up Node.js
        uses: actions/setup-node@v1
        with:
          version: 8
      - name: Compile
        run: |
          cd dolphinscheduler-ui
          npm install node-sass --unsafe-perm
          npm install
          npm run build

  License-check:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v1
      - name: Set up JDK 1.8
        uses: actions/setup-java@v1
        with:
          java-version: 1.8
      - name: Check
        run: mvn -B apache-rat:check
@@ -1,5 +0,0 @@
Apache DolphinScheduler (incubating) is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator PMC.
Incubation is required of all newly accepted projects until a further review indicates that the infrastructure,
communications, and decision making process have stabilized in a manner consistent with other successful ASF projects.
While incubation status is not necessarily a reflection of the completeness or stability of the code,
it does indicate that the project has yet to be fully endorsed by the ASF.
@@ -1,5 +1,7 @@
-Easy Scheduler
-Copyright 2019 The Analysys Foundation
+Apache DolphinScheduler (incubating)
+Copyright 2019 The Apache Software Foundation

 This product includes software developed at
-The Analysys Foundation (https://www.analysys.cn/).
+The Apache Software Foundation (http://www.apache.org/).
@@ -1,11 +0,0 @@
## Build Image

```
cd ..
docker build -t dolphinscheduler --build-arg version=1.1.0 --build-arg tar_version=1.1.0-SNAPSHOT -f dockerfile/Dockerfile .
docker run -p 12345:12345 -p 8888:8888 --rm --name dolphinscheduler -d dolphinscheduler
```

* Visit the URL: http://127.0.0.1:8888
* Username: admin, Password: dolphinscheduler123

## Note
* macOS: Docker's memory needs to be raised from the default 2 GB to 4 GB. Steps: Preferences -> Advanced -> adjust resources -> Apply & Restart
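A minimal smoke test of the container, assuming the port mapping from the `docker run` line above; it only confirms the web UI answers over HTTP:

```
# Poll the UI port published by `-p 8888:8888` until the container is up.
curl -sSf http://127.0.0.1:8888 > /dev/null && echo "DolphinScheduler UI is reachable"
```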
@@ -1,50 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# alert type is EMAIL/SMS
alert.type=EMAIL

# mail server configuration
mail.protocol=SMTP
mail.server.host=smtp.126.com
mail.server.port=
mail.sender=dolphinscheduler@126.com
mail.user=dolphinscheduler@126.com
mail.passwd=escheduler123

# TLS
mail.smtp.starttls.enable=false
# SSL
mail.smtp.ssl.enable=true
mail.smtp.ssl.trust=smtp.126.com

# xls file path; needs to be created if it does not exist
xls.file.path=/tmp/xls

# Enterprise WeChat configuration
enterprise.wechat.enable=false
enterprise.wechat.corp.id=xxxxxxx
enterprise.wechat.secret=xxxxxxx
enterprise.wechat.agent.id=xxxxxxx
enterprise.wechat.users=xxxxxxx
enterprise.wechat.token.url=https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret
enterprise.wechat.push.url=https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token
enterprise.wechat.team.send.msg={\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}
enterprise.wechat.user.send.msg={\"touser\":\"$toUser\",\"agentid\":\"$agentId\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"$msg\"}}
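As a sketch of how the two URL templates above are used: the token URL is called first with `$corpId`/`$secret` substituted, and the `access_token` in its JSON response fills `$token` in the push URL. The corp id and secret below are placeholders, not real credentials:

```
# Placeholders; substitute the real enterprise.wechat.corp.id and enterprise.wechat.secret.
CORP_ID=xxxxxxx
CORP_SECRET=xxxxxxx
# Step 1: resolve $token (the response JSON carries an "access_token" field).
curl -s "https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=${CORP_ID}&corpsecret=${CORP_SECRET}"
# Step 2: POST one of the *.send.msg JSON bodies above to the push URL with that token.
```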
@@ -1,49 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
  ~ Licensed to the Apache Software Foundation (ASF) under one or more
  ~ contributor license agreements. See the NOTICE file distributed with
  ~ this work for additional information regarding copyright ownership.
  ~ The ASF licenses this file to You under the Apache License, Version 2.0
  ~ (the "License"); you may not use this file except in compliance with
  ~ the License. You may obtain a copy of the License at
  ~
  ~     http://www.apache.org/licenses/LICENSE-2.0
  ~
  ~ Unless required by applicable law or agreed to in writing, software
  ~ distributed under the License is distributed on an "AS IS" BASIS,
  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  ~ See the License for the specific language governing permissions and
  ~ limitations under the License.
  -->

<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds"> <!-- debug="true" -->
    <property name="log.base" value="logs"/>
    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>
                [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
            </pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <appender name="ALERTLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${log.base}/dolphinscheduler-alert.log</file>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${log.base}/dolphinscheduler-alert.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
            <maxHistory>20</maxHistory>
            <maxFileSize>64MB</maxFileSize>
        </rollingPolicy>
        <encoder>
            <pattern>
                [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
            </pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <root level="INFO">
        <appender-ref ref="ALERTLOGFILE"/>
    </root>
</configuration>
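The SizeAndTimeBasedRollingPolicy above rolls the file every hour (`%d{yyyy-MM-dd_HH}`) or once it exceeds 64MB (`%i`), keeping 20 rolled files; a sketch of what the logs directory ends up containing (timestamps and indices below are hypothetical):

```
# Hypothetical listing after a few hours of alert-server traffic:
#   logs/dolphinscheduler-alert.log                   <- active file
#   logs/dolphinscheduler-alert.2019-10-01_13.0.log   <- rolled on the hour
#   logs/dolphinscheduler-alert.2019-10-01_14.0.log
#   logs/dolphinscheduler-alert.2019-10-01_14.1.log   <- same hour, rolled at 64MB
ls -l logs/
```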
@@ -1,60 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
  ~ Licensed to the Apache Software Foundation (ASF) under one or more
  ~ contributor license agreements. See the NOTICE file distributed with
  ~ this work for additional information regarding copyright ownership.
  ~ The ASF licenses this file to You under the Apache License, Version 2.0
  ~ (the "License"); you may not use this file except in compliance with
  ~ the License. You may obtain a copy of the License at
  ~
  ~     http://www.apache.org/licenses/LICENSE-2.0
  ~
  ~ Unless required by applicable law or agreed to in writing, software
  ~ distributed under the License is distributed on an "AS IS" BASIS,
  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  ~ See the License for the specific language governing permissions and
  ~ limitations under the License.
  -->

<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds">
    <logger name="org.apache.zookeeper" level="WARN"/>
    <logger name="org.apache.hbase" level="WARN"/>
    <logger name="org.apache.hadoop" level="WARN"/>

    <property name="log.base" value="logs"/>

    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>
                [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
            </pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <appender name="APISERVERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <!-- Log level filter -->
        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
            <level>INFO</level>
        </filter>
        <file>${log.base}/dolphinscheduler-api-server.log</file>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${log.base}/dolphinscheduler-api-server.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
            <maxHistory>168</maxHistory>
            <maxFileSize>64MB</maxFileSize>
        </rollingPolicy>
        <encoder>
            <pattern>
                [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
            </pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <root level="INFO">
        <appender-ref ref="APISERVERLOGFILE"/>
    </root>
</configuration>
@@ -1,40 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

logging.config=classpath:apiserver_logback.xml

# server port
server.port=12345

# session config
server.servlet.session.timeout=7200

server.servlet.context-path=/dolphinscheduler/

# file size limit for upload
spring.servlet.multipart.max-file-size=1024MB
spring.servlet.multipart.max-request-size=1024MB

# post content
server.jetty.max-http-post-size=5000000

spring.messages.encoding=UTF-8

# i18n classpath folder; file prefix "messages". If there are multiple files, use "," as the separator
spring.messages.basename=i18n/messages
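A quick check, assuming the `server.port` and `server.servlet.context-path` above, that the API server is answering; any HTTP status code proves the listener is up (no specific endpoint is assumed here):

```
# Sketch: expect an HTTP status code once the api-server has started.
curl -s -o /dev/null -w '%{http_code}\n' http://127.0.0.1:12345/dolphinscheduler/
```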
@@ -1,103 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# base spring data source configuration
spring.datasource.type=com.alibaba.druid.pool.DruidDataSource
# postgresql
spring.datasource.driver-class-name=org.postgresql.Driver
spring.datasource.url=jdbc:postgresql://127.0.0.1:5432/dolphinscheduler
spring.datasource.username=root
spring.datasource.password=root@123

# connection configuration
spring.datasource.initialSize=5
# min connection number
spring.datasource.minIdle=5
# max connection number
spring.datasource.maxActive=50

# max wait time to get a connection, in milliseconds. If maxWait is configured, fair locks are enabled by default and concurrency efficiency decreases.
# If necessary, unfair locks can be used by setting the useUnfairLock attribute to true.
spring.datasource.maxWait=60000

# interval, in milliseconds, between checks that close idle connections
spring.datasource.timeBetweenEvictionRunsMillis=60000

# interval, in milliseconds, at which the destroy thread checks connections and closes the physical connection if its idle time is greater than or equal to minEvictableIdleTimeMillis
spring.datasource.timeBetweenConnectErrorMillis=60000

# the longest time a connection may remain idle without being evicted, in milliseconds
spring.datasource.minEvictableIdleTimeMillis=300000

# the SQL used to check whether a connection is valid; it must be a query statement. If validationQuery is null, testOnBorrow, testOnReturn, and testWhileIdle will not work.
spring.datasource.validationQuery=SELECT 1

# timeout, in seconds, for the connection validity check
spring.datasource.validationQueryTimeout=3

# when applying for a connection, if it has been idle longer than timeBetweenEvictionRunsMillis,
# run validationQuery to check whether the connection is still valid
spring.datasource.testWhileIdle=true

# run validation to check whether the connection is valid when borrowing a connection
spring.datasource.testOnBorrow=true
# run validation to check whether the connection is valid when a connection is returned
spring.datasource.testOnReturn=false
spring.datasource.defaultAutoCommit=true
spring.datasource.keepAlive=true

# open PSCache and specify the PSCache size for every connection
spring.datasource.poolPreparedStatements=true
spring.datasource.maxPoolPreparedStatementPerConnectionSize=20

spring.datasource.spring.datasource.filters=stat,wall,log4j
spring.datasource.connectionProperties=druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000

# mybatis
mybatis-plus.mapper-locations=classpath*:/org.apache.dolphinscheduler.dao.mapper/*.xml

mybatis-plus.typeEnumsPackage=org.apache.dolphinscheduler.*.enums

# entity scan; multiple packages are separated by a comma or semicolon
mybatis-plus.typeAliasesPackage=org.apache.dolphinscheduler.dao.entity

# primary key type. AUTO: database auto-increment id, INPUT: user input id, ID_WORKER: globally unique numeric id, UUID: globally unique UUID
mybatis-plus.global-config.db-config.id-type=AUTO

# field strategy. IGNORED: ignore the check, NOT_NULL: non-null check, NOT_EMPTY: non-empty check
mybatis-plus.global-config.db-config.field-strategy=NOT_NULL

# convert between camelCase properties and underscored column names
mybatis-plus.global-config.db-config.column-underline=true
mybatis-plus.global-config.db-config.logic-delete-value=-1
mybatis-plus.global-config.db-config.logic-not-delete-value=0
mybatis-plus.global-config.db-config.banner=false
# the original MyBatis configuration
mybatis-plus.configuration.map-underscore-to-camel-case=true
mybatis-plus.configuration.cache-enabled=false
mybatis-plus.configuration.call-setters-on-nulls=true
mybatis-plus.configuration.jdbc-type-for-null=null

# data quality analysis is not currently in use; please ignore the following configuration
# task record flag
task.record.flag=false
task.record.datasource.url=jdbc:mysql://192.168.xx.xx:3306/etl?characterEncoding=UTF-8
task.record.datasource.username=xx
task.record.datasource.password=xx

# logger config
#logging.level.org.apache.dolphinscheduler.dao=debug
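Before starting the services it is worth running the configured validationQuery by hand; a sketch, assuming psql is installed and the credentials above are the real ones:

```
# Mirrors spring.datasource.url/username/password and validationQuery above.
PGPASSWORD='root@123' psql -h 127.0.0.1 -p 5432 -U root -d dolphinscheduler -c 'SELECT 1'
```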
@@ -1,59 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# task queue implementation, default "zookeeper"
dolphinscheduler.queue.impl=zookeeper

# user data directory path; configure it yourself and make sure the directory exists with read/write permissions
data.basedir.path=/tmp/dolphinscheduler

# directory path for user data download; configure it yourself and make sure the directory exists with read/write permissions
data.download.basedir.path=/tmp/dolphinscheduler/download

# process execute directory; configure it yourself and make sure the directory exists with read/write permissions
process.exec.basepath=/tmp/dolphinscheduler/exec

# user who has permission to create directories under the HDFS root path
hdfs.root.user=hdfs

# data base dir; resource files are stored under this HDFS path. Configure it yourself and make sure the directory exists on HDFS with read/write permissions. "/dolphinscheduler" is recommended
data.store2hdfs.basepath=/dolphinscheduler

# resource upload startup type: HDFS, S3, NONE
res.upload.startup.type=NONE

# whether Kerberos is enabled
hadoop.security.authentication.startup.state=false

# java.security.krb5.conf path
java.security.krb5.conf.path=/opt/krb5.conf

# loginUserFromKeytab user
login.user.keytab.username=hdfs-mycluster@ESZ.COM

# loginUserFromKeytab path
login.user.keytab.path=/opt/hdfs.headless.keytab

# system env path; configure it yourself and make sure the directory and file exist with read/write/execute permissions
dolphinscheduler.env.path=/opt/dolphinscheduler/conf/env/.dolphinscheduler_env.sh

# resource.view.suffixs
resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml

# is development state? default "false"
development.state=true
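Since several of the paths above must exist with read/write permissions before the servers start, a sketch of preparing them (the owner name is an assumption taken from the deployUser seen later in this commit):

```
# Paths copied from the properties above; 'dolphinscheduler' as owner is an assumption.
mkdir -p /tmp/dolphinscheduler/download /tmp/dolphinscheduler/exec
chown -R dolphinscheduler:dolphinscheduler /tmp/dolphinscheduler
```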
@@ -1,35 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# HA or single namenode. For namenode HA, copy core-site.xml and hdfs-site.xml
# to the conf directory. S3 is also supported, for example: s3a://dolphinscheduler
fs.defaultFS=hdfs://mycluster:8020

# required for S3: the S3 endpoint
fs.s3a.endpoint=http://192.168.199.91:9010

# required for S3: the S3 access key
fs.s3a.access.key=A3DXS30FO22544RE

# required for S3: the S3 secret key
fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK

# ResourceManager HA: this needs the IPs; leave it empty for a single ResourceManager
yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx

# For a single ResourceManager, configure only one host name. For ResourceManager HA, the default configuration is fine
yarn.application.status.address=http://ark1:8088/ws/v1/cluster/apps/%s
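A sketch of how the `%s` placeholder in yarn.application.status.address is consumed: the scheduler substitutes a YARN application id and polls the ResourceManager REST API (the application id below is made up):

```
APP_ID=application_1234567890123_0001   # hypothetical YARN application id
curl -s "http://ark1:8088/ws/v1/cluster/apps/${APP_ID}"
```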
@@ -1,20 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

installPath=/data1_1T/dolphinscheduler
deployUser=dolphinscheduler
ips=ark0,ark1,ark2,ark3,ark4
@@ -1,21 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

masters=ark0,ark1
workers=ark2,ark3,ark4
alertServer=ark3
apiServers=ark1
@@ -1,20 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

export PYTHON_HOME=/usr/bin/python
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
export PATH=$PYTHON_HOME:$JAVA_HOME/bin:$PATH
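This env file is referenced by dolphinscheduler.env.path in common.properties and sourced before task execution; a sanity-check sketch (the path is taken from that property):

```
source /opt/dolphinscheduler/conf/env/.dolphinscheduler_env.sh
"$JAVA_HOME/bin/java" -version    # verifies the exported JAVA_HOME resolves
```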
@@ -1,20 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

export PYTHON_HOME=/usr/bin/python
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
export PATH=$PYTHON_HOME:$JAVA_HOME/bin:$PATH
@@ -1,252 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

QUERY_SCHEDULE_LIST_NOTES=query schedule list
EXECUTE_PROCESS_TAG=execute process related operation
PROCESS_INSTANCE_EXECUTOR_TAG=process instance executor related operation
RUN_PROCESS_INSTANCE_NOTES=run process instance
START_NODE_LIST=start node list(node name)
TASK_DEPEND_TYPE=task depend type
COMMAND_TYPE=command type
RUN_MODE=run mode
TIMEOUT=timeout
EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=execute action to process instance
EXECUTE_TYPE=execute type
START_CHECK_PROCESS_DEFINITION_NOTES=start check process definition
GET_RECEIVER_CC_NOTES=query receiver cc
DESC=description
GROUP_NAME=group name
GROUP_TYPE=group type
QUERY_ALERT_GROUP_LIST_NOTES=query alert group list
UPDATE_ALERT_GROUP_NOTES=update alert group
DELETE_ALERT_GROUP_BY_ID_NOTES=delete alert group by id
VERIFY_ALERT_GROUP_NAME_NOTES=verify alert group name, check whether the alert group exists
GRANT_ALERT_GROUP_NOTES=grant alert group
USER_IDS=user id list
ALERT_GROUP_TAG=alert group related operation
CREATE_ALERT_GROUP_NOTES=create alert group
WORKER_GROUP_TAG=worker group related operation
SAVE_WORKER_GROUP_NOTES=create worker group
WORKER_GROUP_NAME=worker group name
WORKER_IP_LIST=worker ip list, e.g. 192.168.1.1,192.168.1.2
QUERY_WORKER_GROUP_PAGING_NOTES=query worker group paging
QUERY_WORKER_GROUP_LIST_NOTES=query worker group list
DELETE_WORKER_GROUP_BY_ID_NOTES=delete worker group by id
DATA_ANALYSIS_TAG=analysis related operation of task state
COUNT_TASK_STATE_NOTES=count task state
COUNT_PROCESS_INSTANCE_NOTES=count process instance state
COUNT_PROCESS_DEFINITION_BY_USER_NOTES=count process definition by user
COUNT_COMMAND_STATE_NOTES=count command state
COUNT_QUEUE_STATE_NOTES=count the running status of the task in the queue\

ACCESS_TOKEN_TAG=access token related operation
MONITOR_TAG=monitor related operation
MASTER_LIST_NOTES=master server list
WORKER_LIST_NOTES=worker server list
QUERY_DATABASE_STATE_NOTES=query database state
QUERY_ZOOKEEPER_STATE_NOTES=query zookeeper state
TASK_STATE=task instance state
SOURCE_TABLE=source table
DEST_TABLE=dest table
TASK_DATE=task date
QUERY_HISTORY_TASK_RECORD_LIST_PAGING_NOTES=query history task record list paging
DATA_SOURCE_TAG=data source related operation
CREATE_DATA_SOURCE_NOTES=create data source
DATA_SOURCE_NAME=data source name
DATA_SOURCE_NOTE=data source desc
DB_TYPE=database type
DATA_SOURCE_HOST=data source host
DATA_SOURCE_PORT=data source port
DATABASE_NAME=database name
QUEUE_TAG=queue related operation
QUERY_QUEUE_LIST_NOTES=query queue list
QUERY_QUEUE_LIST_PAGING_NOTES=query queue list paging
CREATE_QUEUE_NOTES=create queue
YARN_QUEUE_NAME=yarn(hadoop) queue name
QUEUE_ID=queue id
TENANT_DESC=tenant desc
QUERY_TENANT_LIST_PAGING_NOTES=query tenant list paging
QUERY_TENANT_LIST_NOTES=query tenant list
UPDATE_TENANT_NOTES=update tenant
DELETE_TENANT_NOTES=delete tenant
RESOURCES_TAG=resource center related operation
CREATE_RESOURCE_NOTES=create resource
RESOURCE_TYPE=resource file type
RESOURCE_NAME=resource name
RESOURCE_DESC=resource file desc
RESOURCE_FILE=resource file
RESOURCE_ID=resource id
QUERY_RESOURCE_LIST_NOTES=query resource list
DELETE_RESOURCE_BY_ID_NOTES=delete resource by id
VIEW_RESOURCE_BY_ID_NOTES=view resource by id
ONLINE_CREATE_RESOURCE_NOTES=online create resource
SUFFIX=resource file suffix
CONTENT=resource file content
UPDATE_RESOURCE_NOTES=edit resource file online
DOWNLOAD_RESOURCE_NOTES=download resource file
CREATE_UDF_FUNCTION_NOTES=create udf function
UDF_TYPE=UDF type
FUNC_NAME=function name
CLASS_NAME=package and class name
ARG_TYPES=arguments
UDF_DESC=udf desc
VIEW_UDF_FUNCTION_NOTES=view udf function
UPDATE_UDF_FUNCTION_NOTES=update udf function
QUERY_UDF_FUNCTION_LIST_PAGING_NOTES=query udf function list paging
VERIFY_UDF_FUNCTION_NAME_NOTES=verify udf function name
DELETE_UDF_FUNCTION_NOTES=delete udf function
AUTHORIZED_FILE_NOTES=authorized file
UNAUTHORIZED_FILE_NOTES=unauthorized file
AUTHORIZED_UDF_FUNC_NOTES=authorized udf func
UNAUTHORIZED_UDF_FUNC_NOTES=unauthorized udf func
VERIFY_QUEUE_NOTES=verify queue
TENANT_TAG=tenant related operation
CREATE_TENANT_NOTES=create tenant
TENANT_CODE=tenant code
TENANT_NAME=tenant name
QUEUE_NAME=queue name
PASSWORD=password
DATA_SOURCE_OTHER=jdbc connection params, format:{"key1":"value1",...}
PROJECT_TAG=project related operation
CREATE_PROJECT_NOTES=create project
PROJECT_DESC=project description
UPDATE_PROJECT_NOTES=update project
PROJECT_ID=project id
QUERY_PROJECT_BY_ID_NOTES=query project info by project id
QUERY_PROJECT_LIST_PAGING_NOTES=query project list paging
DELETE_PROJECT_BY_ID_NOTES=delete project by id
QUERY_UNAUTHORIZED_PROJECT_NOTES=query unauthorized project
QUERY_ALL_PROJECT_LIST_NOTES=query all project list
QUERY_AUTHORIZED_PROJECT_NOTES=query authorized project
TASK_RECORD_TAG=task record related operation
QUERY_TASK_RECORD_LIST_PAGING_NOTES=query task record list paging
CREATE_TOKEN_NOTES=create token; note: please login first
QUERY_ACCESS_TOKEN_LIST_NOTES=query access token list paging
SCHEDULE=schedule
WARNING_TYPE=warning type(sending strategy)
WARNING_GROUP_ID=warning group id
FAILURE_STRATEGY=failure strategy
RECEIVERS=receivers
RECEIVERS_CC=receivers cc
WORKER_GROUP_ID=worker server group id
PROCESS_INSTANCE_PRIORITY=process instance priority
UPDATE_SCHEDULE_NOTES=update schedule
SCHEDULE_ID=schedule id
ONLINE_SCHEDULE_NOTES=online schedule
OFFLINE_SCHEDULE_NOTES=offline schedule
QUERY_SCHEDULE_NOTES=query schedule
QUERY_SCHEDULE_LIST_PAGING_NOTES=query schedule list paging
LOGIN_TAG=user login related operations
USER_NAME=user name
PROJECT_NAME=project name
CREATE_PROCESS_DEFINITION_NOTES=create process definition
PROCESS_DEFINITION_NAME=process definition name
PROCESS_DEFINITION_JSON=process definition detail info (json format)
PROCESS_DEFINITION_LOCATIONS=process definition node locations info (json format)
PROCESS_INSTANCE_LOCATIONS=process instance node locations info (json format)
PROCESS_DEFINITION_CONNECTS=process definition node connects info (json format)
PROCESS_INSTANCE_CONNECTS=process instance node connects info (json format)
PROCESS_DEFINITION_DESC=process definition desc
PROCESS_DEFINITION_TAG=process definition related operation
SIGNOUT_NOTES=logout
USER_PASSWORD=user password
UPDATE_PROCESS_INSTANCE_NOTES=update process instance
QUERY_PROCESS_INSTANCE_LIST_NOTES=query process instance list
VERIFY_PROCCESS_DEFINITION_NAME_NOTES=verify process definition name
LOGIN_NOTES=user login
UPDATE_PROCCESS_DEFINITION_NOTES=update process definition
PROCESS_DEFINITION_ID=process definition id
PROCESS_DEFINITION_IDS=process definition ids
RELEASE_PROCCESS_DEFINITION_NOTES=release process definition
QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=query process definition by id
QUERY_PROCCESS_DEFINITION_LIST_NOTES=query process definition list
QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=query process definition list paging
QUERY_ALL_DEFINITION_LIST_NOTES=query all definition list
PAGE_NO=page no
PROCESS_INSTANCE_ID=process instance id
PROCESS_INSTANCE_JSON=process instance info(json format)
SCHEDULE_TIME=schedule time
SYNC_DEFINE=update the information of the process instance to the process definition\

RECOVERY_PROCESS_INSTANCE_FLAG=whether to recover the process instance
SEARCH_VAL=search val
USER_ID=user id
PAGE_SIZE=page size
LIMIT=limit
VIEW_TREE_NOTES=view tree
GET_NODE_LIST_BY_DEFINITION_ID_NOTES=get task node list by process definition id
PROCESS_DEFINITION_ID_LIST=process definition id list
QUERY_PROCCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=query all process definitions by project id
DELETE_PROCESS_DEFINITION_BY_ID_NOTES=delete process definition by process definition id
BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=batch delete process definition by process definition ids
QUERY_PROCESS_INSTANCE_BY_ID_NOTES=query process instance by process instance id
DELETE_PROCESS_INSTANCE_BY_ID_NOTES=delete process instance by process instance id
TASK_ID=task instance id
SKIP_LINE_NUM=skip line num
QUERY_TASK_INSTANCE_LOG_NOTES=query task instance log
DOWNLOAD_TASK_INSTANCE_LOG_NOTES=download task instance log
USERS_TAG=users related operation
SCHEDULER_TAG=scheduler related operation
CREATE_SCHEDULE_NOTES=create schedule
CREATE_USER_NOTES=create user
TENANT_ID=tenant id
QUEUE=queue
EMAIL=email
PHONE=phone
QUERY_USER_LIST_NOTES=query user list
UPDATE_USER_NOTES=update user
DELETE_USER_BY_ID_NOTES=delete user by id
GRANT_PROJECT_NOTES=grant project
PROJECT_IDS=project ids(string format, multiple projects separated by ",")
GRANT_RESOURCE_NOTES=grant resource file
RESOURCE_IDS=resource ids(string format, multiple resources separated by ",")
GET_USER_INFO_NOTES=get user info
LIST_USER_NOTES=list user
VERIFY_USER_NAME_NOTES=verify user name
UNAUTHORIZED_USER_NOTES=cancel authorization
ALERT_GROUP_ID=alert group id
AUTHORIZED_USER_NOTES=authorized user
GRANT_UDF_FUNC_NOTES=grant udf function
UDF_IDS=udf ids(string format, multiple udf functions separated by ",")
GRANT_DATASOURCE_NOTES=grant datasource
DATASOURCE_IDS=datasource ids(string format, multiple datasources separated by ",")
QUERY_SUBPROCESS_INSTANCE_BY_TASK_ID_NOTES=query subprocess instance by task instance id
QUERY_PARENT_PROCESS_INSTANCE_BY_SUB_PROCESS_INSTANCE_ID_NOTES=query parent process instance info by sub process instance id
QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES=query process instance global variables and local variables
VIEW_GANTT_NOTES=view gantt
SUB_PROCESS_INSTANCE_ID=sub process instance id
TASK_NAME=task instance name
TASK_INSTANCE_TAG=task instance related operation
LOGGER_TAG=log related operation
PROCESS_INSTANCE_TAG=process instance related operation
EXECUTION_STATUS=running status for workflow and task nodes
HOST=ip address of running task
START_DATE=start date
END_DATE=end date
QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES=query task list by process instance id
UPDATE_DATA_SOURCE_NOTES=update data source
DATA_SOURCE_ID=data source id
QUERY_DATA_SOURCE_NOTES=query data source by id
QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES=query data source list by database type
QUERY_DATA_SOURCE_LIST_PAGING_NOTES=query data source list paging
CONNECT_DATA_SOURCE_NOTES=connect data source
CONNECT_DATA_SOURCE_TEST_NOTES=connect data source test
DELETE_DATA_SOURCE_NOTES=delete data source
VERIFY_DATA_SOURCE_NOTES=verify data source
UNAUTHORIZED_DATA_SOURCE_NOTES=unauthorized data source
AUTHORIZED_DATA_SOURCE_NOTES=authorized data source
DELETE_SCHEDULER_BY_ID_NOTES=delete scheduler by id
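These keys are resolved through spring.messages.basename=i18n/messages in the API server's application.properties above; a sketch of checking that a key is present in the bundle (the source path is an assumption about the module layout, not confirmed by this diff):

```
# Assumes the bundle sits under dolphinscheduler-api/src/main/resources/i18n/.
grep '^CREATE_PROCESS_DEFINITION_NOTES=' dolphinscheduler-api/src/main/resources/i18n/messages.properties
```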
@ -1,252 +0,0 @@ |
|||||||
# |
|
||||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
|
||||||
# contributor license agreements. See the NOTICE file distributed with |
|
||||||
# this work for additional information regarding copyright ownership. |
|
||||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
|
||||||
# (the "License"); you may not use this file except in compliance with |
|
||||||
# the License. You may obtain a copy of the License at |
|
||||||
# |
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0 |
|
||||||
# |
|
||||||
# Unless required by applicable law or agreed to in writing, software |
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, |
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
||||||
# See the License for the specific language governing permissions and |
|
||||||
# limitations under the License. |
|
||||||
# |
|
||||||
|
|
||||||
QUERY_SCHEDULE_LIST_NOTES=query schedule list |
|
||||||
EXECUTE_PROCESS_TAG=execute process related operation |
|
||||||
PROCESS_INSTANCE_EXECUTOR_TAG=process instance executor related operation |
|
||||||
RUN_PROCESS_INSTANCE_NOTES=run process instance |
|
||||||
START_NODE_LIST=start node list(node name) |
|
||||||
TASK_DEPEND_TYPE=task depend type |
|
||||||
COMMAND_TYPE=command type |
|
||||||
RUN_MODE=run mode |
|
||||||
TIMEOUT=timeout |
|
||||||
EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=execute action to process instance |
|
||||||
EXECUTE_TYPE=execute type |
|
||||||
START_CHECK_PROCESS_DEFINITION_NOTES=start check process definition |
|
||||||
GET_RECEIVER_CC_NOTES=query receiver cc |
|
||||||
DESC=description |
|
||||||
GROUP_NAME=group name |
|
||||||
GROUP_TYPE=group type |
|
||||||
QUERY_ALERT_GROUP_LIST_NOTES=query alert group list |
|
||||||
UPDATE_ALERT_GROUP_NOTES=update alert group |
|
||||||
DELETE_ALERT_GROUP_BY_ID_NOTES=delete alert group by id |
|
||||||
VERIFY_ALERT_GROUP_NAME_NOTES=verify alert group name, check alert group exist or not |
|
||||||
GRANT_ALERT_GROUP_NOTES=grant alert group |
|
||||||
USER_IDS=user id list |
|
||||||
ALERT_GROUP_TAG=alert group related operation |
|
||||||
CREATE_ALERT_GROUP_NOTES=create alert group |
|
||||||
WORKER_GROUP_TAG=worker group related operation |
|
||||||
SAVE_WORKER_GROUP_NOTES=create worker group |
|
||||||
WORKER_GROUP_NAME=worker group name |
|
||||||
WORKER_IP_LIST=worker ip list, eg. 192.168.1.1,192.168.1.2 |
|
||||||
QUERY_WORKER_GROUP_PAGING_NOTES=query worker group paging |
|
||||||
QUERY_WORKER_GROUP_LIST_NOTES=query worker group list |
|
||||||
DELETE_WORKER_GROUP_BY_ID_NOTES=delete worker group by id |
|
||||||
DATA_ANALYSIS_TAG=analysis related operation of task state |
|
||||||
COUNT_TASK_STATE_NOTES=count task state |
|
||||||
COUNT_PROCESS_INSTANCE_NOTES=count process instance state |
|
||||||
COUNT_PROCESS_DEFINITION_BY_USER_NOTES=count process definition by user |
|
||||||
COUNT_COMMAND_STATE_NOTES=count command state |
|
||||||
COUNT_QUEUE_STATE_NOTES=count the running status of the task in the queue\ |
|
||||||
|
|
||||||
ACCESS_TOKEN_TAG=access token related operation |
|
||||||
MONITOR_TAG=monitor related operation |
|
||||||
MASTER_LIST_NOTES=master server list |
|
||||||
WORKER_LIST_NOTES=worker server list |
|
||||||
QUERY_DATABASE_STATE_NOTES=query database state |
|
||||||
QUERY_ZOOKEEPER_STATE_NOTES=QUERY ZOOKEEPER STATE |
|
||||||
TASK_STATE=task instance state |
|
||||||
SOURCE_TABLE=SOURCE TABLE |
|
||||||
DEST_TABLE=dest table |
|
||||||
TASK_DATE=task date |
|
||||||
QUERY_HISTORY_TASK_RECORD_LIST_PAGING_NOTES=query history task record list paging |
|
||||||
DATA_SOURCE_TAG=data source related operation |
|
||||||
CREATE_DATA_SOURCE_NOTES=create data source |
|
||||||
DATA_SOURCE_NAME=data source name |
|
||||||
DATA_SOURCE_NOTE=data source desc |
|
||||||
DB_TYPE=database type |
|
||||||
DATA_SOURCE_HOST=DATA SOURCE HOST |
|
||||||
DATA_SOURCE_PORT=data source port |
|
||||||
DATABASE_NAME=database name |
|
||||||
QUEUE_TAG=queue related operation |
|
||||||
QUERY_QUEUE_LIST_NOTES=query queue list |
|
||||||
QUERY_QUEUE_LIST_PAGING_NOTES=query queue list paging |
|
||||||
CREATE_QUEUE_NOTES=create queue |
|
||||||
YARN_QUEUE_NAME=yarn(hadoop) queue name |
|
||||||
QUEUE_ID=queue id |
|
||||||
TENANT_DESC=tenant desc |
|
||||||
QUERY_TENANT_LIST_PAGING_NOTES=query tenant list paging |
|
||||||
QUERY_TENANT_LIST_NOTES=query tenant list |
|
||||||
UPDATE_TENANT_NOTES=update tenant |
|
||||||
DELETE_TENANT_NOTES=delete tenant |
|
||||||
RESOURCES_TAG=resource center related operation |
|
||||||
CREATE_RESOURCE_NOTES=create resource |
|
||||||
RESOURCE_TYPE=resource file type |
|
||||||
RESOURCE_NAME=resource name |
|
||||||
RESOURCE_DESC=resource file desc |
|
||||||
RESOURCE_FILE=resource file |
|
||||||
RESOURCE_ID=resource id |
|
||||||
QUERY_RESOURCE_LIST_NOTES=query resource list |
|
||||||
DELETE_RESOURCE_BY_ID_NOTES=delete resource by id |
|
||||||
VIEW_RESOURCE_BY_ID_NOTES=view resource by id |
|
||||||
ONLINE_CREATE_RESOURCE_NOTES=online create resource |
|
||||||
SUFFIX=resource file suffix |
|
||||||
CONTENT=resource file content |
|
||||||
UPDATE_RESOURCE_NOTES=edit resource file online |
|
||||||
DOWNLOAD_RESOURCE_NOTES=download resource file |
|
||||||
CREATE_UDF_FUNCTION_NOTES=create udf function |
|
||||||
UDF_TYPE=UDF type |
|
||||||
FUNC_NAME=function name |
|
||||||
CLASS_NAME=package and class name |
|
||||||
ARG_TYPES=arguments |
|
||||||
UDF_DESC=udf desc |
|
||||||
VIEW_UDF_FUNCTION_NOTES=view udf function |
|
||||||
UPDATE_UDF_FUNCTION_NOTES=update udf function |
|
||||||
QUERY_UDF_FUNCTION_LIST_PAGING_NOTES=query udf function list paging |
|
||||||
VERIFY_UDF_FUNCTION_NAME_NOTES=verify udf function name |
|
||||||
DELETE_UDF_FUNCTION_NOTES=delete udf function |
|
||||||
AUTHORIZED_FILE_NOTES=authorized file |
|
||||||
UNAUTHORIZED_FILE_NOTES=unauthorized file |
|
||||||
AUTHORIZED_UDF_FUNC_NOTES=authorized udf func |
|
||||||
UNAUTHORIZED_UDF_FUNC_NOTES=unauthorized udf func |
|
||||||
VERIFY_QUEUE_NOTES=verify queue |
|
||||||
TENANT_TAG=tenant related operation |
|
||||||
CREATE_TENANT_NOTES=create tenant |
|
||||||
TENANT_CODE=tenant code |
|
||||||
TENANT_NAME=tenant name |
|
||||||
QUEUE_NAME=queue name |
|
||||||
PASSWORD=password |
|
||||||
DATA_SOURCE_OTHER=jdbc connection params, format:{"key1":"value1",...} |
|
||||||
PROJECT_TAG=project related operation |
|
||||||
CREATE_PROJECT_NOTES=create project |
|
||||||
PROJECT_DESC=project description |
|
||||||
UPDATE_PROJECT_NOTES=update project |
|
||||||
PROJECT_ID=project id |
|
||||||
QUERY_PROJECT_BY_ID_NOTES=query project info by project id |
|
||||||
QUERY_PROJECT_LIST_PAGING_NOTES=QUERY PROJECT LIST PAGING |
|
||||||
QUERY_ALL_PROJECT_LIST_NOTES=query all project list |
|
||||||
DELETE_PROJECT_BY_ID_NOTES=delete project by id |
|
||||||
QUERY_UNAUTHORIZED_PROJECT_NOTES=query unauthorized project |
|
||||||
QUERY_AUTHORIZED_PROJECT_NOTES=query authorized project |
|
||||||
TASK_RECORD_TAG=task record related operation |
|
||||||
QUERY_TASK_RECORD_LIST_PAGING_NOTES=query task record list paging |
|
||||||
CREATE_TOKEN_NOTES=create token ,note: please login first |
|
||||||
QUERY_ACCESS_TOKEN_LIST_NOTES=query access token list paging |
|
||||||
SCHEDULE=schedule |
|
||||||
WARNING_TYPE=warning type(sending strategy) |
|
||||||
WARNING_GROUP_ID=warning group id |
|
||||||
FAILURE_STRATEGY=failure strategy |
|
||||||
RECEIVERS=receivers |
|
||||||
RECEIVERS_CC=receivers cc |
|
||||||
WORKER_GROUP_ID=worker server group id |
|
||||||
PROCESS_INSTANCE_PRIORITY=process instance priority |
|
||||||
UPDATE_SCHEDULE_NOTES=update schedule |
|
||||||
SCHEDULE_ID=schedule id |
|
||||||
ONLINE_SCHEDULE_NOTES=online schedule |
|
||||||
OFFLINE_SCHEDULE_NOTES=offline schedule |
|
||||||
QUERY_SCHEDULE_NOTES=query schedule |
|
||||||
QUERY_SCHEDULE_LIST_PAGING_NOTES=query schedule list paging |
|
||||||
LOGIN_TAG=User login related operations |
|
||||||
USER_NAME=user name |
|
||||||
PROJECT_NAME=project name |
|
||||||
CREATE_PROCESS_DEFINITION_NOTES=create process definition |
|
||||||
PROCESS_DEFINITION_NAME=process definition name |
|
||||||
PROCESS_DEFINITION_JSON=process definition detail info (json format) |
|
||||||
PROCESS_DEFINITION_LOCATIONS=process definition node locations info (json format) |
|
||||||
PROCESS_INSTANCE_LOCATIONS=process instance node locations info (json format) |
|
||||||
PROCESS_DEFINITION_CONNECTS=process definition node connects info (json format) |
|
||||||
PROCESS_INSTANCE_CONNECTS=process instance node connects info (json format) |
|
||||||
PROCESS_DEFINITION_DESC=process definition desc |
|
||||||
PROCESS_DEFINITION_TAG=process definition related opertation |
|
||||||
SIGNOUT_NOTES=logout |
|
||||||
USER_PASSWORD=user password |
|
||||||
UPDATE_PROCESS_INSTANCE_NOTES=update process instance |
|
||||||
QUERY_PROCESS_INSTANCE_LIST_NOTES=query process instance list |
|
||||||
VERIFY_PROCCESS_DEFINITION_NAME_NOTES=verify proccess definition name |
|
||||||
LOGIN_NOTES=user login |
|
||||||
UPDATE_PROCCESS_DEFINITION_NOTES=update proccess definition |
|
||||||
PROCESS_DEFINITION_ID=process definition id |
|
||||||
PROCESS_DEFINITION_IDS=process definition ids |
|
||||||
RELEASE_PROCCESS_DEFINITION_NOTES=release proccess definition |
|
||||||
QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=query proccess definition by id |
|
||||||
QUERY_PROCCESS_DEFINITION_LIST_NOTES=query proccess definition list |
|
||||||
QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=query proccess definition list paging |
|
||||||
QUERY_ALL_DEFINITION_LIST_NOTES=query all definition list |
|
||||||
PAGE_NO=page no |
|
||||||
PROCESS_INSTANCE_ID=process instance id |
|
||||||
PROCESS_INSTANCE_JSON=process instance info(json format) |
|
||||||
SCHEDULE_TIME=schedule time |
|
||||||
SYNC_DEFINE=update the information of the process instance to the process definition\ |
|
||||||
|
|
||||||
RECOVERY_PROCESS_INSTANCE_FLAG=whether to recovery process instance |
|
||||||
SEARCH_VAL=search val |
|
||||||
USER_ID=user id |
|
||||||
PAGE_SIZE=page size |
|
||||||
LIMIT=limit |
|
||||||
VIEW_TREE_NOTES=view tree |
|
||||||
GET_NODE_LIST_BY_DEFINITION_ID_NOTES=get task node list by process definition id |
|
||||||
PROCESS_DEFINITION_ID_LIST=process definition id list |
|
||||||
QUERY_PROCCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=query all process definitions by project id |
|
||||||
DELETE_PROCESS_DEFINITION_BY_ID_NOTES=delete process definition by process definition id |
|
||||||
BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=batch delete process definition by process definition ids |
|
||||||
QUERY_PROCESS_INSTANCE_BY_ID_NOTES=query process instance by process instance id |
|
||||||
DELETE_PROCESS_INSTANCE_BY_ID_NOTES=delete process instance by process instance id |
|
||||||
TASK_ID=task instance id |
|
||||||
SKIP_LINE_NUM=skip line num |
|
||||||
QUERY_TASK_INSTANCE_LOG_NOTES=query task instance log |
|
||||||
DOWNLOAD_TASK_INSTANCE_LOG_NOTES=download task instance log |
|
||||||
USERS_TAG=users related operation |
|
||||||
SCHEDULER_TAG=scheduler related operation |
|
||||||
CREATE_SCHEDULE_NOTES=create schedule |
|
||||||
CREATE_USER_NOTES=create user |
|
||||||
TENANT_ID=tenant id |
|
||||||
QUEUE=queue |
|
||||||
EMAIL=email |
|
||||||
PHONE=phone |
|
||||||
QUERY_USER_LIST_NOTES=query user list |
|
||||||
UPDATE_USER_NOTES=update user |
|
||||||
DELETE_USER_BY_ID_NOTES=delete user by id |
|
||||||
GRANT_PROJECT_NOTES=grant project |
|
||||||
PROJECT_IDS=project ids(string format, multiple projects separated by ",") |
|
||||||
GRANT_RESOURCE_NOTES=grant resource file |
|
||||||
RESOURCE_IDS=resource ids(string format, multiple resources separated by ",") |
|
||||||
GET_USER_INFO_NOTES=get user info |
|
||||||
LIST_USER_NOTES=list user |
|
||||||
VERIFY_USER_NAME_NOTES=verify user name |
|
||||||
UNAUTHORIZED_USER_NOTES=cancel authorization |
|
||||||
ALERT_GROUP_ID=alert group id |
|
||||||
AUTHORIZED_USER_NOTES=authorized user |
|
||||||
GRANT_UDF_FUNC_NOTES=grant udf function |
|
||||||
UDF_IDS=udf ids(string format, multiple udf functions separated by ",") |
|
||||||
GRANT_DATASOURCE_NOTES=grant datasource |
|
||||||
DATASOURCE_IDS=datasource ids(string format, multiple datasources separated by ",") |
|
||||||
QUERY_SUBPROCESS_INSTANCE_BY_TASK_ID_NOTES=query subprocess instance by task instance id |
|
||||||
QUERY_PARENT_PROCESS_INSTANCE_BY_SUB_PROCESS_INSTANCE_ID_NOTES=query parent process instance info by sub process instance id |
|
||||||
QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES=query process instance global variables and local variables |
|
||||||
VIEW_GANTT_NOTES=view gantt |
|
||||||
SUB_PROCESS_INSTANCE_ID=sub process instance id |
|
||||||
TASK_NAME=task instance name |
|
||||||
TASK_INSTANCE_TAG=task instance related operation |
|
||||||
LOGGER_TAG=log related operation |
|
||||||
PROCESS_INSTANCE_TAG=process instance related operation |
|
||||||
EXECUTION_STATUS=running status for workflow and task nodes |
|
||||||
HOST=ip address of running task |
|
||||||
START_DATE=start date |
|
||||||
END_DATE=end date |
|
||||||
QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES=query task list by process instance id |
|
||||||
UPDATE_DATA_SOURCE_NOTES=update data source |
|
||||||
DATA_SOURCE_ID=data source id |
|
||||||
QUERY_DATA_SOURCE_NOTES=query data source by id |
|
||||||
QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES=query data source list by database type |
|
||||||
QUERY_DATA_SOURCE_LIST_PAGING_NOTES=query data source list paging |
|
||||||
CONNECT_DATA_SOURCE_NOTES=connect data source |
|
||||||
CONNECT_DATA_SOURCE_TEST_NOTES=connect data source test |
|
||||||
DELETE_DATA_SOURCE_NOTES=delete data source |
|
||||||
VERIFY_DATA_SOURCE_NOTES=verify data source |
|
||||||
UNAUTHORIZED_DATA_SOURCE_NOTES=unauthorized data source |
|
||||||
AUTHORIZED_DATA_SOURCE_NOTES=authorized data source |
|
||||||
DELETE_SCHEDULER_BY_ID_NOTES=delete scheduler by id |
|
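The `*_TAG` and `*_NOTES` keys above are UI strings for the REST API's Swagger documentation. A hypothetical sketch of how such bundle keys are typically referenced from springfox/Swagger annotations, with a locale-aware resolver substituting the bundle text; the controller class, mapping, and return type here are illustrative, not the project's actual code:

```java
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiImplicitParam;
import io.swagger.annotations.ApiImplicitParams;
import io.swagger.annotations.ApiOperation;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;

// Hypothetical controller for illustration only: the bundle keys above
// (LOGIN_TAG, LOGIN_NOTES, USER_NAME, USER_PASSWORD) are placed in the
// Swagger annotations and replaced with the localized text when the
// API documentation is generated.
@Api(tags = "LOGIN_TAG")
@RestController
public class LoginController {

    @ApiOperation(value = "login", notes = "LOGIN_NOTES")
    @ApiImplicitParams({
            @ApiImplicitParam(name = "userName", value = "USER_NAME", required = true, dataType = "String"),
            @ApiImplicitParam(name = "userPassword", value = "USER_PASSWORD", required = true, dataType = "String")
    })
    @PostMapping("/login")
    public String login(@RequestParam("userName") String userName,
                        @RequestParam("userPassword") String userPassword) {
        return "session-token"; // authentication itself is out of scope here
    }
}
```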
@ -1,250 +0,0 @@ |
|||||||
# |
|
||||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
|
||||||
# contributor license agreements. See the NOTICE file distributed with |
|
||||||
# this work for additional information regarding copyright ownership. |
|
||||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
|
||||||
# (the "License"); you may not use this file except in compliance with |
|
||||||
# the License. You may obtain a copy of the License at |
|
||||||
# |
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0 |
|
||||||
# |
|
||||||
# Unless required by applicable law or agreed to in writing, software |
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, |
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
||||||
# See the License for the specific language governing permissions and |
|
||||||
# limitations under the License. |
|
||||||
# |
|
||||||
|
|
||||||
QUERY_SCHEDULE_LIST_NOTES=查询定时列表 |
|
||||||
PROCESS_INSTANCE_EXECUTOR_TAG=流程实例执行相关操作 |
|
||||||
RUN_PROCESS_INSTANCE_NOTES=运行流程实例 |
|
||||||
START_NODE_LIST=开始节点列表(节点name) |
|
||||||
TASK_DEPEND_TYPE=任务依赖类型 |
|
||||||
COMMAND_TYPE=指令类型 |
|
||||||
RUN_MODE=运行模式 |
|
||||||
TIMEOUT=超时时间 |
|
||||||
EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=执行流程实例的各种操作(暂停、停止、重跑、恢复等) |
|
||||||
EXECUTE_TYPE=执行类型 |
|
||||||
START_CHECK_PROCESS_DEFINITION_NOTES=检查流程定义 |
|
||||||
DESC=备注(描述) |
|
||||||
GROUP_NAME=组名称 |
|
||||||
GROUP_TYPE=组类型 |
|
||||||
QUERY_ALERT_GROUP_LIST_NOTES=告警组列表 |
|
||||||
|
|
||||||
UPDATE_ALERT_GROUP_NOTES=编辑(更新)告警组 |
|
||||||
DELETE_ALERT_GROUP_BY_ID_NOTES=删除告警组通过ID |
|
||||||
VERIFY_ALERT_GROUP_NAME_NOTES=检查告警组是否存在 |
|
||||||
GRANT_ALERT_GROUP_NOTES=授权告警组 |
|
||||||
USER_IDS=用户ID列表 |
|
||||||
ALERT_GROUP_TAG=告警组相关操作 |
|
||||||
WORKER_GROUP_TAG=Worker分组管理 |
|
||||||
SAVE_WORKER_GROUP_NOTES=创建Worker分组 |
|
||||||
|
|
||||||
WORKER_GROUP_NAME=Worker分组名称 |
|
||||||
WORKER_IP_LIST=Worker ip列表,注意:多个IP地址以逗号分割 |
|
||||||
|
|
||||||
QUERY_WORKER_GROUP_PAGING_NOTES=分页查询Worker分组 |
|
||||||
QUERY_WORKER_GROUP_LIST_NOTES=查询worker group分组 |
|
||||||
DELETE_WORKER_GROUP_BY_ID_NOTES=删除worker group通过ID |
|
||||||
DATA_ANALYSIS_TAG=任务状态分析相关操作 |
|
||||||
COUNT_TASK_STATE_NOTES=任务状态统计 |
|
||||||
COUNT_PROCESS_INSTANCE_NOTES=统计流程实例状态 |
|
||||||
COUNT_PROCESS_DEFINITION_BY_USER_NOTES=统计用户创建的流程定义 |
|
||||||
COUNT_COMMAND_STATE_NOTES=统计命令状态 |
|
||||||
COUNT_QUEUE_STATE_NOTES=统计队列里任务状态 |
|
||||||
ACCESS_TOKEN_TAG=access token相关操作,需要先登录 |
|
||||||
MONITOR_TAG=监控相关操作 |
|
||||||
MASTER_LIST_NOTES=master服务列表 |
|
||||||
WORKER_LIST_NOTES=worker服务列表 |
|
||||||
QUERY_DATABASE_STATE_NOTES=查询数据库状态 |
|
||||||
QUERY_ZOOKEEPER_STATE_NOTES=查询Zookeeper状态 |
|
||||||
TASK_STATE=任务实例状态 |
|
||||||
SOURCE_TABLE=源表 |
|
||||||
DEST_TABLE=目标表 |
|
||||||
TASK_DATE=任务时间 |
|
||||||
QUERY_HISTORY_TASK_RECORD_LIST_PAGING_NOTES=分页查询历史任务记录列表 |
|
||||||
DATA_SOURCE_TAG=数据源相关操作 |
|
||||||
CREATE_DATA_SOURCE_NOTES=创建数据源 |
|
||||||
DATA_SOURCE_NAME=数据源名称 |
|
||||||
DATA_SOURCE_NOTE=数据源描述 |
|
||||||
DB_TYPE=数据源类型 |
|
||||||
DATA_SOURCE_HOST=IP主机名 |
|
||||||
DATA_SOURCE_PORT=数据源端口 |
|
||||||
DATABASE_NAME=数据库名 |
|
||||||
QUEUE_TAG=队列相关操作 |
|
||||||
QUERY_QUEUE_LIST_NOTES=查询队列列表 |
|
||||||
QUERY_QUEUE_LIST_PAGING_NOTES=分页查询队列列表 |
|
||||||
CREATE_QUEUE_NOTES=创建队列 |
|
||||||
YARN_QUEUE_NAME=hadoop yarn队列名 |
|
||||||
QUEUE_ID=队列ID |
|
||||||
TENANT_DESC=租户描述 |
|
||||||
QUERY_TENANT_LIST_PAGING_NOTES=分页查询租户列表 |
|
||||||
QUERY_TENANT_LIST_NOTES=查询租户列表 |
|
||||||
UPDATE_TENANT_NOTES=更新租户 |
|
||||||
DELETE_TENANT_NOTES=删除租户 |
|
||||||
RESOURCES_TAG=资源中心相关操作 |
|
||||||
CREATE_RESOURCE_NOTES=创建资源 |
|
||||||
RESOURCE_TYPE=资源文件类型 |
|
||||||
RESOURCE_NAME=资源文件名称 |
|
||||||
RESOURCE_DESC=资源文件描述 |
|
||||||
RESOURCE_FILE=资源文件 |
|
||||||
RESOURCE_ID=资源ID |
|
||||||
QUERY_RESOURCE_LIST_NOTES=查询资源列表 |
|
||||||
DELETE_RESOURCE_BY_ID_NOTES=删除资源通过ID |
|
||||||
VIEW_RESOURCE_BY_ID_NOTES=浏览资源通过ID |
|
||||||
ONLINE_CREATE_RESOURCE_NOTES=在线创建资源 |
|
||||||
SUFFIX=资源文件后缀 |
|
||||||
CONTENT=资源文件内容 |
|
||||||
UPDATE_RESOURCE_NOTES=在线更新资源文件 |
|
||||||
DOWNLOAD_RESOURCE_NOTES=下载资源文件 |
|
||||||
CREATE_UDF_FUNCTION_NOTES=创建UDF函数 |
|
||||||
UDF_TYPE=UDF类型 |
|
||||||
FUNC_NAME=函数名称 |
|
||||||
CLASS_NAME=包名类名 |
|
||||||
ARG_TYPES=参数 |
|
||||||
UDF_DESC=udf描述,使用说明 |
|
||||||
VIEW_UDF_FUNCTION_NOTES=查看udf函数 |
|
||||||
UPDATE_UDF_FUNCTION_NOTES=更新udf函数 |
|
||||||
QUERY_UDF_FUNCTION_LIST_PAGING_NOTES=分页查询udf函数列表 |
|
||||||
VERIFY_UDF_FUNCTION_NAME_NOTES=验证udf函数名 |
|
||||||
DELETE_UDF_FUNCTION_NOTES=删除UDF函数 |
|
||||||
AUTHORIZED_FILE_NOTES=授权文件 |
|
||||||
UNAUTHORIZED_FILE_NOTES=取消授权文件 |
|
||||||
AUTHORIZED_UDF_FUNC_NOTES=授权udf函数 |
|
||||||
UNAUTHORIZED_UDF_FUNC_NOTES=取消udf函数授权 |
|
||||||
VERIFY_QUEUE_NOTES=验证队列 |
|
||||||
TENANT_TAG=租户相关操作 |
|
||||||
CREATE_TENANT_NOTES=创建租户 |
|
||||||
TENANT_CODE=租户编码 |
|
||||||
TENANT_NAME=租户名称 |
|
||||||
QUEUE_NAME=队列名 |
|
||||||
PASSWORD=密码 |
|
||||||
DATA_SOURCE_OTHER=jdbc连接参数,格式为:{"key1":"value1",...} |
|
||||||
PROJECT_TAG=项目相关操作 |
|
||||||
CREATE_PROJECT_NOTES=创建项目 |
|
||||||
PROJECT_DESC=项目描述 |
|
||||||
UPDATE_PROJECT_NOTES=更新项目 |
|
||||||
PROJECT_ID=项目ID |
|
||||||
QUERY_PROJECT_BY_ID_NOTES=通过项目ID查询项目信息 |
|
||||||
QUERY_PROJECT_LIST_PAGING_NOTES=分页查询项目列表 |
|
||||||
QUERY_ALL_PROJECT_LIST_NOTES=查询所有项目 |
|
||||||
DELETE_PROJECT_BY_ID_NOTES=删除项目通过ID |
|
||||||
QUERY_UNAUTHORIZED_PROJECT_NOTES=查询未授权的项目 |
|
||||||
QUERY_AUTHORIZED_PROJECT_NOTES=查询授权项目 |
|
||||||
TASK_RECORD_TAG=任务记录相关操作 |
|
||||||
QUERY_TASK_RECORD_LIST_PAGING_NOTES=分页查询任务记录列表 |
|
||||||
CREATE_TOKEN_NOTES=创建token,注意需要先登录 |
|
||||||
QUERY_ACCESS_TOKEN_LIST_NOTES=分页查询access token列表 |
|
||||||
SCHEDULE=定时 |
|
||||||
WARNING_TYPE=发送策略 |
|
||||||
WARNING_GROUP_ID=发送组ID |
|
||||||
FAILURE_STRATEGY=失败策略 |
|
||||||
RECEIVERS=收件人 |
|
||||||
RECEIVERS_CC=收件人(抄送) |
|
||||||
WORKER_GROUP_ID=Worker Server分组ID |
|
||||||
PROCESS_INSTANCE_PRIORITY=流程实例优先级 |
|
||||||
UPDATE_SCHEDULE_NOTES=更新定时 |
|
||||||
SCHEDULE_ID=定时ID |
|
||||||
ONLINE_SCHEDULE_NOTES=定时上线 |
|
||||||
OFFLINE_SCHEDULE_NOTES=定时下线 |
|
||||||
QUERY_SCHEDULE_NOTES=查询定时 |
|
||||||
QUERY_SCHEDULE_LIST_PAGING_NOTES=分页查询定时 |
|
||||||
LOGIN_TAG=用户登录相关操作 |
|
||||||
USER_NAME=用户名 |
|
||||||
PROJECT_NAME=项目名称 |
|
||||||
CREATE_PROCESS_DEFINITION_NOTES=创建流程定义 |
|
||||||
PROCESS_DEFINITION_NAME=流程定义名称 |
|
||||||
PROCESS_DEFINITION_JSON=流程定义详细信息(json格式) |
|
||||||
PROCESS_DEFINITION_LOCATIONS=流程定义节点坐标位置信息(json格式) |
|
||||||
PROCESS_INSTANCE_LOCATIONS=流程实例节点坐标位置信息(json格式) |
|
||||||
PROCESS_DEFINITION_CONNECTS=流程定义节点图标连接信息(json格式) |
|
||||||
PROCESS_INSTANCE_CONNECTS=流程实例节点图标连接信息(json格式) |
|
||||||
PROCESS_DEFINITION_DESC=流程定义描述信息 |
|
||||||
PROCESS_DEFINITION_TAG=流程定义相关操作 |
|
||||||
SIGNOUT_NOTES=退出登录 |
|
||||||
USER_PASSWORD=用户密码 |
|
||||||
UPDATE_PROCESS_INSTANCE_NOTES=更新流程实例 |
|
||||||
QUERY_PROCESS_INSTANCE_LIST_NOTES=查询流程实例列表 |
|
||||||
VERIFY_PROCCESS_DEFINITION_NAME_NOTES=验证流程定义名字 |
|
||||||
LOGIN_NOTES=用户登录 |
|
||||||
UPDATE_PROCCESS_DEFINITION_NOTES=更新流程定义 |
|
||||||
PROCESS_DEFINITION_ID=流程定义ID |
|
||||||
RELEASE_PROCCESS_DEFINITION_NOTES=发布流程定义 |
|
||||||
QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=查询流程定义通过流程定义ID |
|
||||||
QUERY_PROCCESS_DEFINITION_LIST_NOTES=查询流程定义列表 |
|
||||||
QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=分页查询流程定义列表 |
|
||||||
QUERY_ALL_DEFINITION_LIST_NOTES=查询所有流程定义 |
|
||||||
PAGE_NO=页码号 |
|
||||||
PROCESS_INSTANCE_ID=流程实例ID |
|
||||||
PROCESS_INSTANCE_IDS=流程实例ID集合 |
|
||||||
PROCESS_INSTANCE_JSON=流程实例信息(json格式) |
|
||||||
SCHEDULE_TIME=定时时间 |
|
||||||
SYNC_DEFINE=更新流程实例的信息是否同步到流程定义 |
|
||||||
RECOVERY_PROCESS_INSTANCE_FLAG=是否恢复流程实例 |
|
||||||
SEARCH_VAL=搜索值 |
|
||||||
USER_ID=用户ID |
|
||||||
PAGE_SIZE=页大小 |
|
||||||
LIMIT=显示多少条 |
|
||||||
VIEW_TREE_NOTES=树状图 |
|
||||||
GET_NODE_LIST_BY_DEFINITION_ID_NOTES=获得任务节点列表通过流程定义ID |
|
||||||
PROCESS_DEFINITION_ID_LIST=流程定义id列表 |
|
||||||
QUERY_PROCCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=查询流程定义通过项目ID |
|
||||||
BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=批量删除流程定义通过流程定义ID集合 |
|
||||||
DELETE_PROCESS_DEFINITION_BY_ID_NOTES=删除流程定义通过流程定义ID |
|
||||||
QUERY_PROCESS_INSTANCE_BY_ID_NOTES=查询流程实例通过流程实例ID |
|
||||||
DELETE_PROCESS_INSTANCE_BY_ID_NOTES=删除流程实例通过流程实例ID |
|
||||||
TASK_ID=任务实例ID |
|
||||||
SKIP_LINE_NUM=忽略行数 |
|
||||||
QUERY_TASK_INSTANCE_LOG_NOTES=查询任务实例日志 |
|
||||||
DOWNLOAD_TASK_INSTANCE_LOG_NOTES=下载任务实例日志 |
|
||||||
USERS_TAG=用户相关操作 |
|
||||||
SCHEDULER_TAG=定时相关操作 |
|
||||||
CREATE_SCHEDULE_NOTES=创建定时 |
|
||||||
CREATE_USER_NOTES=创建用户 |
|
||||||
TENANT_ID=租户ID |
|
||||||
QUEUE=使用的队列 |
|
||||||
EMAIL=邮箱 |
|
||||||
PHONE=手机号 |
|
||||||
QUERY_USER_LIST_NOTES=查询用户列表 |
|
||||||
UPDATE_USER_NOTES=更新用户 |
|
||||||
DELETE_USER_BY_ID_NOTES=删除用户通过ID |
|
||||||
GRANT_PROJECT_NOTES=授权项目 |
|
||||||
PROJECT_IDS=项目IDS(字符串格式,多个项目以","分割) |
|
||||||
GRANT_RESOURCE_NOTES=授权资源文件 |
|
||||||
RESOURCE_IDS=资源ID列表(字符串格式,多个资源ID以","分割) |
|
||||||
GET_USER_INFO_NOTES=获取用户信息 |
|
||||||
LIST_USER_NOTES=用户列表 |
|
||||||
VERIFY_USER_NAME_NOTES=验证用户名 |
|
||||||
UNAUTHORIZED_USER_NOTES=取消授权 |
|
||||||
ALERT_GROUP_ID=报警组ID |
|
||||||
AUTHORIZED_USER_NOTES=授权用户 |
|
||||||
GRANT_UDF_FUNC_NOTES=授权udf函数 |
|
||||||
UDF_IDS=udf函数id列表(字符串格式,多个udf函数ID以","分割) |
|
||||||
GRANT_DATASOURCE_NOTES=授权数据源 |
|
||||||
DATASOURCE_IDS=数据源ID列表(字符串格式,多个数据源ID以","分割) |
|
||||||
QUERY_SUBPROCESS_INSTANCE_BY_TASK_ID_NOTES=查询子流程实例通过任务实例ID |
|
||||||
QUERY_PARENT_PROCESS_INSTANCE_BY_SUB_PROCESS_INSTANCE_ID_NOTES=查询父流程实例信息通过子流程实例ID |
|
||||||
QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES=查询流程实例全局变量和局部变量 |
|
||||||
VIEW_GANTT_NOTES=浏览Gantt图 |
|
||||||
SUB_PROCESS_INSTANCE_ID=子流程实例ID |
|
||||||
TASK_NAME=任务实例名 |
|
||||||
TASK_INSTANCE_TAG=任务实例相关操作 |
|
||||||
LOGGER_TAG=日志相关操作 |
|
||||||
PROCESS_INSTANCE_TAG=流程实例相关操作 |
|
||||||
EXECUTION_STATUS=工作流和任务节点的运行状态 |
|
||||||
HOST=运行任务的主机IP地址 |
|
||||||
START_DATE=开始时间 |
|
||||||
END_DATE=结束时间 |
|
||||||
QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES=通过流程实例ID查询任务列表 |
|
||||||
UPDATE_DATA_SOURCE_NOTES=更新数据源 |
|
||||||
DATA_SOURCE_ID=数据源ID |
|
||||||
QUERY_DATA_SOURCE_NOTES=查询数据源通过ID |
|
||||||
QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES=查询数据源列表通过数据源类型 |
|
||||||
QUERY_DATA_SOURCE_LIST_PAGING_NOTES=分页查询数据源列表 |
|
||||||
CONNECT_DATA_SOURCE_NOTES=连接数据源 |
|
||||||
CONNECT_DATA_SOURCE_TEST_NOTES=连接数据源测试 |
|
||||||
DELETE_DATA_SOURCE_NOTES=删除数据源 |
|
||||||
VERIFY_DATA_SOURCE_NOTES=验证数据源 |
|
||||||
UNAUTHORIZED_DATA_SOURCE_NOTES=未授权的数据源 |
|
||||||
AUTHORIZED_DATA_SOURCE_NOTES=授权的数据源 |
|
||||||
DELETE_SCHEDULER_BY_ID_NOTES=根据定时id删除定时数据 |
|
@ -1,17 +0,0 @@ |
|||||||
<#-- |
|
||||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
|
||||||
~ contributor license agreements. See the NOTICE file distributed with |
|
||||||
~ this work for additional information regarding copyright ownership. |
|
||||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
|
||||||
~ (the "License"); you may not use this file except in compliance with |
|
||||||
~ the License. You may obtain a copy of the License at |
|
||||||
~ |
|
||||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
|
||||||
~ |
|
||||||
~ Unless required by applicable law or agreed to in writing, software |
|
||||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
|
||||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
||||||
~ See the License for the specific language governing permissions and |
|
||||||
~ limitations under the License. |
|
||||||
--> |
|
||||||
<!DOCTYPE HTML PUBLIC '-//W3C//DTD HTML 4.01 Transitional//EN' 'http://www.w3.org/TR/html4/loose.dtd'><html><head><title>dolphinscheduler</title><meta name='Keywords' content=''><meta name='Description' content=''><style type="text/css">table { margin-top:0px; padding-top:0px; border:1px solid; font-size: 14px; color: #333333; border-width: 1px; border-color: #666666; border-collapse: collapse; } table th { border-width: 1px; padding: 8px; border-style: solid; border-color: #666666; background-color: #dedede; } table td { border-width: 1px; padding: 8px; border-style: solid; border-color: #666666; background-color: #ffffff; }</style></head><body style="margin:0;padding:0"><table border="1" cellpadding="5" cellspacing="0"><thead><#if title??> ${title}</#if></thead><#if content??> ${content}</#if></table></body></html> |
|
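The single-line template above wraps caller-supplied `${title}` and `${content}` HTML fragments in a styled table, and both model entries are optional (`<#if title??>` / `<#if content??>`). A minimal sketch of rendering it with FreeMarker; the template path, the version constant, and the row markup passed in are assumptions for illustration:

```java
import freemarker.template.Configuration;
import freemarker.template.Template;
import freemarker.template.TemplateException;

import java.io.IOException;
import java.io.StringWriter;
import java.util.HashMap;
import java.util.Map;

// Minimal rendering sketch; MailTemplateDemo and the template location
// are illustrative, not project code.
public class MailTemplateDemo {
    public static void main(String[] args) throws IOException, TemplateException {
        Configuration cfg = new Configuration(Configuration.VERSION_2_3_28);
        cfg.setClassForTemplateLoading(MailTemplateDemo.class, "/mail_templates");
        Template template = cfg.getTemplate("alert_mail_template.ftl");

        // both entries are optional in the template; omitting one simply
        // leaves that section of the table empty
        Map<String, Object> model = new HashMap<>();
        model.put("title", "<tr><th>status</th><th>host</th></tr>");
        model.put("content", "<tr><td>SUCCESS</td><td>192.168.1.1</td></tr>");

        StringWriter out = new StringWriter();
        template.process(model, out);
        System.out.println(out); // the assembled HTML mail body
    }
}
```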
@ -1,38 +0,0 @@ |
|||||||
# |
|
||||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
|
||||||
# contributor license agreements. See the NOTICE file distributed with |
|
||||||
# this work for additional information regarding copyright ownership. |
|
||||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
|
||||||
# (the "License"); you may not use this file except in compliance with |
|
||||||
# the License. You may obtain a copy of the License at |
|
||||||
# |
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0 |
|
||||||
# |
|
||||||
# Unless required by applicable law or agreed to in writing, software |
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, |
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
||||||
# See the License for the specific language governing permissions and |
|
||||||
# limitations under the License. |
|
||||||
# |
|
||||||
|
|
||||||
# master execute thread num |
|
||||||
master.exec.threads=100 |
|
||||||
|
|
||||||
# number of tasks the master executes in parallel |
|
||||||
master.exec.task.number=20 |
|
||||||
|
|
||||||
# master heartbeat interval |
|
||||||
master.heartbeat.interval=10 |
|
||||||
|
|
||||||
# master commit task retry times |
|
||||||
master.task.commit.retryTimes=5 |
|
||||||
|
|
||||||
# master commit task interval |
|
||||||
master.task.commit.interval=100 |
|
||||||
|
|
||||||
|
|
||||||
# the master server can work only when the cpu load average is less than this value. default value: the number of cpu cores * 2 |
|
||||||
#master.max.cpuload.avg=100 |
|
||||||
|
|
||||||
# the master server can work only when the available memory is larger than this reserved value. default value: physical memory * 1/10, unit: G |
|
||||||
master.reserved.memory=0.1 |
|
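The comments in master.properties spell out the defaults that apply when a key is absent or commented out. A minimal sketch, standard library only, of loading the file and applying those documented defaults; the classpath location and the `MasterConfigDemo` class are illustrative, not project code:

```java
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

// Sketch: read master.properties, falling back to the defaults the
// comments above document (cpu load threshold = cores * 2, reserved
// memory = 0.1 G).
public class MasterConfigDemo {
    public static void main(String[] args) throws IOException {
        Properties props = new Properties();
        try (InputStream in = MasterConfigDemo.class.getResourceAsStream("/master.properties")) {
            if (in == null) {
                throw new IllegalStateException("master.properties not found on classpath");
            }
            props.load(in);
        }

        int execThreads = Integer.parseInt(props.getProperty("master.exec.threads", "100"));
        // master.max.cpuload.avg is commented out in the file above, so the
        // documented default (the number of cpu cores * 2) applies
        double maxCpuLoadAvg = Double.parseDouble(props.getProperty(
                "master.max.cpuload.avg",
                String.valueOf(Runtime.getRuntime().availableProcessors() * 2)));
        double reservedMemoryG = Double.parseDouble(props.getProperty("master.reserved.memory", "0.1"));

        System.out.printf("exec threads=%d, max cpu load avg=%.1f, reserved memory=%.1fG%n",
                execThreads, maxCpuLoadAvg, reservedMemoryG);
    }
}
```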
@ -1,52 +0,0 @@ |
|||||||
<?xml version="1.0" encoding="UTF-8" ?> |
|
||||||
<!-- |
|
||||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
|
||||||
~ contributor license agreements. See the NOTICE file distributed with |
|
||||||
~ this work for additional information regarding copyright ownership. |
|
||||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
|
||||||
~ (the "License"); you may not use this file except in compliance with |
|
||||||
~ the License. You may obtain a copy of the License at |
|
||||||
~ |
|
||||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
|
||||||
~ |
|
||||||
~ Unless required by applicable law or agreed to in writing, software |
|
||||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
|
||||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
||||||
~ See the License for the specific language governing permissions and |
|
||||||
~ limitations under the License. |
|
||||||
--> |
|
||||||
|
|
||||||
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html --> |
|
||||||
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" --> |
|
||||||
<property name="log.base" value="logs" /> |
|
||||||
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender"> |
|
||||||
<encoder> |
|
||||||
<pattern> |
|
||||||
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n |
|
||||||
</pattern> |
|
||||||
<charset>UTF-8</charset> |
|
||||||
</encoder> |
|
||||||
</appender> |
|
||||||
|
|
||||||
<appender name="MASTERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender"> |
|
||||||
<file>${log.base}/dolphinscheduler-master.log</file> |
|
||||||
<filter class="org.apache.dolphinscheduler.server.master.log.MasterLogFilter"> |
|
||||||
<level>INFO</level> |
|
||||||
</filter> |
|
||||||
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy"> |
|
||||||
<fileNamePattern>${log.base}/dolphinscheduler-master.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern> |
|
||||||
<maxHistory>168</maxHistory> |
|
||||||
<maxFileSize>200MB</maxFileSize> |
|
||||||
</rollingPolicy> |
|
||||||
<encoder> |
|
||||||
<pattern> |
|
||||||
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n |
|
||||||
</pattern> |
|
||||||
<charset>UTF-8</charset> |
|
||||||
</encoder> |
|
||||||
</appender> |
|
||||||
|
|
||||||
<root level="INFO"> |
|
||||||
<appender-ref ref="MASTERLOGFILE"/> |
|
||||||
</root> |
|
||||||
</configuration> |
|
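With the logback configuration above on the classpath, any SLF4J logger at INFO or above whose events pass MasterLogFilter rolls into logs/dolphinscheduler-master.log, at 200MB per file with 168 hourly archives (one week) retained. A short usage sketch; the `MasterLoggingDemo` class is illustrative:

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Sketch: events flow through the root logger (level INFO) into the
// MASTERLOGFILE appender defined above.
public class MasterLoggingDemo {
    private static final Logger logger = LoggerFactory.getLogger(MasterLoggingDemo.class);

    public static void main(String[] args) {
        logger.info("master server starting, heartbeat interval: {}s", 10);
        logger.debug("not written: below the INFO threshold configured above");
    }
}
```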
@ -1,33 +0,0 @@ |
|||||||
<?xml version="1.0" encoding="UTF-8" ?> |
|
||||||
<!-- |
|
||||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
|
||||||
~ contributor license agreements. See the NOTICE file distributed with |
|
||||||
~ this work for additional information regarding copyright ownership. |
|
||||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
|
||||||
~ (the "License"); you may not use this file except in compliance with |
|
||||||
~ the License. You may obtain a copy of the License at |
|
||||||
~ |
|
||||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
|
||||||
~ |
|
||||||
~ Unless required by applicable law or agreed to in writing, software |
|
||||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
|
||||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
||||||
~ See the License for the specific language governing permissions and |
|
||||||
~ limitations under the License. |
|
||||||
--> |
|
||||||
|
|
||||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
|
||||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.AccessTokenMapper"> |
|
||||||
<select id="selectAccessTokenPage" resultType="org.apache.dolphinscheduler.dao.entity.AccessToken"> |
|
||||||
select * from t_ds_access_token t |
|
||||||
left join t_ds_user u on t.user_id = u.id |
|
||||||
where 1 = 1 |
|
||||||
<if test="userName != null and userName != ''"> |
|
||||||
and u.user_name like concat ('%', #{userName}, '%') |
|
||||||
</if> |
|
||||||
<if test="userId != 0"> |
|
||||||
and t.user_id = #{userId} |
|
||||||
</if> |
|
||||||
order by t.update_time desc |
|
||||||
</select> |
|
||||||
</mapper> |
|
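Each MyBatis mapper XML in this change binds to a Java interface: the `namespace` names the interface, each statement `id` names a method, and the `#{...}` placeholders bind to `@Param` names. A sketch of the interface behind the file above, assuming MyBatis-Plus pagination; the exact signature is an assumption, and the same pattern backs the other mapper XML files below:

```java
import org.apache.dolphinscheduler.dao.entity.AccessToken;
import org.apache.ibatis.annotations.Param;

import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;

// Sketch of the interface the XML namespace points at; the MyBatis-Plus
// BaseMapper/IPage types here are assumptions about the surrounding code.
public interface AccessTokenMapper extends BaseMapper<AccessToken> {

    // backs <select id="selectAccessTokenPage"> above: the Page argument
    // drives pagination, userName/userId bind to #{userName}/#{userId};
    // per the XML <if> tests, a null/empty userName or a userId of 0
    // disables the corresponding filter
    IPage<AccessToken> selectAccessTokenPage(Page<AccessToken> page,
                                             @Param("userName") String userName,
                                             @Param("userId") int userId);
}
```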
@ -1,47 +0,0 @@ |
|||||||
<?xml version="1.0" encoding="UTF-8" ?> |
|
||||||
<!-- |
|
||||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
|
||||||
~ contributor license agreements. See the NOTICE file distributed with |
|
||||||
~ this work for additional information regarding copyright ownership. |
|
||||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
|
||||||
~ (the "License"); you may not use this file except in compliance with |
|
||||||
~ the License. You may obtain a copy of the License at |
|
||||||
~ |
|
||||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
|
||||||
~ |
|
||||||
~ Unless required by applicable law or agreed to in writing, software |
|
||||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
|
||||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
||||||
~ See the License for the specific language governing permissions and |
|
||||||
~ limitations under the License. |
|
||||||
--> |
|
||||||
|
|
||||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
|
||||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.AlertGroupMapper"> |
|
||||||
<select id="queryAlertGroupPage" resultType="org.apache.dolphinscheduler.dao.entity.AlertGroup"> |
|
||||||
select * from t_ds_alertgroup |
|
||||||
where 1 = 1 |
|
||||||
<if test="groupName != null and groupName != ''"> |
|
||||||
and group_name like concat('%', #{groupName}, '%') |
|
||||||
</if> |
|
||||||
order by update_time desc |
|
||||||
</select> |
|
||||||
<select id="queryByGroupName" resultType="org.apache.dolphinscheduler.dao.entity.AlertGroup"> |
|
||||||
select * from t_ds_alertgroup |
|
||||||
where group_name=#{groupName} |
|
||||||
</select> |
|
||||||
<select id="queryByUserId" resultType="org.apache.dolphinscheduler.dao.entity.AlertGroup"> |
|
||||||
select * from t_ds_alertgroup t |
|
||||||
left join t_ds_relation_user_alertgroup r on t.id=r.alertgroup_id |
|
||||||
where r.user_id=#{userId} |
|
||||||
</select> |
|
||||||
<select id="queryByAlertType" resultType="org.apache.dolphinscheduler.dao.entity.AlertGroup"> |
|
||||||
select * from t_ds_alertgroup |
|
||||||
where group_type=#{alertType} |
|
||||||
</select> |
|
||||||
<select id="queryAllGroupList" resultType="org.apache.dolphinscheduler.dao.entity.AlertGroup"> |
|
||||||
select * |
|
||||||
from t_ds_alertgroup |
|
||||||
order by update_time desc |
|
||||||
</select> |
|
||||||
</mapper> |
|
@ -1,26 +0,0 @@ |
|||||||
<?xml version="1.0" encoding="UTF-8" ?> |
|
||||||
<!-- |
|
||||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
|
||||||
~ contributor license agreements. See the NOTICE file distributed with |
|
||||||
~ this work for additional information regarding copyright ownership. |
|
||||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
|
||||||
~ (the "License"); you may not use this file except in compliance with |
|
||||||
~ the License. You may obtain a copy of the License at |
|
||||||
~ |
|
||||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
|
||||||
~ |
|
||||||
~ Unless required by applicable law or agreed to in writing, software |
|
||||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
|
||||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
||||||
~ See the License for the specific language governing permissions and |
|
||||||
~ limitations under the License. |
|
||||||
--> |
|
||||||
|
|
||||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
|
||||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.AlertMapper"> |
|
||||||
<select id="listAlertByStatus" resultType="org.apache.dolphinscheduler.dao.entity.Alert"> |
|
||||||
select * |
|
||||||
from t_ds_alert |
|
||||||
where alert_status = #{alertStatus} |
|
||||||
</select> |
|
||||||
</mapper> |
|
@ -1,43 +0,0 @@ |
|||||||
<?xml version="1.0" encoding="UTF-8" ?> |
|
||||||
<!-- |
|
||||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
|
||||||
~ contributor license agreements. See the NOTICE file distributed with |
|
||||||
~ this work for additional information regarding copyright ownership. |
|
||||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
|
||||||
~ (the "License"); you may not use this file except in compliance with |
|
||||||
~ the License. You may obtain a copy of the License at |
|
||||||
~ |
|
||||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
|
||||||
~ |
|
||||||
~ Unless required by applicable law or agreed to in writing, software |
|
||||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
|
||||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
||||||
~ See the License for the specific language governing permissions and |
|
||||||
~ limitations under the License. |
|
||||||
--> |
|
||||||
|
|
||||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
|
||||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.CommandMapper"> |
|
||||||
<select id="getOneToRun" resultType="org.apache.dolphinscheduler.dao.entity.Command"> |
|
||||||
select command.* from t_ds_command command |
|
||||||
join t_ds_process_definition definition on command.process_definition_id = definition.id |
|
||||||
where definition.release_state = 1 AND definition.flag = 1 |
|
||||||
order by command.update_time asc |
|
||||||
limit 1 |
|
||||||
</select> |
|
||||||
<select id="countCommandState" resultType="org.apache.dolphinscheduler.dao.entity.CommandCount"> |
|
||||||
select cmd.command_type as command_type, count(1) as count |
|
||||||
from t_ds_command cmd, t_ds_process_definition process |
|
||||||
where cmd.process_definition_id = process.id |
|
||||||
<if test="projectIdArray != null and projectIdArray.length != 0"> |
|
||||||
and process.project_id in |
|
||||||
<foreach collection="projectIdArray" index="index" item="i" open="(" close=")" separator=","> |
|
||||||
#{i} |
|
||||||
</foreach> |
|
||||||
</if> |
|
||||||
<if test="startTime != null and endTime != null"> |
|
||||||
and cmd.start_time <![CDATA[ >= ]]> #{startTime} and cmd.update_time <![CDATA[ <= ]]> #{endTime} |
|
||||||
</if> |
|
||||||
group by cmd.command_type |
|
||||||
</select> |
|
||||||
</mapper> |
|
@ -1,79 +0,0 @@ |
|||||||
<?xml version="1.0" encoding="UTF-8" ?> |
|
||||||
<!-- |
|
||||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
|
||||||
~ contributor license agreements. See the NOTICE file distributed with |
|
||||||
~ this work for additional information regarding copyright ownership. |
|
||||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
|
||||||
~ (the "License"); you may not use this file except in compliance with |
|
||||||
~ the License. You may obtain a copy of the License at |
|
||||||
~ |
|
||||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
|
||||||
~ |
|
||||||
~ Unless required by applicable law or agreed to in writing, software |
|
||||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
|
||||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
||||||
~ See the License for the specific language governing permissions and |
|
||||||
~ limitations under the License. |
|
||||||
--> |
|
||||||
|
|
||||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
|
||||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.DataSourceMapper"> |
|
||||||
<select id="queryDataSourceByType" resultType="org.apache.dolphinscheduler.dao.entity.DataSource"> |
|
||||||
select * |
|
||||||
from t_ds_datasource |
|
||||||
where type=#{type} |
|
||||||
<if test="userId != 0"> |
|
||||||
and id in |
|
||||||
(select datasource_id |
|
||||||
from t_ds_relation_datasource_user |
|
||||||
where user_id=#{userId} |
|
||||||
union select id as datasource_id |
|
||||||
from t_ds_datasource |
|
||||||
where user_id=#{userId} |
|
||||||
) |
|
||||||
</if> |
|
||||||
|
|
||||||
</select> |
|
||||||
|
|
||||||
<select id="selectPaging" resultType="org.apache.dolphinscheduler.dao.entity.DataSource"> |
|
||||||
select * |
|
||||||
from t_ds_datasource |
|
||||||
where 1 = 1 |
|
||||||
<if test="userId != 0"> |
|
||||||
and id in |
|
||||||
(select datasource_id |
|
||||||
from t_ds_relation_datasource_user |
|
||||||
where user_id=#{userId} |
|
||||||
union select id as datasource_id |
|
||||||
from t_ds_datasource |
|
||||||
where user_id=#{userId} |
|
||||||
) |
|
||||||
</if> |
|
||||||
<if test="name != null and name != ''"> |
|
||||||
and name like concat ('%', #{name}, '%') |
|
||||||
</if> |
|
||||||
order by update_time desc |
|
||||||
</select> |
|
||||||
<select id="queryDataSourceByName" resultType="org.apache.dolphinscheduler.dao.entity.DataSource"> |
|
||||||
select * |
|
||||||
from t_ds_datasource |
|
||||||
where name=#{name} |
|
||||||
</select> |
|
||||||
<select id="queryAuthedDatasource" resultType="org.apache.dolphinscheduler.dao.entity.DataSource"> |
|
||||||
select datasource.* |
|
||||||
from t_ds_datasource datasource, t_ds_relation_datasource_user rel |
|
||||||
where datasource.id = rel.datasource_id AND rel.user_id = #{userId} |
|
||||||
</select> |
|
||||||
<select id="queryDatasourceExceptUserId" resultType="org.apache.dolphinscheduler.dao.entity.DataSource"> |
|
||||||
select * |
|
||||||
from t_ds_datasource |
|
||||||
where user_id <![CDATA[ <> ]]> #{userId} |
|
||||||
</select> |
|
||||||
<select id="listAllDataSourceByType" resultType="org.apache.dolphinscheduler.dao.entity.DataSource"> |
|
||||||
select * |
|
||||||
from t_ds_datasource |
|
||||||
where type = #{type} |
|
||||||
</select> |
|
||||||
|
|
||||||
|
|
||||||
</mapper> |
|
@ -1,30 +0,0 @@ |
|||||||
<?xml version="1.0" encoding="UTF-8" ?> |
|
||||||
<!-- |
|
||||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
|
||||||
~ contributor license agreements. See the NOTICE file distributed with |
|
||||||
~ this work for additional information regarding copyright ownership. |
|
||||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
|
||||||
~ (the "License"); you may not use this file except in compliance with |
|
||||||
~ the License. You may obtain a copy of the License at |
|
||||||
~ |
|
||||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
|
||||||
~ |
|
||||||
~ Unless required by applicable law or agreed to in writing, software |
|
||||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
|
||||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
||||||
~ See the License for the specific language governing permissions and |
|
||||||
~ limitations under the License. |
|
||||||
--> |
|
||||||
|
|
||||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
|
||||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.DataSourceUserMapper"> |
|
||||||
<delete id="deleteByUserId"> |
|
||||||
delete from t_ds_relation_datasource_user |
|
||||||
where user_id = #{userId} |
|
||||||
|
|
||||||
</delete> |
|
||||||
<delete id="deleteByDatasourceId"> |
|
||||||
delete from t_ds_relation_datasource_user |
|
||||||
where datasource_id = #{datasourceId} |
|
||||||
</delete> |
|
||||||
</mapper> |
|
@ -1,36 +0,0 @@ |
|||||||
<?xml version="1.0" encoding="UTF-8" ?> |
|
||||||
<!-- |
|
||||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
|
||||||
~ contributor license agreements. See the NOTICE file distributed with |
|
||||||
~ this work for additional information regarding copyright ownership. |
|
||||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
|
||||||
~ (the "License"); you may not use this file except in compliance with |
|
||||||
~ the License. You may obtain a copy of the License at |
|
||||||
~ |
|
||||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
|
||||||
~ |
|
||||||
~ Unless required by applicable law or agreed to in writing, software |
|
||||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
|
||||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
||||||
~ See the License for the specific language governing permissions and |
|
||||||
~ limitations under the License. |
|
||||||
--> |
|
||||||
|
|
||||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
|
||||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ErrorCommandMapper"> |
|
||||||
<select id="countCommandState" resultType="org.apache.dolphinscheduler.dao.entity.CommandCount"> |
|
||||||
select cmd.command_type as command_type, count(1) as count |
|
||||||
from t_ds_error_command cmd, t_ds_process_definition process |
|
||||||
where cmd.process_definition_id = process.id |
|
||||||
<if test="projectIdArray != null and projectIdArray.length != 0"> |
|
||||||
and process.project_id in |
|
||||||
<foreach collection="projectIdArray" index="index" item="i" open="(" close=")" separator=","> |
|
||||||
#{i} |
|
||||||
</foreach> |
|
||||||
</if> |
|
||||||
<if test="startTime != null and endTime != null"> |
|
||||||
and cmd.start_time <![CDATA[ >= ]]> #{startTime} and cmd.update_time <![CDATA[ <= ]]> #{endTime} |
|
||||||
</if> |
|
||||||
group by cmd.command_type |
|
||||||
</select> |
|
||||||
</mapper> |
|
@ -1,96 +0,0 @@ |
|||||||
<?xml version="1.0" encoding="UTF-8" ?> |
|
||||||
<!-- |
|
||||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
|
||||||
~ contributor license agreements. See the NOTICE file distributed with |
|
||||||
~ this work for additional information regarding copyright ownership. |
|
||||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
|
||||||
~ (the "License"); you may not use this file except in compliance with |
|
||||||
~ the License. You may obtain a copy of the License at |
|
||||||
~ |
|
||||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
|
||||||
~ |
|
||||||
~ Unless required by applicable law or agreed to in writing, software |
|
||||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
|
||||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
||||||
~ See the License for the specific language governing permissions and |
|
||||||
~ limitations under the License. |
|
||||||
--> |
|
||||||
|
|
||||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
|
||||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper"> |
|
||||||
<select id="queryByDefineName" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition"> |
|
||||||
select pd.*,u.user_name,p.name as project_name,t.tenant_code,t.tenant_name,q.queue,q.queue_name |
|
||||||
from t_ds_process_definition pd |
|
||||||
JOIN t_ds_user u ON pd.user_id = u.id |
|
||||||
JOIN t_ds_project p ON pd.project_id = p.id |
|
||||||
JOIN t_ds_tenant t ON t.id = u.tenant_id |
|
||||||
JOIN t_ds_queue q ON t.queue_id = q.id |
|
||||||
WHERE p.id = #{projectId} |
|
||||||
and pd.name = #{processDefinitionName} |
|
||||||
</select> |
|
||||||
<select id="queryDefineListPaging" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition"> |
|
||||||
SELECT td.*,sc.schedule_release_state,tu.user_name |
|
||||||
FROM t_ds_process_definition td |
|
||||||
left join (select process_definition_id,release_state as schedule_release_state from t_ds_schedules group by process_definition_id,release_state) sc on sc.process_definition_id = td.id |
|
||||||
left join t_ds_user tu on td.user_id = tu.id |
|
||||||
where td.project_id = #{projectId} |
|
||||||
<if test=" isAdmin == false "> |
|
||||||
and tu.user_type=1 |
|
||||||
</if> |
|
||||||
<if test=" searchVal != null and searchVal != ''"> |
|
||||||
and td.name like concat('%', #{searchVal}, '%') |
|
||||||
</if> |
|
||||||
<if test=" userId != 0"> |
|
||||||
and td.user_id = #{userId} |
|
||||||
</if> |
|
||||||
order by sc.schedule_release_state desc,td.update_time desc |
|
||||||
</select> |
|
||||||
|
|
||||||
<select id="queryAllDefinitionList" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition"> |
|
||||||
select * |
|
||||||
from t_ds_process_definition |
|
||||||
where project_id = #{projectId} |
|
||||||
order by create_time desc |
|
||||||
</select> |
|
||||||
<select id="queryDefinitionListByTenant" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition"> |
|
||||||
select * |
|
||||||
from t_ds_process_definition |
|
||||||
where tenant_id = #{tenantId} |
|
||||||
</select> |
|
||||||
<select id="queryDefinitionListByIdList" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition"> |
|
||||||
select * |
|
||||||
from t_ds_process_definition |
|
||||||
where id in |
|
||||||
<foreach collection="ids" index="index" item="i" open="(" separator="," close=")"> |
|
||||||
#{i} |
|
||||||
</foreach> |
|
||||||
</select> |
|
||||||
<select id="countDefinitionGroupByUser" resultType="org.apache.dolphinscheduler.dao.entity.DefinitionGroupByUser"> |
|
||||||
SELECT td.user_id as user_id, tu.user_name as user_name, count(0) as count |
|
||||||
FROM t_ds_process_definition td |
|
||||||
JOIN t_ds_user tu on tu.id=td.user_id |
|
||||||
where 1 = 1 |
|
||||||
<if test=" isAdmin == false "> |
|
||||||
and tu.user_type=1 |
|
||||||
</if> |
|
||||||
<if test="projectIds != null and projectIds.length != 0"> |
|
||||||
and td.project_id in |
|
||||||
<foreach collection="projectIds" index="index" item="i" open="(" separator="," close=")"> |
|
||||||
#{i} |
|
||||||
</foreach> |
|
||||||
</if> |
|
||||||
group by td.user_id,tu.user_name |
|
||||||
</select> |
|
||||||
<select id="queryByDefineId" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition"> |
|
||||||
SELECT |
|
||||||
pd.*, u.user_name, |
|
||||||
p.name AS project_name |
|
||||||
FROM |
|
||||||
t_ds_process_definition pd, |
|
||||||
t_ds_user u, |
|
||||||
t_ds_project p |
|
||||||
WHERE |
|
||||||
pd.user_id = u.id AND pd.project_id = p.id |
|
||||||
AND pd.id = #{processDefineId} |
|
||||||
</select> |
|
||||||
</mapper> |
|
@ -1,43 +0,0 @@ |
|||||||
<?xml version="1.0" encoding="UTF-8" ?> |
|
||||||
<!-- |
|
||||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
|
||||||
~ contributor license agreements. See the NOTICE file distributed with |
|
||||||
~ this work for additional information regarding copyright ownership. |
|
||||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
|
||||||
~ (the "License"); you may not use this file except in compliance with |
|
||||||
~ the License. You may obtain a copy of the License at |
|
||||||
~ |
|
||||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
|
||||||
~ |
|
||||||
~ Unless required by applicable law or agreed to in writing, software |
|
||||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
|
||||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
||||||
~ See the License for the specific language governing permissions and |
|
||||||
~ limitations under the License. |
|
||||||
--> |
|
||||||
|
|
||||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
|
||||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapMapper"> |
|
||||||
<delete id="deleteByParentProcessId"> |
|
||||||
delete |
|
||||||
from t_ds_relation_process_instance |
|
||||||
where parent_process_instance_id=#{parentProcessId} |
|
||||||
|
|
||||||
</delete> |
|
||||||
<select id="queryByParentId" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstanceMap"> |
|
||||||
select * |
|
||||||
from t_ds_relation_process_instance |
|
||||||
where parent_process_instance_id = #{parentProcessId} |
|
||||||
and parent_task_instance_id = #{parentTaskId} |
|
||||||
</select> |
|
||||||
<select id="queryBySubProcessId" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstanceMap"> |
|
||||||
select * |
|
||||||
from t_ds_relation_process_instance |
|
||||||
where process_instance_id = #{subProcessId} |
|
||||||
</select> |
|
||||||
<select id="querySubIdListByParentId" resultType="java.lang.Integer"> |
|
||||||
select process_instance_id |
|
||||||
from t_ds_relation_process_instance |
|
||||||
where parent_process_instance_id = #{parentInstanceId} |
|
||||||
</select> |
|
||||||
</mapper> |
|
@ -1,182 +0,0 @@ |
|||||||
<?xml version="1.0" encoding="UTF-8" ?> |
|
||||||
<!-- |
|
||||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
|
||||||
~ contributor license agreements. See the NOTICE file distributed with |
|
||||||
~ this work for additional information regarding copyright ownership. |
|
||||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
|
||||||
~ (the "License"); you may not use this file except in compliance with |
|
||||||
~ the License. You may obtain a copy of the License at |
|
||||||
~ |
|
||||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
|
||||||
~ |
|
||||||
~ Unless required by applicable law or agreed to in writing, software |
|
||||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
|
||||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
||||||
~ See the License for the specific language governing permissions and |
|
||||||
~ limitations under the License. |
|
||||||
--> |
|
||||||
|
|
||||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
|
||||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper"> |
|
||||||
<select id="queryDetailById" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance"> |
|
||||||
select inst.* |
|
||||||
from t_ds_process_instance inst |
|
||||||
where inst.id = #{processId} |
|
||||||
</select> |
|
||||||
<select id="queryByHostAndStatus" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance"> |
|
||||||
select * |
|
||||||
from t_ds_process_instance |
|
||||||
where 1=1 |
|
||||||
<if test="host != null and host != ''"> |
|
||||||
and host=#{host} |
|
||||||
</if> |
|
||||||
and state in |
|
||||||
<foreach collection="states" item="i" open="(" close=")" separator=","> |
|
||||||
#{i} |
|
||||||
</foreach> |
|
||||||
order by id asc |
|
||||||
</select> |
|
||||||
|
|
||||||
<select id="queryByTenantIdAndStatus" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance"> |
|
||||||
select * |
|
||||||
from t_ds_process_instance |
|
||||||
where 1=1 |
|
||||||
<if test="tenantId != -1"> |
|
||||||
and tenant_id =#{tenantId} |
|
||||||
</if> |
|
||||||
and state in |
|
||||||
<foreach collection="states" item="i" open="(" close=")" separator=","> |
|
||||||
#{i} |
|
||||||
</foreach> |
|
||||||
order by id asc |
|
||||||
</select> |
|
||||||
|
|
||||||
<select id="queryByWorkerGroupIdAndStatus" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance"> |
|
||||||
select * |
|
||||||
from t_ds_process_instance |
|
||||||
where 1=1 |
|
||||||
<if test="workerGroupId != -1"> |
|
||||||
and worker_group_id =#{workerGroupId} |
|
||||||
</if> |
|
||||||
and state in |
|
||||||
<foreach collection="states" item="i" open="(" close=")" separator=","> |
|
||||||
#{i} |
|
||||||
</foreach> |
|
||||||
order by id asc |
|
||||||
</select> |
|
||||||
|
|
||||||
<select id="queryProcessInstanceListPaging" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance"> |
|
||||||
select instance.* |
|
||||||
from t_ds_process_instance instance |
|
||||||
join t_ds_process_definition define ON instance.process_definition_id = define.id |
|
||||||
where 1=1 |
|
||||||
and instance.is_sub_process=0 |
|
||||||
and define.project_id = #{projectId} |
|
||||||
<if test="processDefinitionId != 0"> |
|
||||||
and instance.process_definition_id = #{processDefinitionId} |
|
||||||
</if> |
|
||||||
<if test="searchVal != null and searchVal != ''"> |
|
||||||
and instance.name like concat('%', #{searchVal}, '%') |
|
||||||
</if> |
|
||||||
<if test="startTime != null "> |
|
||||||
and instance.start_time > #{startTime} and instance.start_time <![CDATA[ <=]]> #{endTime} |
|
||||||
</if> |
|
||||||
<if test="states != null and states != ''"> |
|
||||||
and instance.state in |
|
||||||
<foreach collection="states" index="index" item="i" open="(" separator="," close=")"> |
|
||||||
#{i} |
|
||||||
</foreach> |
|
||||||
</if> |
|
||||||
<if test="host != null and host != ''"> |
|
||||||
and instance.host like concat('%', #{host}, '%') |
|
||||||
</if> |
|
||||||
order by instance.start_time desc |
|
||||||
</select> |
|
||||||
<update id="setFailoverByHostAndStateArray"> |
|
||||||
update t_ds_process_instance |
|
||||||
set host=null |
|
||||||
where host =#{host} and state in |
|
||||||
<foreach collection="states" index="index" item="i" open="(" close=")" separator=","> |
|
||||||
#{i} |
|
||||||
</foreach> |
|
||||||
</update> |
|
||||||
<update id="updateProcessInstanceByState"> |
|
||||||
update t_ds_process_instance |
|
||||||
set state = #{destState} |
|
||||||
where state = #{originState} |
|
||||||
</update> |
|
||||||
|
|
||||||
<update id="updateProcessInstanceByTenantId"> |
|
||||||
update t_ds_process_instance |
|
||||||
set tenant_id = #{destTenantId} |
|
||||||
where tenant_id = #{originTenantId} |
|
||||||
</update> |
|
||||||
|
|
||||||
<update id="updateProcessInstanceByWorkerGroupId"> |
|
||||||
update t_ds_process_instance |
|
||||||
set worker_group_id = #{destWorkerGroupId} |
|
||||||
where worker_group_id = #{originWorkerGroupId} |
|
||||||
</update> |
|
||||||
|
|
||||||
<select id="countInstanceStateByUser" resultType="org.apache.dolphinscheduler.dao.entity.ExecuteStatusCount"> |
|
||||||
select t.state, count(0) as count |
|
||||||
from t_ds_process_instance t |
|
||||||
join t_ds_process_definition d on d.id=t.process_definition_id |
|
||||||
join t_ds_project p on p.id=d.project_id |
|
||||||
where 1 = 1 |
|
||||||
and t.is_sub_process = 0 |
|
||||||
<if test="startTime != null and endTime != null"> |
|
||||||
and t.start_time >= #{startTime} and t.start_time <![CDATA[ <= ]]> #{endTime} |
|
||||||
</if> |
|
||||||
<if test="projectIds != null and projectIds.length != 0"> |
|
||||||
and p.id in |
|
||||||
<foreach collection="projectIds" index="index" item="i" open="(" close=")" separator=","> |
|
||||||
#{i} |
|
||||||
</foreach> |
|
||||||
</if> |
|
||||||
group by t.state |
|
||||||
</select> |
|
||||||
<select id="queryByProcessDefineId" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance"> |
|
||||||
select * |
|
||||||
from t_ds_process_instance |
|
||||||
where process_definition_id=#{processDefinitionId} |
|
||||||
order by start_time desc limit #{size} |
|
||||||
</select> |
|
||||||
<select id="queryLastSchedulerProcess" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance"> |
|
||||||
select * |
|
||||||
from t_ds_process_instance |
|
||||||
where process_definition_id=#{processDefinitionId} |
|
||||||
<if test="startTime!=null and endTime != null "> |
|
||||||
and schedule_time between #{startTime} and #{endTime} |
|
||||||
</if> |
|
||||||
order by end_time desc limit 1 |
|
||||||
</select> |
|
||||||
<select id="queryLastRunningProcess" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance"> |
|
||||||
select * |
|
||||||
from t_ds_process_instance |
|
||||||
where 1=1 |
|
||||||
<if test="states !=null and states.length != 0"> |
|
||||||
and state in |
|
||||||
<foreach collection="states" item="i" index="index" open="(" separator="," close=")"> |
|
||||||
#{i} |
|
||||||
</foreach> |
|
||||||
</if> |
|
||||||
<if test="startTime!=null and endTime != null "> |
|
||||||
and process_definition_id=#{processDefinitionId} |
|
||||||
and (schedule_time between #{startTime} and #{endTime} or start_time between #{startTime} and #{endTime}) |
|
||||||
</if> |
|
||||||
order by start_time desc limit 1 |
|
||||||
</select> |
|
||||||
<select id="queryLastManualProcess" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance"> |
|
||||||
select * |
|
||||||
from t_ds_process_instance |
|
||||||
where process_definition_id=#{processDefinitionId} |
|
||||||
and schedule_time is null |
|
||||||
<if test="startTime!=null and endTime != null "> |
|
||||||
and start_time between #{startTime} and #{endTime} |
|
||||||
</if> |
|
||||||
order by end_time desc limit 1 |
|
||||||
</select> |
|
||||||
|
|
||||||
|
|
||||||
</mapper> |
|
@ -1,68 +0,0 @@ |
|||||||
<?xml version="1.0" encoding="UTF-8" ?> |
|
||||||
<!-- |
|
||||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
|
||||||
~ contributor license agreements. See the NOTICE file distributed with |
|
||||||
~ this work for additional information regarding copyright ownership. |
|
||||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
|
||||||
~ (the "License"); you may not use this file except in compliance with |
|
||||||
~ the License. You may obtain a copy of the License at |
|
||||||
~ |
|
||||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
|
||||||
~ |
|
||||||
~ Unless required by applicable law or agreed to in writing, software |
|
||||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
|
||||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
||||||
~ See the License for the specific language governing permissions and |
|
||||||
~ limitations under the License. |
|
||||||
--> |
|
||||||
|
|
||||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
|
||||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ProjectMapper"> |
|
||||||
<select id="queryDetailById" resultType="org.apache.dolphinscheduler.dao.entity.Project"> |
|
||||||
select p.*,u.user_name as user_name |
|
||||||
from t_ds_project p |
|
||||||
join t_ds_user u on p.user_id = u.id |
|
||||||
where p.id = #{projectId} |
|
||||||
</select> |
|
||||||
<select id="queryByName" resultType="org.apache.dolphinscheduler.dao.entity.Project"> |
|
||||||
select p.*,u.user_name as user_name |
|
||||||
from t_ds_project p |
|
||||||
join t_ds_user u on p.user_id = u.id |
|
||||||
where p.name = #{projectName} |
|
||||||
limit 1 |
|
||||||
</select> |
|
||||||
<select id="queryProjectListPaging" resultType="org.apache.dolphinscheduler.dao.entity.Project"> |
|
||||||
select p.*,u.user_name as user_name, |
|
||||||
(SELECT COUNT(*) FROM t_ds_process_definition AS def WHERE def.project_id = p.id) AS def_count, |
|
||||||
(SELECT COUNT(*) FROM t_ds_process_definition def, t_ds_process_instance inst WHERE def.id = inst.process_definition_id AND def.project_id = p.id AND inst.state=1 ) as inst_running_count |
|
||||||
from t_ds_project p |
|
||||||
join t_ds_user u on u.id=p.user_id |
|
||||||
where 1=1 |
|
||||||
<if test="userId != 0"> |
|
||||||
and p.id in |
|
||||||
(select project_id from t_ds_relation_project_user where user_id=#{userId} |
|
||||||
union select id as project_id from t_ds_project where user_id=#{userId} |
|
||||||
) |
|
||||||
</if> |
|
||||||
<if test="searchName!=null and searchName != ''"> |
|
||||||
and p.name like concat('%', #{searchName}, '%') |
|
||||||
</if> |
|
||||||
order by p.create_time desc |
|
||||||
</select> |
|
||||||
<select id="queryAuthedProjectListByUserId" resultType="org.apache.dolphinscheduler.dao.entity.Project"> |
|
||||||
select p.* |
|
||||||
from t_ds_project p,t_ds_relation_project_user rel |
|
||||||
where p.id = rel.project_id and rel.user_id= #{userId} |
|
||||||
</select> |
|
||||||
<select id="queryProjectExceptUserId" resultType="org.apache.dolphinscheduler.dao.entity.Project"> |
|
||||||
select * |
|
||||||
from t_ds_project |
|
||||||
where user_id <![CDATA[ <> ]]> #{userId} |
|
||||||
</select> |
|
||||||
<select id="queryProjectCreatedByUser" resultType="org.apache.dolphinscheduler.dao.entity.Project"> |
|
||||||
select * |
|
||||||
from t_ds_project |
|
||||||
where user_id = #{userId} |
|
||||||
</select> |
|
||||||
|
|
||||||
</mapper> |
|
@ -1,36 +0,0 @@ |
|||||||
<?xml version="1.0" encoding="UTF-8" ?> |
|
||||||
<!-- |
|
||||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
|
||||||
~ contributor license agreements. See the NOTICE file distributed with |
|
||||||
~ this work for additional information regarding copyright ownership. |
|
||||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
|
||||||
~ (the "License"); you may not use this file except in compliance with |
|
||||||
~ the License. You may obtain a copy of the License at |
|
||||||
~ |
|
||||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
|
||||||
~ |
|
||||||
~ Unless required by applicable law or agreed to in writing, software |
|
||||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
|
||||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
||||||
~ See the License for the specific language governing permissions and |
|
||||||
~ limitations under the License. |
|
||||||
--> |
|
||||||
|
|
||||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
|
||||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ProjectUserMapper"> |
|
||||||
<delete id="deleteProjectRelation"> |
|
||||||
delete from t_ds_relation_project_user |
|
||||||
where 1=1 |
|
||||||
and user_id = #{userId} |
|
||||||
<if test="projectId != 0 "> |
|
||||||
and project_id = #{projectId} |
|
||||||
</if> |
|
||||||
</delete> |
|
||||||
<select id="queryProjectRelation" resultType="org.apache.dolphinscheduler.dao.entity.ProjectUser"> |
|
||||||
select * |
|
||||||
from t_ds_relation_project_user |
|
||||||
where project_id = #{projectId} |
|
||||||
and user_id = #{userId} |
|
||||||
limit 1 |
|
||||||
</select> |
|
||||||
</mapper> |
|
@ -1,42 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!-- standard ASF Apache License 2.0 header -->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.QueueMapper">
    <select id="queryQueuePaging" resultType="org.apache.dolphinscheduler.dao.entity.Queue">
        select *
        from t_ds_queue
        where 1=1
        <if test="searchVal != null and searchVal != ''">
            and queue_name like concat('%', #{searchVal}, '%')
        </if>
        order by update_time desc
    </select>
    <select id="queryAllQueueList" resultType="org.apache.dolphinscheduler.dao.entity.Queue">
        select *
        from t_ds_queue
        where 1=1
        <if test="queue != null and queue != ''">
            and queue = #{queue}
        </if>
        <if test="queueName != null and queueName != ''">
            and queue_name = #{queueName}
        </if>
    </select>
</mapper>
@ -1,74 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!-- standard ASF Apache License 2.0 header -->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ResourceMapper">
    <select id="queryResourceList" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
        select *
        from t_ds_resources
        where 1=1
        <if test="alias != null and alias != ''">
            and alias = #{alias}
        </if>
        <if test="type != -1">
            and type = #{type}
        </if>
        <if test="userId != 0">
            and user_id = #{userId}
        </if>
    </select>
    <select id="queryResourceListAuthored" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
        select *
        from t_ds_resources
        where 1 = 1
        <if test="type != -1">
            and type=#{type}
        </if>
        and id in (select resources_id from t_ds_relation_resources_user where user_id=#{userId}
        union select id as resources_id from t_ds_resources where user_id=#{userId})
    </select>
    <select id="queryResourcePaging" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
        select *
        from t_ds_resources
        where type=#{type}
        <if test="userId != 0">
            and id in (select resources_id from t_ds_relation_resources_user where user_id=#{userId}
            union select id as resources_id from t_ds_resources where user_id=#{userId})
        </if>
        <if test="searchVal != null and searchVal != ''">
            and alias like concat('%', #{searchVal}, '%')
        </if>
        order by update_time desc
    </select>
    <select id="queryAuthorizedResourceList" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
        select r.*
        from t_ds_resources r,t_ds_relation_resources_user rel
        where r.id = rel.resources_id AND rel.user_id = #{userId}
    </select>
    <select id="queryResourceExceptUserId" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
        select *
        from t_ds_resources
        where user_id <![CDATA[ <> ]]> #{userId}
    </select>
    <select id="queryTenantCodeByResourceName" resultType="java.lang.String">
        select tenant_code
        from t_ds_tenant t, t_ds_user u, t_ds_resources res
        where t.id = u.tenant_id and u.id = res.user_id and res.type=0
        and res.alias= #{resName}
    </select>
</mapper>
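queryTenantCodeByResourceName walks resource, owning user, then tenant to find which tenant's storage a file belongs to; the res.type=0 filter restricts the match to file resources, assuming 0 denotes a FILE resource (a reading of this mapper, not something the diff states). A hedged sketch of the rendered statement for a hypothetical alias:

    select tenant_code
    from t_ds_tenant t, t_ds_user u, t_ds_resources res
    where t.id = u.tenant_id
      and u.id = res.user_id
      and res.type = 0                -- assumed: 0 = FILE resource
      and res.alias = 'wordcount.sh'; -- hypothetical resource name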
@ -1,32 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!-- standard ASF Apache License 2.0 header -->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ResourceUserMapper">
    <delete id="deleteResourceUser">
        delete
        from t_ds_relation_resources_user
        where 1 = 1
        <if test="userId != 0">
            and user_id = #{userId}
        </if>
        <if test="resourceId != 0">
            and resources_id = #{resourceId}
        </if>
    </delete>
</mapper>
@ -1,58 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!-- standard ASF Apache License 2.0 header -->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ScheduleMapper">
    <select id="queryByProcessDefineIdPaging" resultType="org.apache.dolphinscheduler.dao.entity.Schedule">
        select p_f.name as process_definition_name, p.name as project_name, u.user_name, s.*
        from t_ds_schedules s
        join t_ds_process_definition p_f on s.process_definition_id = p_f.id
        join t_ds_project as p on p_f.project_id = p.id
        join t_ds_user as u on s.user_id = u.id
        where 1=1
        <if test="processDefinitionId != 0">
            and s.process_definition_id = #{processDefinitionId}
        </if>
        order by s.update_time desc
    </select>
    <select id="querySchedulerListByProjectName" resultType="org.apache.dolphinscheduler.dao.entity.Schedule">
        select p_f.name as process_definition_name, p_f.description as definition_description, p.name as project_name, u.user_name, s.*
        from t_ds_schedules s
        join t_ds_process_definition p_f on s.process_definition_id = p_f.id
        join t_ds_project as p on p_f.project_id = p.id
        join t_ds_user as u on s.user_id = u.id
        where p.name = #{projectName}
    </select>
    <select id="selectAllByProcessDefineArray" resultType="org.apache.dolphinscheduler.dao.entity.Schedule">
        select *
        from t_ds_schedules
        where 1=1
        <if test="processDefineIds != null and processDefineIds.length != 0 ">
            and process_definition_id in
            <foreach collection="processDefineIds" index="index" item="i" open="(" separator="," close=")">
                #{i}
            </foreach>
        </if>
        and release_state = 1
    </select>
    <select id="queryByProcessDefinitionId" resultType="org.apache.dolphinscheduler.dao.entity.Schedule">
        select *
        from t_ds_schedules
        where process_definition_id = #{processDefinitionId}
    </select>
</mapper>
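The <foreach> in selectAllByProcessDefineArray expands a Java array into a SQL IN list, and the trailing release_state = 1 keeps only schedules in the released state (assuming 1 means online, which this file does not spell out). A hedged sketch of the rendered SQL for a hypothetical processDefineIds = [1, 2, 3]:

    select *
    from t_ds_schedules
    where 1=1
      and process_definition_id in (1, 2, 3)
      and release_state = 1;  -- assumed: 1 = online/released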
@ -1,32 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!-- standard ASF Apache License 2.0 header -->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.SessionMapper">
    <select id="queryByUserId" resultType="org.apache.dolphinscheduler.dao.entity.Session">
        select *
        from t_ds_session
        where user_id = #{userId}
    </select>

    <select id="queryByUserIdAndIp" resultType="org.apache.dolphinscheduler.dao.entity.Session">
        select *
        from t_ds_session
        where user_id = #{userId} AND ip = #{ip}
    </select>
</mapper>
@ -1,129 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!-- standard ASF Apache License 2.0 header -->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper">
    <update id="setFailoverByHostAndStateArray">
        update t_ds_task_instance
        set state = #{destStatus}
        where host = #{host}
        and state in
        <foreach collection="states" index="index" item="i" open="(" separator="," close=")">
            #{i}
        </foreach>
    </update>
    <select id="queryTaskByProcessIdAndState" resultType="java.lang.Integer">
        select id
        from t_ds_task_instance
        WHERE process_instance_id = #{processInstanceId}
        and state = #{state}
        and flag = 1
    </select>
    <select id="findValidTaskListByProcessId" resultType="org.apache.dolphinscheduler.dao.entity.TaskInstance">
        select *
        from t_ds_task_instance
        WHERE process_instance_id = #{processInstanceId}
        and flag = #{flag}
        order by start_time desc
    </select>
    <select id="queryByHostAndStatus" resultType="org.apache.dolphinscheduler.dao.entity.TaskInstance">
        select *
        from t_ds_task_instance
        where 1 = 1
        <if test="host != null and host != ''">
            and host = #{host}
        </if>
        <if test="states != null and states.length != 0">
            and state in
            <foreach collection="states" index="index" item="i" open="(" separator="," close=")">
                #{i}
            </foreach>
        </if>
    </select>
    <select id="countTaskInstanceStateByUser" resultType="org.apache.dolphinscheduler.dao.entity.ExecuteStatusCount">
        select state, count(0) as count
        from t_ds_task_instance t
        left join t_ds_process_definition d on d.id=t.process_definition_id
        left join t_ds_project p on p.id=d.project_id
        where 1=1
        <if test="projectIds != null and projectIds.length != 0">
            and d.project_id in
            <foreach collection="projectIds" index="index" item="i" open="(" separator="," close=")">
                #{i}
            </foreach>
        </if>
        <if test="startTime != null and endTime != null">
            and t.start_time > #{startTime} and t.start_time <![CDATA[ <= ]]> #{endTime}
        </if>
        group by t.state
    </select>
    <select id="queryByInstanceIdAndName" resultType="org.apache.dolphinscheduler.dao.entity.TaskInstance">
        select *
        from t_ds_task_instance
        where process_instance_id = #{processInstanceId}
        and name = #{name}
        and flag = 1
        limit 1
    </select>
    <select id="countTask" resultType="java.lang.Integer">
        select count(1) as count
        from t_ds_task_instance task, t_ds_process_definition process
        where task.process_definition_id=process.id
        <if test="projectIds != null and projectIds.length != 0">
            and process.project_id in
            <foreach collection="projectIds" index="index" item="i" open="(" separator="," close=")">
                #{i}
            </foreach>
        </if>
        <if test="taskIds != null and taskIds.length != 0">
            and task.id in
            <foreach collection="taskIds" index="index" item="i" open="(" separator="," close=")">
                #{i}
            </foreach>
        </if>
    </select>
    <select id="queryTaskInstanceListPaging" resultType="org.apache.dolphinscheduler.dao.entity.TaskInstance">
        select instance.*, process.name as process_instance_name
        from t_ds_task_instance instance
        join t_ds_process_definition define ON instance.process_definition_id = define.id
        join t_ds_process_instance process on process.id=instance.process_instance_id
        where define.project_id = #{projectId}
        <if test="startTime != null">
            and instance.start_time > #{startTime} and instance.start_time <![CDATA[ <= ]]> #{endTime}
        </if>
        <if test="processInstanceId != 0">
            and instance.process_instance_id = #{processInstanceId}
        </if>
        <if test="searchVal != null and searchVal != ''">
            and instance.name like concat('%', #{searchVal}, '%')
        </if>
        <if test="taskName != null and taskName != ''">
            and instance.name=#{taskName}
        </if>
        <if test="states != null and states.length != 0">
            and instance.state in
            <foreach collection="states" index="index" item="i" open="(" separator="," close=")">
                #{i}
            </foreach>
        </if>
        <if test="host != null and host != ''">
            and instance.host like concat('%', #{host}, '%')
        </if>
        order by instance.start_time desc
    </select>
</mapper>
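setFailoverByHostAndStateArray is the failover primitive: when a worker host drops out, every task still sitting in one of the given states on that host is flipped to a recovery state in a single statement. A hedged sketch of the rendered update; the host and the numeric state codes below are hypothetical stand-ins, since the diff does not define them:

    update t_ds_task_instance
    set state = 6                 -- hypothetical destStatus (e.g. a fault-tolerance state)
    where host = '192.168.0.5'    -- hypothetical failed worker host
      and state in (0, 1, 8);     -- hypothetical 'still in flight' states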
@ -1,41 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!-- standard ASF Apache License 2.0 header -->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.TenantMapper">
    <select id="queryById" resultType="org.apache.dolphinscheduler.dao.entity.Tenant">
        SELECT t.*, q.queue_name, q.queue
        FROM t_ds_tenant t, t_ds_queue q
        WHERE t.queue_id = q.id
        and t.id = #{tenantId}
    </select>
    <select id="queryByTenantCode" resultType="org.apache.dolphinscheduler.dao.entity.Tenant">
        select *
        from t_ds_tenant
        where tenant_code = #{tenantCode}
    </select>
    <select id="queryTenantPaging" resultType="org.apache.dolphinscheduler.dao.entity.Tenant">
        SELECT t.*, q.queue_name
        FROM t_ds_tenant t, t_ds_queue q
        WHERE t.queue_id = q.id
        <if test="searchVal != null and searchVal != ''">
            and t.tenant_name like concat('%', #{searchVal}, '%')
        </if>
        order by t.update_time desc
    </select>
</mapper>
@ -1,29 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!-- standard ASF Apache License 2.0 header -->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.UDFUserMapper">
    <delete id="deleteByUserId">
        delete from t_ds_relation_udfs_user
        where user_id = #{userId}
    </delete>
    <delete id="deleteByUdfFuncId">
        delete from t_ds_relation_udfs_user
        where udf_id = #{udfFuncId}
    </delete>
</mapper>
@ -1,71 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!-- standard ASF Apache License 2.0 header -->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.UdfFuncMapper">
    <select id="queryUdfByIdStr" resultType="org.apache.dolphinscheduler.dao.entity.UdfFunc">
        select *
        from t_ds_udfs
        where 1 = 1
        <if test="ids != null and ids != ''">
            and id in
            <foreach collection="ids" item="i" open="(" close=")" separator=",">
                #{i}
            </foreach>
        </if>
        <if test="funcNames != null and funcNames != ''">
            and func_name = #{funcNames}
        </if>
        order by id asc
    </select>
    <select id="queryUdfFuncPaging" resultType="org.apache.dolphinscheduler.dao.entity.UdfFunc">
        select *
        from t_ds_udfs
        where 1=1
        <if test="searchVal!= null and searchVal != ''">
            and func_name like concat('%', #{searchVal}, '%')
        </if>
        <if test="userId != 0">
            and id in (
            select udf_id from t_ds_relation_udfs_user where user_id=#{userId}
            union select id as udf_id from t_ds_udfs where user_id=#{userId})
        </if>
        order by create_time desc
    </select>
    <select id="getUdfFuncByType" resultType="org.apache.dolphinscheduler.dao.entity.UdfFunc">
        select *
        from t_ds_udfs
        where type=#{type}
        <if test="userId != 0">
            and id in (
            select udf_id from t_ds_relation_udfs_user where user_id=#{userId}
            union select id as udf_id from t_ds_udfs where user_id=#{userId})
        </if>
    </select>
    <select id="queryUdfFuncExceptUserId" resultType="org.apache.dolphinscheduler.dao.entity.UdfFunc">
        select *
        from t_ds_udfs
        where user_id <![CDATA[ <> ]]> #{userId}
    </select>
    <select id="queryAuthedUdfFunc" resultType="org.apache.dolphinscheduler.dao.entity.UdfFunc">
        SELECT u.*
        from t_ds_udfs u, t_ds_relation_udfs_user rel
        WHERE u.id = rel.udf_id
        AND rel.user_id = #{userId}
    </select>
</mapper>
@ -1,31 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!-- standard ASF Apache License 2.0 header -->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.UserAlertGroupMapper">
    <delete id="deleteByAlertgroupId">
        delete from t_ds_relation_user_alertgroup
        where alertgroup_id = #{alertgroupId}
    </delete>
    <select id="listUserByAlertgroupId" resultType="org.apache.dolphinscheduler.dao.entity.User">
        SELECT u.*
        FROM t_ds_relation_user_alertgroup g_u
        JOIN t_ds_user u on g_u.user_id = u.id
        WHERE g_u.alertgroup_id = #{alertgroupId}
    </select>
</mapper>
@ -1,72 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!-- standard ASF Apache License 2.0 header -->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.UserMapper">
    <select id="queryAllGeneralUser" resultType="org.apache.dolphinscheduler.dao.entity.User">
        select * from t_ds_user
        where user_type=1
    </select>
    <select id="queryByUserNameAccurately" resultType="org.apache.dolphinscheduler.dao.entity.User">
        select * from t_ds_user
        where user_name=#{userName}
    </select>
    <select id="queryUserByNamePassword" resultType="org.apache.dolphinscheduler.dao.entity.User">
        select * from t_ds_user
        where user_name=#{userName} and user_password = #{password}
    </select>
    <select id="queryUserPaging" resultType="org.apache.dolphinscheduler.dao.entity.User">
        select u.id,u.user_name,u.user_password,u.user_type,u.email,u.phone,u.tenant_id,u.create_time,
        u.update_time,t.tenant_name,
        case when u.queue <![CDATA[ <> ]]> '' then u.queue else q.queue_name end as queue, q.queue_name
        from t_ds_user u
        left join t_ds_tenant t on u.tenant_id=t.id
        left join t_ds_queue q on t.queue_id = q.id
        where 1=1
        <if test="userName != null and userName != ''">
            and u.user_name like concat('%', #{userName}, '%')
        </if>
        order by u.update_time desc
    </select>
    <select id="queryDetailsById" resultType="org.apache.dolphinscheduler.dao.entity.User">
        select u.*, t.tenant_name,
        case when u.queue <![CDATA[ <> ]]> '' then u.queue else q.queue_name end as queue_name
        from t_ds_user u,t_ds_tenant t,t_ds_queue q
        WHERE u.tenant_id = t.id and t.queue_id = q.id and u.id = #{userId}
    </select>
    <select id="queryUserListByAlertGroupId" resultType="org.apache.dolphinscheduler.dao.entity.User">
        select u.*
        from t_ds_user u, t_ds_relation_user_alertgroup rel
        where u.id = rel.user_id AND rel.alertgroup_id = #{alertgroupId}
    </select>
    <select id="queryUserListByTenant" resultType="org.apache.dolphinscheduler.dao.entity.User">
        select *
        from t_ds_user
        where tenant_id = #{tenantId}
    </select>
    <select id="queryTenantCodeByUserId" resultType="org.apache.dolphinscheduler.dao.entity.User">
        SELECT u.*, t.tenant_code
        FROM t_ds_user u, t_ds_tenant t
        WHERE u.tenant_id = t.id AND u.id = #{userId}
    </select>
    <select id="queryUserByToken" resultType="org.apache.dolphinscheduler.dao.entity.User">
        select u.*
        from t_ds_user u, t_ds_access_token t
        where u.id = t.user_id and token=#{token} and t.expire_time > NOW()
    </select>
</mapper>
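Two details in UserMapper deserve a callout: queryUserPaging uses CASE WHEN to fall back to the tenant's default queue whenever the user has no queue of their own, and queryUserByToken refuses expired tokens by comparing expire_time against NOW(). A hedged sketch of the token lookup as rendered with a hypothetical token string:

    select u.*
    from t_ds_user u, t_ds_access_token t
    where u.id = t.user_id
      and token = '5f8d1c2e'       -- hypothetical token value
      and t.expire_time > NOW();   -- expired tokens never authenticate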
@ -1,40 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!-- standard ASF Apache License 2.0 header -->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.WorkerGroupMapper">
    <select id="queryAllWorkerGroup" resultType="org.apache.dolphinscheduler.dao.entity.WorkerGroup">
        select *
        from t_ds_worker_group
        order by update_time desc
    </select>
    <select id="queryWorkerGroupByName" resultType="org.apache.dolphinscheduler.dao.entity.WorkerGroup">
        select *
        from t_ds_worker_group
        where name = #{name}
    </select>
    <select id="queryListPaging" resultType="org.apache.dolphinscheduler.dao.entity.WorkerGroup">
        select *
        from t_ds_worker_group
        where 1 = 1
        <if test="searchVal != null and searchVal != ''">
            and name like concat('%', #{searchVal}, '%')
        </if>
        order by update_time desc
    </select>
</mapper>
@ -1,56 +0,0 @@
# standard ASF Apache License 2.0 header

#============================================================================
# Configure Main Scheduler Properties
#============================================================================
org.quartz.scheduler.instanceName = DolphinScheduler
org.quartz.scheduler.instanceId = AUTO
org.quartz.scheduler.makeSchedulerThreadDaemon = true
org.quartz.jobStore.useProperties = false

#============================================================================
# Configure ThreadPool
#============================================================================
org.quartz.threadPool.class = org.quartz.simpl.SimpleThreadPool
org.quartz.threadPool.makeThreadsDaemons = true
org.quartz.threadPool.threadCount = 25
org.quartz.threadPool.threadPriority = 5

#============================================================================
# Configure JobStore
#============================================================================
org.quartz.jobStore.class = org.quartz.impl.jdbcjobstore.JobStoreTX
org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.PostgreSQLDelegate
org.quartz.jobStore.tablePrefix = QRTZ_
org.quartz.jobStore.isClustered = true
org.quartz.jobStore.misfireThreshold = 60000
org.quartz.jobStore.clusterCheckinInterval = 5000
org.quartz.jobStore.dataSource = myDs

#============================================================================
# Configure Datasources
#============================================================================
org.quartz.dataSource.myDs.connectionProvider.class = org.apache.dolphinscheduler.server.quartz.DruidConnectionProvider
org.quartz.dataSource.myDs.driver = org.postgresql.Driver
org.quartz.dataSource.myDs.URL=jdbc:postgresql://127.0.0.1:5432/dolphinscheduler
org.quartz.dataSource.myDs.user=root
org.quartz.dataSource.myDs.password=root@123
org.quartz.dataSource.myDs.maxConnections = 10
org.quartz.dataSource.myDs.validationQuery = select 1
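With JobStoreTX, isClustered = true and clusterCheckinInterval = 5000, each scheduler instance heartbeats into the QRTZ_-prefixed tables so peers can detect a dead node and take over its triggers. As a hedged illustration (this assumes the stock Quartz JDBC schema was applied to the database; the table DDL is not part of this diff), a cluster health probe against the same PostgreSQL datasource could be:

    -- which scheduler instances have checked in, and how often they should
    select instance_name, last_checkin_time, checkin_interval
    from qrtz_scheduler_state;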
@ -1,32 +0,0 @@
# standard ASF Apache License 2.0 header

# worker execute thread num
worker.exec.threads=100

# worker heartbeat interval
worker.heartbeat.interval=10

# number of tasks to fetch and submit at a time
worker.fetch.task.num = 3

# the worker only accepts work while the CPU load average is below this value. default value: the number of CPU cores * 2
#worker.max.cpuload.avg=10

# the worker only accepts work while free memory is above this reserve. default value: physical memory * 1/6, unit is GB
worker.reserved.memory=1
@ -1,42 +0,0 @@
# standard ASF Apache License 2.0 header

# zookeeper cluster
zookeeper.quorum=127.0.0.1:2181

# dolphinscheduler root directory
zookeeper.dolphinscheduler.root=/dolphinscheduler

# zookeeper server directories
zookeeper.dolphinscheduler.dead.servers=/dolphinscheduler/dead-servers
zookeeper.dolphinscheduler.masters=/dolphinscheduler/masters
zookeeper.dolphinscheduler.workers=/dolphinscheduler/workers

# zookeeper lock directories
zookeeper.dolphinscheduler.lock.masters=/dolphinscheduler/lock/masters
zookeeper.dolphinscheduler.lock.workers=/dolphinscheduler/lock/workers

# dolphinscheduler failover directories
zookeeper.dolphinscheduler.lock.failover.masters=/dolphinscheduler/lock/failover/masters
zookeeper.dolphinscheduler.lock.failover.workers=/dolphinscheduler/lock/failover/workers
zookeeper.dolphinscheduler.lock.failover.startup.masters=/dolphinscheduler/lock/failover/startup-masters

# zookeeper session/connection timeouts and retry policy
zookeeper.session.timeout=300
zookeeper.connection.timeout=300
zookeeper.retry.sleep=1000
zookeeper.retry.maxtime=5
@ -0,0 +1,30 @@
# alert type is EMAIL/SMS
alert.type=EMAIL

# mail server configuration
mail.protocol=SMTP
mail.server.host=smtp.office365.com
mail.server.port=587
mail.sender=qiaozhanwei@outlook.com
mail.passwd=eschedulerBJEG

# TLS
mail.smtp.starttls.enable=true
# SSL
mail.smtp.ssl.enable=false

# xls file path, create it if it does not exist
xls.file.path=/tmp/xls

# Enterprise WeChat configuration
enterprise.wechat.corp.id=xxxxxxx
enterprise.wechat.secret=xxxxxxx
enterprise.wechat.agent.id=xxxxxxx
enterprise.wechat.users=xxxxxxx
enterprise.wechat.token.url=https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret
enterprise.wechat.push.url=https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token
enterprise.wechat.team.send.msg={\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}
enterprise.wechat.user.send.msg={\"touser\":\"$toUser\",\"agentid\":\"$agentId\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"$msg\"}}
@ -0,0 +1,31 @@
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
    <property name="log.base" value="logs" />
    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>
                [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
            </pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <appender name="ALERTLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${log.base}/escheduler-alert.log</file>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${log.base}/escheduler-alert.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
            <maxHistory>20</maxHistory>
            <maxFileSize>64MB</maxFileSize>
        </rollingPolicy>
        <encoder>
            <pattern>
                [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
            </pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <root level="INFO">
        <appender-ref ref="ALERTLOGFILE"/>
    </root>
</configuration>
@ -0,0 +1,42 @@
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds">
    <logger name="org.apache.zookeeper" level="WARN"/>
    <logger name="org.apache.hbase" level="WARN"/>
    <logger name="org.apache.hadoop" level="WARN"/>

    <property name="log.base" value="logs" />

    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>
                [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
            </pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <appender name="APISERVERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <!-- Log level filter -->
        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
            <level>INFO</level>
        </filter>
        <file>${log.base}/escheduler-api-server.log</file>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${log.base}/escheduler-api-server.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
            <maxHistory>168</maxHistory>
            <maxFileSize>64MB</maxFileSize>
        </rollingPolicy>
        <encoder>
            <pattern>
                [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
            </pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <root level="INFO">
        <appender-ref ref="APISERVERLOGFILE" />
    </root>
</configuration>
@ -0,0 +1,19 @@
# server port
server.port=12345

# session config
server.servlet.session.timeout=7200

server.servlet.context-path=/escheduler/

# file size limit for upload
spring.servlet.multipart.max-file-size=1024MB
spring.servlet.multipart.max-request-size=1024MB

# post content
server.jetty.max-http-post-size=5000000

spring.messages.encoding=UTF-8

# i18n classpath folder, file prefix "messages"; if there are many files, use "," as the separator
spring.messages.basename=i18n/messages
@ -0,0 +1 @@
logging.config=classpath:master_logback.xml
@ -0,0 +1,42 @@
# task queue implementation, default "zookeeper"
escheduler.queue.impl=zookeeper

# user data directory path, self configured; please make sure the directory exists and has read/write permissions
data.basedir.path=/tmp/escheduler

# directory path for user data download, self configured; please make sure the directory exists and has read/write permissions
data.download.basedir.path=/tmp/escheduler/download

# process execute directory, self configured; please make sure the directory exists and has read/write permissions
process.exec.basepath=/tmp/escheduler/exec

# user who has permission to create directories under the HDFS root path
hdfs.root.user=hdfs

# data base dir: resource files are stored under this HDFS path, self configured; please make sure the directory exists on HDFS and has read/write permissions. "/escheduler" is recommended
data.store2hdfs.basepath=/escheduler

# resource upload startup type: HDFS, S3, NONE
res.upload.startup.type=NONE

# whether kerberos is enabled
hadoop.security.authentication.startup.state=false

# java.security.krb5.conf path
java.security.krb5.conf.path=/opt/krb5.conf

# loginUserFromKeytab user
login.user.keytab.username=hdfs-mycluster@ESZ.COM

# loginUserFromKeytab path
login.user.keytab.path=/opt/hdfs.headless.keytab

# system env path, self configured; please make sure the directory and file exist and have read/write/execute permissions
escheduler.env.path=/opt/escheduler/conf/env/.escheduler_env.sh

# resource.view.suffixs
resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml

# is development state? default "false"
development.state=true
@ -0,0 +1,18 @@
# HA or single namenode. If the namenode is HA, copy core-site.xml and hdfs-site.xml
# to the conf directory. S3 is also supported, for example: s3a://escheduler
fs.defaultFS=hdfs://mycluster:8020

# s3 only: s3 endpoint
fs.s3a.endpoint=http://192.168.199.91:9010

# s3 only: s3 access key
fs.s3a.access.key=A3DXS30FO22544RE

# s3 only: s3 secret key
fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK

# resourcemanager HA: this needs IPs; leave it empty for a single resourcemanager
yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx

# for a single resourcemanager, configure only one host name; for resourcemanager HA, the default configuration is fine
yarn.application.status.address=http://ark1:8088/ws/v1/cluster/apps/%s
@ -0,0 +1,3 @@
installPath=/data1_1T/escheduler
deployUser=escheduler
ips=ark0,ark1,ark2,ark3,ark4
@ -0,0 +1,4 @@
masters=ark0,ark1
workers=ark2,ark3,ark4
alertServer=ark3
apiServers=ark1
@ -0,0 +1,53 @@
# base spring data source configuration
spring.datasource.type=com.alibaba.druid.pool.DruidDataSource
spring.datasource.driver-class-name=com.mysql.jdbc.Driver
spring.datasource.url=jdbc:mysql://127.0.0.1:3306/escheduler?characterEncoding=UTF-8
spring.datasource.username=root
spring.datasource.password=root@123

# connection configuration
spring.datasource.initialSize=5
# min connection number
spring.datasource.minIdle=5
# max connection number
spring.datasource.maxActive=50

# max wait time for getting a connection, in milliseconds. If maxWait is configured, fair locks are enabled by default, which lowers concurrency efficiency.
# If necessary, unfair locks can be used by setting the useUnfairLock attribute to true.
spring.datasource.maxWait=60000

# interval, in milliseconds, between checks that close idle connections
spring.datasource.timeBetweenEvictionRunsMillis=60000

# interval, in milliseconds, at which the Destroy thread checks connections and closes the physical connection if its idle time is greater than or equal to minEvictableIdleTimeMillis
spring.datasource.timeBetweenConnectErrorMillis=60000

# the longest time a connection remains idle without being evicted, in milliseconds
spring.datasource.minEvictableIdleTimeMillis=300000

# the SQL used to check whether a connection is valid; it must be a query statement. If validationQuery is null, testOnBorrow, testOnReturn and testWhileIdle will not work.
spring.datasource.validationQuery=SELECT 1
# timeout for the connection validity check, in seconds
spring.datasource.validationQueryTimeout=3

# when applying for a connection, if the connection has been idle longer than timeBetweenEvictionRunsMillis,
# validationQuery is executed to check whether the connection is valid
spring.datasource.testWhileIdle=true

# run the validation check when borrowing a connection from the pool
spring.datasource.testOnBorrow=true
# run the validation check when a connection is returned to the pool
spring.datasource.testOnReturn=false
spring.datasource.defaultAutoCommit=true
spring.datasource.keepAlive=true

# open PSCache and specify the PSCache size for every connection
spring.datasource.poolPreparedStatements=true
spring.datasource.maxPoolPreparedStatementPerConnectionSize=20

# data quality analysis is not currently in use; please ignore the following configuration
# task record flag
task.record.flag=false
task.record.datasource.url=jdbc:mysql://192.168.xx.xx:3306/etl?characterEncoding=UTF-8
task.record.datasource.username=xx
task.record.datasource.password=xx
@ -0,0 +1,3 @@
export PYTHON_HOME=/usr/bin/python
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
export PATH=$PYTHON_HOME:$JAVA_HOME/bin:$PATH
@ -0,0 +1,235 @@
QUERY_SCHEDULE_LIST_NOTES=query schedule list
EXECUTE_PROCESS_TAG=execute process related operation
PROCESS_INSTANCE_EXECUTOR_TAG=process instance executor related operation
RUN_PROCESS_INSTANCE_NOTES=run process instance
START_NODE_LIST=start node list(node name)
TASK_DEPEND_TYPE=task depend type
COMMAND_TYPE=command type
RUN_MODE=run mode
TIMEOUT=timeout
EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=execute action to process instance
EXECUTE_TYPE=execute type
START_CHECK_PROCESS_DEFINITION_NOTES=start check process definition
GET_RECEIVER_CC_NOTES=query receiver cc
DESC=description
GROUP_NAME=group name
GROUP_TYPE=group type
QUERY_ALERT_GROUP_LIST_NOTES=query alert group list
UPDATE_ALERT_GROUP_NOTES=update alert group
DELETE_ALERT_GROUP_BY_ID_NOTES=delete alert group by id
VERIFY_ALERT_GROUP_NAME_NOTES=verify alert group name, check whether the alert group exists or not
GRANT_ALERT_GROUP_NOTES=grant alert group
USER_IDS=user id list
ALERT_GROUP_TAG=alert group related operation
CREATE_ALERT_GROUP_NOTES=create alert group
WORKER_GROUP_TAG=worker group related operation
SAVE_WORKER_GROUP_NOTES=create worker group
WORKER_GROUP_NAME=worker group name
WORKER_IP_LIST=worker ip list, e.g. 192.168.1.1,192.168.1.2
QUERY_WORKER_GROUP_PAGING_NOTES=query worker group paging
QUERY_WORKER_GROUP_LIST_NOTES=query worker group list
DELETE_WORKER_GROUP_BY_ID_NOTES=delete worker group by id
DATA_ANALYSIS_TAG=task state analysis related operation
COUNT_TASK_STATE_NOTES=count task state
COUNT_PROCESS_INSTANCE_NOTES=count process instance state
COUNT_PROCESS_DEFINITION_BY_USER_NOTES=count process definition by user
COUNT_COMMAND_STATE_NOTES=count command state
COUNT_QUEUE_STATE_NOTES=count the running status of the tasks in the queue

ACCESS_TOKEN_TAG=access token related operation
MONITOR_TAG=monitor related operation
MASTER_LIST_NOTES=master server list
WORKER_LIST_NOTES=worker server list
QUERY_DATABASE_STATE_NOTES=query database state
QUERY_ZOOKEEPER_STATE_NOTES=query zookeeper state
TASK_STATE=task instance state
SOURCE_TABLE=source table
DEST_TABLE=dest table
TASK_DATE=task date
QUERY_HISTORY_TASK_RECORD_LIST_PAGING_NOTES=query history task record list paging
DATA_SOURCE_TAG=data source related operation
CREATE_DATA_SOURCE_NOTES=create data source
DATA_SOURCE_NAME=data source name
DATA_SOURCE_NOTE=data source desc
DB_TYPE=database type
DATA_SOURCE_HOST=data source host
DATA_SOURCE_PORT=data source port
DATABASE_NAME=database name
QUEUE_TAG=queue related operation
QUERY_QUEUE_LIST_NOTES=query queue list
QUERY_QUEUE_LIST_PAGING_NOTES=query queue list paging
CREATE_QUEUE_NOTES=create queue
YARN_QUEUE_NAME=yarn(hadoop) queue name
QUEUE_ID=queue id
TENANT_DESC=tenant desc
QUERY_TENANT_LIST_PAGING_NOTES=query tenant list paging
QUERY_TENANT_LIST_NOTES=query tenant list
UPDATE_TENANT_NOTES=update tenant
DELETE_TENANT_NOTES=delete tenant
RESOURCES_TAG=resource center related operation
CREATE_RESOURCE_NOTES=create resource
RESOURCE_TYPE=resource file type
RESOURCE_NAME=resource name
RESOURCE_DESC=resource file desc
RESOURCE_FILE=resource file
RESOURCE_ID=resource id
QUERY_RESOURCE_LIST_NOTES=query resource list
DELETE_RESOURCE_BY_ID_NOTES=delete resource by id
VIEW_RESOURCE_BY_ID_NOTES=view resource by id
ONLINE_CREATE_RESOURCE_NOTES=online create resource
SUFFIX=resource file suffix
CONTENT=resource file content
UPDATE_RESOURCE_NOTES=edit resource file online
DOWNLOAD_RESOURCE_NOTES=download resource file
CREATE_UDF_FUNCTION_NOTES=create udf function
UDF_TYPE=UDF type
FUNC_NAME=function name
CLASS_NAME=package and class name
ARG_TYPES=arguments
UDF_DESC=udf desc
VIEW_UDF_FUNCTION_NOTES=view udf function
UPDATE_UDF_FUNCTION_NOTES=update udf function
QUERY_UDF_FUNCTION_LIST_PAGING_NOTES=query udf function list paging
VERIFY_UDF_FUNCTION_NAME_NOTES=verify udf function name
DELETE_UDF_FUNCTION_NOTES=delete udf function
AUTHORIZED_FILE_NOTES=authorized file
UNAUTHORIZED_FILE_NOTES=unauthorized file
AUTHORIZED_UDF_FUNC_NOTES=authorized udf func
UNAUTHORIZED_UDF_FUNC_NOTES=unauthorized udf func
VERIFY_QUEUE_NOTES=verify queue
TENANT_TAG=tenant related operation
CREATE_TENANT_NOTES=create tenant
TENANT_CODE=tenant code
TENANT_NAME=tenant name
QUEUE_NAME=queue name
PASSWORD=password
DATA_SOURCE_OTHER=jdbc connection params, format:{"key1":"value1",...}
PROJECT_TAG=project related operation
CREATE_PROJECT_NOTES=create project
PROJECT_DESC=project description
UPDATE_PROJECT_NOTES=update project
PROJECT_ID=project id
QUERY_PROJECT_BY_ID_NOTES=query project info by project id
QUERY_PROJECT_LIST_PAGING_NOTES=query project list paging
QUERY_ALL_PROJECT_LIST_NOTES=query all project list
DELETE_PROJECT_BY_ID_NOTES=delete project by id
QUERY_UNAUTHORIZED_PROJECT_NOTES=query unauthorized project
QUERY_AUTHORIZED_PROJECT_NOTES=query authorized project
TASK_RECORD_TAG=task record related operation
QUERY_TASK_RECORD_LIST_PAGING_NOTES=query task record list paging
CREATE_TOKEN_NOTES=create token, note: please login first
QUERY_ACCESS_TOKEN_LIST_NOTES=query access token list paging
SCHEDULE=schedule
WARNING_TYPE=warning type(sending strategy)
WARNING_GROUP_ID=warning group id
FAILURE_STRATEGY=failure strategy
RECEIVERS=receivers
RECEIVERS_CC=receivers cc
WORKER_GROUP_ID=worker server group id
PROCESS_INSTANCE_PRIORITY=process instance priority
UPDATE_SCHEDULE_NOTES=update schedule
SCHEDULE_ID=schedule id
ONLINE_SCHEDULE_NOTES=online schedule
OFFLINE_SCHEDULE_NOTES=offline schedule
QUERY_SCHEDULE_NOTES=query schedule
QUERY_SCHEDULE_LIST_PAGING_NOTES=query schedule list paging
LOGIN_TAG=user login related operation
USER_NAME=user name
PROJECT_NAME=project name
CREATE_PROCESS_DEFINITION_NOTES=create process definition
PROCESS_DEFINITION_NAME=process definition name
PROCESS_DEFINITION_JSON=process definition detail info (json format)
PROCESS_DEFINITION_LOCATIONS=process definition node locations info (json format)
PROCESS_INSTANCE_LOCATIONS=process instance node locations info (json format)
PROCESS_DEFINITION_CONNECTS=process definition node connects info (json format)
PROCESS_INSTANCE_CONNECTS=process instance node connects info (json format)
PROCESS_DEFINITION_DESC=process definition desc
PROCESS_DEFINITION_TAG=process definition related operation
SIGNOUT_NOTES=logout
USER_PASSWORD=user password
UPDATE_PROCESS_INSTANCE_NOTES=update process instance
QUERY_PROCESS_INSTANCE_LIST_NOTES=query process instance list
VERIFY_PROCCESS_DEFINITION_NAME_NOTES=verify process definition name
LOGIN_NOTES=user login
UPDATE_PROCCESS_DEFINITION_NOTES=update process definition
PROCESS_DEFINITION_ID=process definition id
PROCESS_DEFINITION_IDS=process definition ids
RELEASE_PROCCESS_DEFINITION_NOTES=release process definition
||||||
|
QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=query proccess definition by id |
||||||
|
QUERY_PROCCESS_DEFINITION_LIST_NOTES=query proccess definition list |
||||||
|
QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=query proccess definition list paging |
||||||
|
QUERY_ALL_DEFINITION_LIST_NOTES=query all definition list |
||||||
|
PAGE_NO=page no |
||||||
|
PROCESS_INSTANCE_ID=process instance id |
||||||
|
PROCESS_INSTANCE_JSON=process instance info(json format) |
||||||
|
SCHEDULE_TIME=schedule time |
||||||
|
SYNC_DEFINE=update the information of the process instance to the process definition\ |
||||||
|
|
||||||
|
RECOVERY_PROCESS_INSTANCE_FLAG=whether to recovery process instance |
||||||
|
SEARCH_VAL=search val |
||||||
|
USER_ID=user id |
||||||
|
PAGE_SIZE=page size |
||||||
|
LIMIT=limit |
||||||
|
VIEW_TREE_NOTES=view tree |
||||||
|
GET_NODE_LIST_BY_DEFINITION_ID_NOTES=get task node list by process definition id |
||||||
|
PROCESS_DEFINITION_ID_LIST=process definition id list |
||||||
|
QUERY_PROCCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=query proccess definition all by project id |
||||||
|
DELETE_PROCESS_DEFINITION_BY_ID_NOTES=delete process definition by process definition id |
||||||
|
BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=batch delete process definition by process definition ids |
||||||
|
QUERY_PROCESS_INSTANCE_BY_ID_NOTES=query process instance by process instance id |
||||||
|
DELETE_PROCESS_INSTANCE_BY_ID_NOTES=delete process instance by process instance id |
||||||
|
TASK_ID=task instance id |
||||||
|
SKIP_LINE_NUM=skip line num |
||||||
|
QUERY_TASK_INSTANCE_LOG_NOTES=query task instance log |
||||||
|
DOWNLOAD_TASK_INSTANCE_LOG_NOTES=download task instance log |
||||||
|
USERS_TAG=users related operation |
||||||
|
SCHEDULER_TAG=scheduler related operation |
||||||
|
CREATE_SCHEDULE_NOTES=create schedule |
||||||
|
CREATE_USER_NOTES=create user |
||||||
|
TENANT_ID=tenant id |
||||||
|
QUEUE=queue |
||||||
|
EMAIL=email |
||||||
|
PHONE=phone |
||||||
|
QUERY_USER_LIST_NOTES=query user list |
||||||
|
UPDATE_USER_NOTES=update user |
||||||
|
DELETE_USER_BY_ID_NOTES=delete user by id |
||||||
|
GRANT_PROJECT_NOTES=GRANT PROJECT |
||||||
|
PROJECT_IDS=project ids(string format, multiple projects separated by ",") |
||||||
|
GRANT_RESOURCE_NOTES=grant resource file |
||||||
|
RESOURCE_IDS=resource ids(string format, multiple resources separated by ",") |
||||||
|
GET_USER_INFO_NOTES=get user info |
||||||
|
LIST_USER_NOTES=list user |
||||||
|
VERIFY_USER_NAME_NOTES=verify user name |
||||||
|
UNAUTHORIZED_USER_NOTES=cancel authorization |
||||||
|
ALERT_GROUP_ID=alert group id |
||||||
|
AUTHORIZED_USER_NOTES=authorized user |
||||||
|
GRANT_UDF_FUNC_NOTES=grant udf function |
||||||
|
UDF_IDS=udf ids(string format, multiple udf functions separated by ",") |
||||||
|
GRANT_DATASOURCE_NOTES=grant datasource |
||||||
|
DATASOURCE_IDS=datasource ids(string format, multiple datasources separated by ",") |
||||||
|
QUERY_SUBPROCESS_INSTANCE_BY_TASK_ID_NOTES=query subprocess instance by task instance id |
||||||
|
QUERY_PARENT_PROCESS_INSTANCE_BY_SUB_PROCESS_INSTANCE_ID_NOTES=query parent process instance info by sub process instance id |
||||||
|
QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES=query process instance global variables and local variables |
||||||
|
VIEW_GANTT_NOTES=view gantt |
||||||
|
SUB_PROCESS_INSTANCE_ID=sub process instance id |
||||||
|
TASK_NAME=task instance name |
||||||
|
TASK_INSTANCE_TAG=task instance related operation |
||||||
|
LOGGER_TAG=log related operation |
||||||
|
PROCESS_INSTANCE_TAG=process instance related operation |
||||||
|
EXECUTION_STATUS=runing status for workflow and task nodes |
||||||
|
HOST=ip address of running task |
||||||
|
START_DATE=start date |
||||||
|
END_DATE=end date |
||||||
|
QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES=query task list by process instance id |
||||||
|
UPDATE_DATA_SOURCE_NOTES=update data source |
||||||
|
DATA_SOURCE_ID=DATA SOURCE ID |
||||||
|
QUERY_DATA_SOURCE_NOTES=query data source by id |
||||||
|
QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES=query data source list by database type |
||||||
|
QUERY_DATA_SOURCE_LIST_PAGING_NOTES=query data source list paging |
||||||
|
CONNECT_DATA_SOURCE_NOTES=CONNECT DATA SOURCE |
||||||
|
CONNECT_DATA_SOURCE_TEST_NOTES=connect data source test |
||||||
|
DELETE_DATA_SOURCE_NOTES=delete data source |
||||||
|
VERIFY_DATA_SOURCE_NOTES=verify data source |
||||||
|
UNAUTHORIZED_DATA_SOURCE_NOTES=unauthorized data source |
||||||
|
AUTHORIZED_DATA_SOURCE_NOTES=authorized data source |
||||||
|
DELETE_SCHEDULER_BY_ID_NOTES=delete scheduler by id |
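
The entries above are a standard Java `key=value` resource bundle; the `*_TAG` / `*_NOTES` naming suggests they feed Swagger `@Api` tags and `@ApiOperation` notes. A minimal sketch of resolving one key (the bundle base name `i18n.messages` is an assumption for illustration, not the project's actual path):

```java
import java.util.Locale;
import java.util.ResourceBundle;

public class SwaggerNotesLookup {
    public static void main(String[] args) {
        // base name "i18n.messages" is illustrative; the keys are the ones defined above
        ResourceBundle bundle = ResourceBundle.getBundle("i18n.messages", Locale.US);
        System.out.println(bundle.getString("CREATE_PROCESS_DEFINITION_NOTES")); // create process definition
        System.out.println(bundle.getString("QUERY_WORKER_GROUP_LIST_NOTES"));   // query worker group list
    }
}
```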
@ -0,0 +1,235 @@ |
QUERY_SCHEDULE_LIST_NOTES=query schedule list
EXECUTE_PROCESS_TAG=execute process related operation
PROCESS_INSTANCE_EXECUTOR_TAG=process instance executor related operation
RUN_PROCESS_INSTANCE_NOTES=run process instance
START_NODE_LIST=start node list(node name)
TASK_DEPEND_TYPE=task depend type
COMMAND_TYPE=command type
RUN_MODE=run mode
TIMEOUT=timeout
EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=execute action to process instance
EXECUTE_TYPE=execute type
START_CHECK_PROCESS_DEFINITION_NOTES=start check process definition
GET_RECEIVER_CC_NOTES=query receiver cc
DESC=description
GROUP_NAME=group name
GROUP_TYPE=group type
QUERY_ALERT_GROUP_LIST_NOTES=query alert group list
UPDATE_ALERT_GROUP_NOTES=update alert group
DELETE_ALERT_GROUP_BY_ID_NOTES=delete alert group by id
VERIFY_ALERT_GROUP_NAME_NOTES=verify alert group name, check whether the alert group exists
GRANT_ALERT_GROUP_NOTES=grant alert group
USER_IDS=user id list
ALERT_GROUP_TAG=alert group related operation
CREATE_ALERT_GROUP_NOTES=create alert group
WORKER_GROUP_TAG=worker group related operation
SAVE_WORKER_GROUP_NOTES=create worker group
WORKER_GROUP_NAME=worker group name
WORKER_IP_LIST=worker ip list, eg. 192.168.1.1,192.168.1.2
QUERY_WORKER_GROUP_PAGING_NOTES=query worker group paging
QUERY_WORKER_GROUP_LIST_NOTES=query worker group list
DELETE_WORKER_GROUP_BY_ID_NOTES=delete worker group by id
DATA_ANALYSIS_TAG=analysis related operation of task state
COUNT_TASK_STATE_NOTES=count task state
COUNT_PROCESS_INSTANCE_NOTES=count process instance state
COUNT_PROCESS_DEFINITION_BY_USER_NOTES=count process definition by user
COUNT_COMMAND_STATE_NOTES=count command state
COUNT_QUEUE_STATE_NOTES=count the running status of the task in the queue
ACCESS_TOKEN_TAG=access token related operation
MONITOR_TAG=monitor related operation
MASTER_LIST_NOTES=master server list
WORKER_LIST_NOTES=worker server list
QUERY_DATABASE_STATE_NOTES=query database state
QUERY_ZOOKEEPER_STATE_NOTES=query zookeeper state
TASK_STATE=task instance state
SOURCE_TABLE=source table
DEST_TABLE=dest table
TASK_DATE=task date
QUERY_HISTORY_TASK_RECORD_LIST_PAGING_NOTES=query history task record list paging
DATA_SOURCE_TAG=data source related operation
CREATE_DATA_SOURCE_NOTES=create data source
DATA_SOURCE_NAME=data source name
DATA_SOURCE_NOTE=data source desc
DB_TYPE=database type
DATA_SOURCE_HOST=data source host
DATA_SOURCE_PORT=data source port
DATABASE_NAME=database name
QUEUE_TAG=queue related operation
QUERY_QUEUE_LIST_NOTES=query queue list
QUERY_QUEUE_LIST_PAGING_NOTES=query queue list paging
CREATE_QUEUE_NOTES=create queue
YARN_QUEUE_NAME=yarn(hadoop) queue name
QUEUE_ID=queue id
TENANT_DESC=tenant desc
QUERY_TENANT_LIST_PAGING_NOTES=query tenant list paging
QUERY_TENANT_LIST_NOTES=query tenant list
UPDATE_TENANT_NOTES=update tenant
DELETE_TENANT_NOTES=delete tenant
RESOURCES_TAG=resource center related operation
CREATE_RESOURCE_NOTES=create resource
RESOURCE_TYPE=resource file type
RESOURCE_NAME=resource name
RESOURCE_DESC=resource file desc
RESOURCE_FILE=resource file
RESOURCE_ID=resource id
QUERY_RESOURCE_LIST_NOTES=query resource list
DELETE_RESOURCE_BY_ID_NOTES=delete resource by id
VIEW_RESOURCE_BY_ID_NOTES=view resource by id
ONLINE_CREATE_RESOURCE_NOTES=online create resource
SUFFIX=resource file suffix
CONTENT=resource file content
UPDATE_RESOURCE_NOTES=edit resource file online
DOWNLOAD_RESOURCE_NOTES=download resource file
CREATE_UDF_FUNCTION_NOTES=create udf function
UDF_TYPE=UDF type
FUNC_NAME=function name
CLASS_NAME=package and class name
ARG_TYPES=arguments
UDF_DESC=udf desc
VIEW_UDF_FUNCTION_NOTES=view udf function
UPDATE_UDF_FUNCTION_NOTES=update udf function
QUERY_UDF_FUNCTION_LIST_PAGING_NOTES=query udf function list paging
VERIFY_UDF_FUNCTION_NAME_NOTES=verify udf function name
DELETE_UDF_FUNCTION_NOTES=delete udf function
AUTHORIZED_FILE_NOTES=authorized file
UNAUTHORIZED_FILE_NOTES=unauthorized file
AUTHORIZED_UDF_FUNC_NOTES=authorized udf func
UNAUTHORIZED_UDF_FUNC_NOTES=unauthorized udf func
VERIFY_QUEUE_NOTES=verify queue
TENANT_TAG=tenant related operation
CREATE_TENANT_NOTES=create tenant
TENANT_CODE=tenant code
TENANT_NAME=tenant name
QUEUE_NAME=queue name
PASSWORD=password
DATA_SOURCE_OTHER=jdbc connection params, format:{"key1":"value1",...}
PROJECT_TAG=project related operation
CREATE_PROJECT_NOTES=create project
PROJECT_DESC=project description
UPDATE_PROJECT_NOTES=update project
PROJECT_ID=project id
QUERY_PROJECT_BY_ID_NOTES=query project info by project id
QUERY_PROJECT_LIST_PAGING_NOTES=query project list paging
QUERY_ALL_PROJECT_LIST_NOTES=query all project list
DELETE_PROJECT_BY_ID_NOTES=delete project by id
QUERY_UNAUTHORIZED_PROJECT_NOTES=query unauthorized project
QUERY_AUTHORIZED_PROJECT_NOTES=query authorized project
TASK_RECORD_TAG=task record related operation
QUERY_TASK_RECORD_LIST_PAGING_NOTES=query task record list paging
CREATE_TOKEN_NOTES=create token, note: please login first
QUERY_ACCESS_TOKEN_LIST_NOTES=query access token list paging
SCHEDULE=schedule
WARNING_TYPE=warning type(sending strategy)
WARNING_GROUP_ID=warning group id
FAILURE_STRATEGY=failure strategy
RECEIVERS=receivers
RECEIVERS_CC=receivers cc
WORKER_GROUP_ID=worker server group id
PROCESS_INSTANCE_PRIORITY=process instance priority
UPDATE_SCHEDULE_NOTES=update schedule
SCHEDULE_ID=schedule id
ONLINE_SCHEDULE_NOTES=online schedule
OFFLINE_SCHEDULE_NOTES=offline schedule
QUERY_SCHEDULE_NOTES=query schedule
QUERY_SCHEDULE_LIST_PAGING_NOTES=query schedule list paging
LOGIN_TAG=user login related operation
USER_NAME=user name
PROJECT_NAME=project name
CREATE_PROCESS_DEFINITION_NOTES=create process definition
PROCESS_DEFINITION_NAME=process definition name
PROCESS_DEFINITION_JSON=process definition detail info (json format)
PROCESS_DEFINITION_LOCATIONS=process definition node locations info (json format)
PROCESS_INSTANCE_LOCATIONS=process instance node locations info (json format)
PROCESS_DEFINITION_CONNECTS=process definition node connects info (json format)
PROCESS_INSTANCE_CONNECTS=process instance node connects info (json format)
PROCESS_DEFINITION_DESC=process definition desc
PROCESS_DEFINITION_TAG=process definition related operation
SIGNOUT_NOTES=logout
USER_PASSWORD=user password
UPDATE_PROCESS_INSTANCE_NOTES=update process instance
QUERY_PROCESS_INSTANCE_LIST_NOTES=query process instance list
VERIFY_PROCCESS_DEFINITION_NAME_NOTES=verify process definition name
LOGIN_NOTES=user login
UPDATE_PROCCESS_DEFINITION_NOTES=update process definition
PROCESS_DEFINITION_ID=process definition id
PROCESS_DEFINITION_IDS=process definition ids
RELEASE_PROCCESS_DEFINITION_NOTES=release process definition
QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=query process definition by id
QUERY_PROCCESS_DEFINITION_LIST_NOTES=query process definition list
QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=query process definition list paging
QUERY_ALL_DEFINITION_LIST_NOTES=query all definition list
PAGE_NO=page no
PROCESS_INSTANCE_ID=process instance id
PROCESS_INSTANCE_JSON=process instance info(json format)
SCHEDULE_TIME=schedule time
SYNC_DEFINE=whether to update the information of the process instance to the process definition
RECOVERY_PROCESS_INSTANCE_FLAG=whether to recover the process instance
SEARCH_VAL=search val
USER_ID=user id
PAGE_SIZE=page size
LIMIT=limit
VIEW_TREE_NOTES=view tree
GET_NODE_LIST_BY_DEFINITION_ID_NOTES=get task node list by process definition id
PROCESS_DEFINITION_ID_LIST=process definition id list
QUERY_PROCCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=query all process definitions by project id
DELETE_PROCESS_DEFINITION_BY_ID_NOTES=delete process definition by process definition id
BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=batch delete process definitions by process definition ids
QUERY_PROCESS_INSTANCE_BY_ID_NOTES=query process instance by process instance id
DELETE_PROCESS_INSTANCE_BY_ID_NOTES=delete process instance by process instance id
TASK_ID=task instance id
SKIP_LINE_NUM=skip line num
QUERY_TASK_INSTANCE_LOG_NOTES=query task instance log
DOWNLOAD_TASK_INSTANCE_LOG_NOTES=download task instance log
USERS_TAG=users related operation
SCHEDULER_TAG=scheduler related operation
CREATE_SCHEDULE_NOTES=create schedule
CREATE_USER_NOTES=create user
TENANT_ID=tenant id
QUEUE=queue
EMAIL=email
PHONE=phone
QUERY_USER_LIST_NOTES=query user list
UPDATE_USER_NOTES=update user
DELETE_USER_BY_ID_NOTES=delete user by id
GRANT_PROJECT_NOTES=grant project
PROJECT_IDS=project ids(string format, multiple projects separated by ",")
GRANT_RESOURCE_NOTES=grant resource file
RESOURCE_IDS=resource ids(string format, multiple resources separated by ",")
GET_USER_INFO_NOTES=get user info
LIST_USER_NOTES=list user
VERIFY_USER_NAME_NOTES=verify user name
UNAUTHORIZED_USER_NOTES=cancel authorization
ALERT_GROUP_ID=alert group id
AUTHORIZED_USER_NOTES=authorized user
GRANT_UDF_FUNC_NOTES=grant udf function
UDF_IDS=udf ids(string format, multiple udf functions separated by ",")
GRANT_DATASOURCE_NOTES=grant datasource
DATASOURCE_IDS=datasource ids(string format, multiple datasources separated by ",")
QUERY_SUBPROCESS_INSTANCE_BY_TASK_ID_NOTES=query subprocess instance by task instance id
QUERY_PARENT_PROCESS_INSTANCE_BY_SUB_PROCESS_INSTANCE_ID_NOTES=query parent process instance info by sub process instance id
QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES=query process instance global variables and local variables
VIEW_GANTT_NOTES=view gantt
SUB_PROCESS_INSTANCE_ID=sub process instance id
TASK_NAME=task instance name
TASK_INSTANCE_TAG=task instance related operation
LOGGER_TAG=log related operation
PROCESS_INSTANCE_TAG=process instance related operation
EXECUTION_STATUS=running status for workflow and task nodes
HOST=ip address of running task
START_DATE=start date
END_DATE=end date
QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES=query task list by process instance id
UPDATE_DATA_SOURCE_NOTES=update data source
DATA_SOURCE_ID=data source id
QUERY_DATA_SOURCE_NOTES=query data source by id
QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES=query data source list by database type
QUERY_DATA_SOURCE_LIST_PAGING_NOTES=query data source list paging
CONNECT_DATA_SOURCE_NOTES=connect data source
CONNECT_DATA_SOURCE_TEST_NOTES=connect data source test
DELETE_DATA_SOURCE_NOTES=delete data source
VERIFY_DATA_SOURCE_NOTES=verify data source
UNAUTHORIZED_DATA_SOURCE_NOTES=unauthorized data source
AUTHORIZED_DATA_SOURCE_NOTES=authorized data source
DELETE_SCHEDULER_BY_ID_NOTES=delete scheduler by id
@ -0,0 +1,233 @@ |
QUERY_SCHEDULE_LIST_NOTES=查询定时列表
PROCESS_INSTANCE_EXECUTOR_TAG=流程实例执行相关操作
RUN_PROCESS_INSTANCE_NOTES=运行流程实例
START_NODE_LIST=开始节点列表(节点name)
TASK_DEPEND_TYPE=任务依赖类型
COMMAND_TYPE=指令类型
RUN_MODE=运行模式
TIMEOUT=超时时间
EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=执行流程实例的各种操作(暂停、停止、重跑、恢复等)
EXECUTE_TYPE=执行类型
START_CHECK_PROCESS_DEFINITION_NOTES=检查流程定义
DESC=备注(描述)
GROUP_NAME=组名称
GROUP_TYPE=组类型
QUERY_ALERT_GROUP_LIST_NOTES=告警组列表
UPDATE_ALERT_GROUP_NOTES=编辑(更新)告警组
DELETE_ALERT_GROUP_BY_ID_NOTES=删除告警组通过ID
VERIFY_ALERT_GROUP_NAME_NOTES=检查告警组是否存在
GRANT_ALERT_GROUP_NOTES=授权告警组
USER_IDS=用户ID列表
ALERT_GROUP_TAG=告警组相关操作
WORKER_GROUP_TAG=Worker分组管理
SAVE_WORKER_GROUP_NOTES=创建Worker分组
WORKER_GROUP_NAME=Worker分组名称
WORKER_IP_LIST=Worker ip列表,注意:多个IP地址以逗号分割
QUERY_WORKER_GROUP_PAGING_NOTES=Worker分组管理
QUERY_WORKER_GROUP_LIST_NOTES=查询worker group分组
DELETE_WORKER_GROUP_BY_ID_NOTES=删除worker group通过ID
DATA_ANALYSIS_TAG=任务状态分析相关操作
COUNT_TASK_STATE_NOTES=任务状态统计
COUNT_PROCESS_INSTANCE_NOTES=统计流程实例状态
COUNT_PROCESS_DEFINITION_BY_USER_NOTES=统计用户创建的流程定义
COUNT_COMMAND_STATE_NOTES=统计命令状态
COUNT_QUEUE_STATE_NOTES=统计队列里任务状态
ACCESS_TOKEN_TAG=access token相关操作,需要先登录
MONITOR_TAG=监控相关操作
MASTER_LIST_NOTES=master服务列表
WORKER_LIST_NOTES=worker服务列表
QUERY_DATABASE_STATE_NOTES=查询数据库状态
QUERY_ZOOKEEPER_STATE_NOTES=查询Zookeeper状态
TASK_STATE=任务实例状态
SOURCE_TABLE=源表
DEST_TABLE=目标表
TASK_DATE=任务时间
QUERY_HISTORY_TASK_RECORD_LIST_PAGING_NOTES=分页查询历史任务记录列表
DATA_SOURCE_TAG=数据源相关操作
CREATE_DATA_SOURCE_NOTES=创建数据源
DATA_SOURCE_NAME=数据源名称
DATA_SOURCE_NOTE=数据源描述
DB_TYPE=数据源类型
DATA_SOURCE_HOST=IP主机名
DATA_SOURCE_PORT=数据源端口
DATABASE_NAME=数据库名
QUEUE_TAG=队列相关操作
QUERY_QUEUE_LIST_NOTES=查询队列列表
QUERY_QUEUE_LIST_PAGING_NOTES=分页查询队列列表
CREATE_QUEUE_NOTES=创建队列
YARN_QUEUE_NAME=hadoop yarn队列名
QUEUE_ID=队列ID
TENANT_DESC=租户描述
QUERY_TENANT_LIST_PAGING_NOTES=分页查询租户列表
QUERY_TENANT_LIST_NOTES=查询租户列表
UPDATE_TENANT_NOTES=更新租户
DELETE_TENANT_NOTES=删除租户
RESOURCES_TAG=资源中心相关操作
CREATE_RESOURCE_NOTES=创建资源
RESOURCE_TYPE=资源文件类型
RESOURCE_NAME=资源文件名称
RESOURCE_DESC=资源文件描述
RESOURCE_FILE=资源文件
RESOURCE_ID=资源ID
QUERY_RESOURCE_LIST_NOTES=查询资源列表
DELETE_RESOURCE_BY_ID_NOTES=删除资源通过ID
VIEW_RESOURCE_BY_ID_NOTES=浏览资源通过ID
ONLINE_CREATE_RESOURCE_NOTES=在线创建资源
SUFFIX=资源文件后缀
CONTENT=资源文件内容
UPDATE_RESOURCE_NOTES=在线更新资源文件
DOWNLOAD_RESOURCE_NOTES=下载资源文件
CREATE_UDF_FUNCTION_NOTES=创建UDF函数
UDF_TYPE=UDF类型
FUNC_NAME=函数名称
CLASS_NAME=包名类名
ARG_TYPES=参数
UDF_DESC=udf描述,使用说明
VIEW_UDF_FUNCTION_NOTES=查看udf函数
UPDATE_UDF_FUNCTION_NOTES=更新udf函数
QUERY_UDF_FUNCTION_LIST_PAGING_NOTES=分页查询udf函数列表
VERIFY_UDF_FUNCTION_NAME_NOTES=验证udf函数名
DELETE_UDF_FUNCTION_NOTES=删除UDF函数
AUTHORIZED_FILE_NOTES=授权文件
UNAUTHORIZED_FILE_NOTES=取消授权文件
AUTHORIZED_UDF_FUNC_NOTES=授权udf函数
UNAUTHORIZED_UDF_FUNC_NOTES=取消udf函数授权
VERIFY_QUEUE_NOTES=验证队列
TENANT_TAG=租户相关操作
CREATE_TENANT_NOTES=创建租户
TENANT_CODE=租户编码
TENANT_NAME=租户名称
QUEUE_NAME=队列名
PASSWORD=密码
DATA_SOURCE_OTHER=jdbc连接参数,格式为:{"key1":"value1",...}
PROJECT_TAG=项目相关操作
CREATE_PROJECT_NOTES=创建项目
PROJECT_DESC=项目描述
UPDATE_PROJECT_NOTES=更新项目
PROJECT_ID=项目ID
QUERY_PROJECT_BY_ID_NOTES=通过项目ID查询项目信息
QUERY_PROJECT_LIST_PAGING_NOTES=分页查询项目列表
QUERY_ALL_PROJECT_LIST_NOTES=查询所有项目
DELETE_PROJECT_BY_ID_NOTES=删除项目通过ID
QUERY_UNAUTHORIZED_PROJECT_NOTES=查询未授权的项目
QUERY_AUTHORIZED_PROJECT_NOTES=查询授权项目
TASK_RECORD_TAG=任务记录相关操作
QUERY_TASK_RECORD_LIST_PAGING_NOTES=分页查询任务记录列表
CREATE_TOKEN_NOTES=创建token,注意需要先登录
QUERY_ACCESS_TOKEN_LIST_NOTES=分页查询access token列表
SCHEDULE=定时
WARNING_TYPE=发送策略
WARNING_GROUP_ID=发送组ID
FAILURE_STRATEGY=失败策略
RECEIVERS=收件人
RECEIVERS_CC=收件人(抄送)
WORKER_GROUP_ID=Worker Server分组ID
PROCESS_INSTANCE_PRIORITY=流程实例优先级
UPDATE_SCHEDULE_NOTES=更新定时
SCHEDULE_ID=定时ID
ONLINE_SCHEDULE_NOTES=定时上线
OFFLINE_SCHEDULE_NOTES=定时下线
QUERY_SCHEDULE_NOTES=查询定时
QUERY_SCHEDULE_LIST_PAGING_NOTES=分页查询定时
LOGIN_TAG=用户登录相关操作
USER_NAME=用户名
PROJECT_NAME=项目名称
CREATE_PROCESS_DEFINITION_NOTES=创建流程定义
PROCESS_DEFINITION_NAME=流程定义名称
PROCESS_DEFINITION_JSON=流程定义详细信息(json格式)
PROCESS_DEFINITION_LOCATIONS=流程定义节点坐标位置信息(json格式)
PROCESS_INSTANCE_LOCATIONS=流程实例节点坐标位置信息(json格式)
PROCESS_DEFINITION_CONNECTS=流程定义节点图标连接信息(json格式)
PROCESS_INSTANCE_CONNECTS=流程实例节点图标连接信息(json格式)
PROCESS_DEFINITION_DESC=流程定义描述信息
PROCESS_DEFINITION_TAG=流程定义相关操作
SIGNOUT_NOTES=退出登录
USER_PASSWORD=用户密码
UPDATE_PROCESS_INSTANCE_NOTES=更新流程实例
QUERY_PROCESS_INSTANCE_LIST_NOTES=查询流程实例列表
VERIFY_PROCCESS_DEFINITION_NAME_NOTES=验证流程定义名字
LOGIN_NOTES=用户登录
UPDATE_PROCCESS_DEFINITION_NOTES=更新流程定义
PROCESS_DEFINITION_ID=流程定义ID
RELEASE_PROCCESS_DEFINITION_NOTES=发布流程定义
QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=查询流程定义通过流程定义ID
QUERY_PROCCESS_DEFINITION_LIST_NOTES=查询流程定义列表
QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=分页查询流程定义列表
QUERY_ALL_DEFINITION_LIST_NOTES=查询所有流程定义
PAGE_NO=页码号
PROCESS_INSTANCE_ID=流程实例ID
PROCESS_INSTANCE_IDS=流程实例ID集合
PROCESS_INSTANCE_JSON=流程实例信息(json格式)
SCHEDULE_TIME=定时时间
SYNC_DEFINE=更新流程实例的信息是否同步到流程定义
RECOVERY_PROCESS_INSTANCE_FLAG=是否恢复流程实例
SEARCH_VAL=搜索值
USER_ID=用户ID
PAGE_SIZE=页大小
LIMIT=显示多少条
VIEW_TREE_NOTES=树状图
GET_NODE_LIST_BY_DEFINITION_ID_NOTES=获得任务节点列表通过流程定义ID
PROCESS_DEFINITION_ID_LIST=流程定义id列表
QUERY_PROCCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=查询流程定义通过项目ID
DELETE_PROCESS_DEFINITION_BY_ID_NOTES=删除流程定义通过流程定义ID
BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=批量删除流程定义通过流程定义ID集合
QUERY_PROCESS_INSTANCE_BY_ID_NOTES=查询流程实例通过流程实例ID
DELETE_PROCESS_INSTANCE_BY_ID_NOTES=删除流程实例通过流程实例ID
TASK_ID=任务实例ID
SKIP_LINE_NUM=忽略行数
QUERY_TASK_INSTANCE_LOG_NOTES=查询任务实例日志
DOWNLOAD_TASK_INSTANCE_LOG_NOTES=下载任务实例日志
USERS_TAG=用户相关操作
SCHEDULER_TAG=定时相关操作
CREATE_SCHEDULE_NOTES=创建定时
CREATE_USER_NOTES=创建用户
TENANT_ID=租户ID
QUEUE=使用的队列
EMAIL=邮箱
PHONE=手机号
QUERY_USER_LIST_NOTES=查询用户列表
UPDATE_USER_NOTES=更新用户
DELETE_USER_BY_ID_NOTES=删除用户通过ID
GRANT_PROJECT_NOTES=授权项目
PROJECT_IDS=项目IDS(字符串格式,多个项目以","分割)
GRANT_RESOURCE_NOTES=授权资源文件
RESOURCE_IDS=资源ID列表(字符串格式,多个资源ID以","分割)
GET_USER_INFO_NOTES=获取用户信息
LIST_USER_NOTES=用户列表
VERIFY_USER_NAME_NOTES=验证用户名
UNAUTHORIZED_USER_NOTES=取消授权
ALERT_GROUP_ID=报警组ID
AUTHORIZED_USER_NOTES=授权用户
GRANT_UDF_FUNC_NOTES=授权udf函数
UDF_IDS=udf函数id列表(字符串格式,多个udf函数ID以","分割)
GRANT_DATASOURCE_NOTES=授权数据源
DATASOURCE_IDS=数据源ID列表(字符串格式,多个数据源ID以","分割)
QUERY_SUBPROCESS_INSTANCE_BY_TASK_ID_NOTES=查询子流程实例通过任务实例ID
QUERY_PARENT_PROCESS_INSTANCE_BY_SUB_PROCESS_INSTANCE_ID_NOTES=查询父流程实例信息通过子流程实例ID
QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES=查询流程实例全局变量和局部变量
VIEW_GANTT_NOTES=浏览Gantt图
SUB_PROCESS_INSTANCE_ID=子流程实例ID
TASK_NAME=任务实例名
TASK_INSTANCE_TAG=任务实例相关操作
LOGGER_TAG=日志相关操作
PROCESS_INSTANCE_TAG=流程实例相关操作
EXECUTION_STATUS=工作流和任务节点的运行状态
HOST=运行任务的主机IP地址
START_DATE=开始时间
END_DATE=结束时间
QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES=通过流程实例ID查询任务列表
UPDATE_DATA_SOURCE_NOTES=更新数据源
DATA_SOURCE_ID=数据源ID
QUERY_DATA_SOURCE_NOTES=查询数据源通过ID
QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES=查询数据源列表通过数据源类型
QUERY_DATA_SOURCE_LIST_PAGING_NOTES=分页查询数据源列表
CONNECT_DATA_SOURCE_NOTES=连接数据源
CONNECT_DATA_SOURCE_TEST_NOTES=连接数据源测试
DELETE_DATA_SOURCE_NOTES=删除数据源
VERIFY_DATA_SOURCE_NOTES=验证数据源
UNAUTHORIZED_DATA_SOURCE_NOTES=未授权的数据源
AUTHORIZED_DATA_SOURCE_NOTES=授权的数据源
DELETE_SCHEDULER_BY_ID_NOTES=根据定时id删除定时数据
@ -0,0 +1 @@ |
<!DOCTYPE HTML PUBLIC '-//W3C//DTD HTML 4.01 Transitional//EN' 'http://www.w3.org/TR/html4/loose.dtd'><html><head><title>easyscheduler</title><meta name='Keywords' content=''><meta name='Description' content=''><style type="text/css">table { margin-top:0px; padding-top:0px; border:1px solid; font-size: 14px; color: #333333; border-width: 1px; border-color: #666666; border-collapse: collapse; } table th { border-width: 1px; padding: 8px; border-style: solid; border-color: #666666; background-color: #dedede; } table td { border-width: 1px; padding: 8px; border-style: solid; border-color: #666666; background-color: #ffffff; }</style></head><body style="margin:0;padding:0"><table border="1px" cellpadding="5px" cellspacing="-10px"><thead><#if title??> ${title}</#if></thead><#if content??> ${content}</#if></table></body></html>
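
The single line above is a FreeMarker template (`<#if …>` guards plus `${title}` / `${content}` interpolations) that renders alert mails as an HTML table. A minimal rendering sketch; the template directory, file name, and sample row values are assumptions, not the project's actual code:

```java
import freemarker.template.Configuration;
import freemarker.template.Template;
import java.io.File;
import java.io.StringWriter;
import java.util.HashMap;
import java.util.Map;

public class MailTemplateDemo {
    public static void main(String[] args) throws Exception {
        Configuration cfg = new Configuration(Configuration.VERSION_2_3_28);
        cfg.setDirectoryForTemplateLoading(new File("conf/mail_templates")); // assumed location
        cfg.setDefaultEncoding("UTF-8");

        Template template = cfg.getTemplate("mail.ftl"); // assumed file name
        Map<String, Object> model = new HashMap<>();
        // the template guards both values with <#if ...>, so either may be omitted
        model.put("title", "<tr><th>task</th><th>state</th></tr>");
        model.put("content", "<tr><td>job-1</td><td>SUCCESS</td></tr>");

        StringWriter out = new StringWriter();
        template.process(model, out); // interpolates ${title} and ${content}
        System.out.println(out);
    }
}
```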
@ -0,0 +1,21 @@ |
# master execute thread num
master.exec.threads=100

# master execute task number in parallel
master.exec.task.number=20

# master heartbeat interval
master.heartbeat.interval=10

# master commit task retry times
master.task.commit.retryTimes=5

# master commit task interval
master.task.commit.interval=100

# the master server can schedule only when the CPU load average is lower than this value; default: number of CPU cores * 2
master.max.cpuload.avg=10

# the master server can schedule only when available memory is larger than this reserved value; default: physical memory * 1/10, unit: G
master.reserved.memory=1
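
The two thresholds at the bottom gate whether the master keeps scheduling. A minimal sketch of how such a load check could be implemented against this file; the class name and file path are illustrative assumptions, not the project's actual code:

```java
import java.io.FileInputStream;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.lang.management.OperatingSystemMXBean;
import java.util.Properties;

public class MasterLoadCheck {
    public static void main(String[] args) throws IOException {
        Properties props = new Properties();
        try (FileInputStream in = new FileInputStream("conf/master.properties")) {
            props.load(in);
        }
        // fall back to the documented default: number of CPU cores * 2
        int cores = Runtime.getRuntime().availableProcessors();
        double maxLoad = Double.parseDouble(
                props.getProperty("master.max.cpuload.avg", String.valueOf(cores * 2)));

        OperatingSystemMXBean os = ManagementFactory.getOperatingSystemMXBean();
        double currentLoad = os.getSystemLoadAverage(); // -1 on platforms without a load average
        boolean canWork = currentLoad >= 0 && currentLoad < maxLoad;
        System.out.printf("load=%.2f max=%.2f canWork=%b%n", currentLoad, maxLoad, canWork);
    }
}
```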
@ -0,0 +1,34 @@ |
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
    <property name="log.base" value="logs" />

    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>
                [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
            </pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <appender name="MASTERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${log.base}/escheduler-master.log</file>
        <filter class="cn.escheduler.server.master.log.MasterLogFilter">
            <level>INFO</level>
        </filter>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${log.base}/escheduler-master.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
            <maxHistory>168</maxHistory>
            <maxFileSize>200MB</maxFileSize>
        </rollingPolicy>
        <encoder>
            <pattern>
                [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
            </pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <root level="INFO">
        <appender-ref ref="MASTERLOGFILE"/>
    </root>
</configuration>
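
Application code reaches this configuration through the SLF4J facade; a minimal sketch, with an illustrative class name:

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class MasterServerExample {
    // the logger name feeds the %logger{96} conversion word in the pattern above
    private static final Logger log = LoggerFactory.getLogger(MasterServerExample.class);

    public static void main(String[] args) {
        // written through MASTERLOGFILE because the root level is INFO
        log.info("master server started");
        log.debug("not written: DEBUG is below the configured INFO threshold");
    }
}
```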
@ -0,0 +1,39 @@ |
#============================================================================
# Configure Main Scheduler Properties
#============================================================================
org.quartz.scheduler.instanceName = EasyScheduler
org.quartz.scheduler.instanceId = AUTO
org.quartz.scheduler.makeSchedulerThreadDaemon = true
org.quartz.jobStore.useProperties = false

#============================================================================
# Configure ThreadPool
#============================================================================

org.quartz.threadPool.class = org.quartz.simpl.SimpleThreadPool
org.quartz.threadPool.makeThreadsDaemons = true
org.quartz.threadPool.threadCount = 25
org.quartz.threadPool.threadPriority = 5

#============================================================================
# Configure JobStore
#============================================================================

org.quartz.jobStore.class = org.quartz.impl.jdbcjobstore.JobStoreTX
org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.StdJDBCDelegate
org.quartz.jobStore.tablePrefix = QRTZ_
org.quartz.jobStore.isClustered = true
org.quartz.jobStore.misfireThreshold = 60000
org.quartz.jobStore.clusterCheckinInterval = 5000
org.quartz.jobStore.dataSource = myDs

#============================================================================
# Configure Datasources
#============================================================================

org.quartz.dataSource.myDs.driver = com.mysql.jdbc.Driver
org.quartz.dataSource.myDs.URL=jdbc:mysql://127.0.0.1:3306/escheduler?characterEncoding=utf8
org.quartz.dataSource.myDs.user=root
org.quartz.dataSource.myDs.password=root@123
org.quartz.dataSource.myDs.maxConnections = 10
org.quartz.dataSource.myDs.validationQuery = select 1
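
Because `org.quartz.jobStore.isClustered` is true, every node built from this file joins one cluster through the shared QRTZ_ tables. A minimal sketch of bootstrapping a scheduler from it with the standard Quartz factory; the file path is an assumption, and job definitions are elided:

```java
import org.quartz.Scheduler;
import org.quartz.SchedulerException;
import org.quartz.impl.StdSchedulerFactory;

public class QuartzBootstrap {
    public static void main(String[] args) throws SchedulerException {
        // StdSchedulerFactory reads quartz.properties from the classpath by default;
        // an explicit resource name can also be passed to the constructor.
        StdSchedulerFactory factory = new StdSchedulerFactory("quartz.properties");
        Scheduler scheduler = factory.getScheduler();
        scheduler.start();        // joins the cluster defined by org.quartz.jobStore.*
        // ... schedule jobs here ...
        scheduler.shutdown(true); // wait for running jobs to complete
    }
}
```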
@ -0,0 +1,15 @@ |
# worker execute thread num
worker.exec.threads=100

# worker heartbeat interval
worker.heartbeat.interval=10

# number of tasks fetched/submitted at a time
worker.fetch.task.num = 3

# the worker server can work only when the CPU load average is lower than this value; default: number of CPU cores * 2
#worker.max.cpuload.avg=10

# the worker server can work only when available memory is larger than this reserved value; default: physical memory * 1/6, unit: G
worker.reserved.memory=1
@ -0,0 +1,25 @@ |
#zookeeper cluster
zookeeper.quorum=127.0.0.1:2181

#escheduler root directory
zookeeper.escheduler.root=/escheduler

#zookeeper server directory
zookeeper.escheduler.dead.servers=/escheduler/dead-servers
zookeeper.escheduler.masters=/escheduler/masters
zookeeper.escheduler.workers=/escheduler/workers

#zookeeper lock directory
zookeeper.escheduler.lock.masters=/escheduler/lock/masters
zookeeper.escheduler.lock.workers=/escheduler/lock/workers

#escheduler failover directory
zookeeper.escheduler.lock.failover.masters=/escheduler/lock/failover/masters
zookeeper.escheduler.lock.failover.workers=/escheduler/lock/failover/workers
zookeeper.escheduler.lock.failover.startup.masters=/escheduler/lock/failover/startup-masters

#zookeeper session/connection timeouts and retry policy
zookeeper.session.timeout=300
zookeeper.connection.timeout=300
zookeeper.retry.sleep=1000
zookeeper.retry.maxtime=5
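
The quorum, timeout, and retry values above map naturally onto an Apache Curator client. A minimal sketch of wiring them up by hand; whether the project builds its client exactly this way is an assumption, and the registered path is illustrative:

```java
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;

public class ZkClientBootstrap {
    public static void main(String[] args) throws Exception {
        // base sleep and max retries taken from zookeeper.retry.* above
        CuratorFramework client = CuratorFrameworkFactory.builder()
                .connectString("127.0.0.1:2181")           // zookeeper.quorum
                .retryPolicy(new ExponentialBackoffRetry(1000, 5))
                .sessionTimeoutMs(300)                      // zookeeper.session.timeout
                .connectionTimeoutMs(300)                   // zookeeper.connection.timeout
                .build();
        client.start();
        // register this master under the configured masters directory
        // (ephemeral-node handling is omitted in this sketch)
        client.create().creatingParentsIfNeeded()
              .forPath("/escheduler/masters/127.0.0.1");
        client.close();
    }
}
```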
@ -0,0 +1,31 @@ |
server {
    listen       8888;
    server_name  localhost;

    #charset koi8-r;
    #access_log  /var/log/nginx/host.access.log  main;

    location / {
        root   /opt/easyscheduler_source/escheduler-ui/dist;
        index  index.html index.htm;
    }

    location /escheduler {
        proxy_pass http://127.0.0.1:12345;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header x_real_ip $remote_addr;
        proxy_set_header remote_addr $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_http_version 1.1;
        proxy_connect_timeout 300s;
        proxy_read_timeout 300s;
        proxy_send_timeout 300s;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }

    #error_page  404              /404.html;

    # redirect server error pages to the static page /50x.html
    #
    error_page   500 502 503 504  /50x.html;
    location = /50x.html {
        root   /usr/share/nginx/html;
    }
}
@ -1,48 +0,0 @@ |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

server {
    listen       8888;
    server_name  localhost;

    #charset koi8-r;
    #access_log  /var/log/nginx/host.access.log  main;

    location / {
        root   /opt/dolphinscheduler_source/dolphinscheduler-ui/dist;
        index  index.html index.htm;
    }

    location /dolphinscheduler {
        proxy_pass http://127.0.0.1:12345;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header x_real_ip $remote_addr;
        proxy_set_header remote_addr $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_http_version 1.1;
        proxy_connect_timeout 300s;
        proxy_read_timeout 300s;
        proxy_send_timeout 300s;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }

    #error_page  404              /404.html;

    # redirect server error pages to the static page /50x.html
    #
    error_page   500 502 503 504  /50x.html;
    location = /50x.html {
        root   /usr/share/nginx/html;
    }
}
@ -1,24 +1,8 @@ |
 #!/bin/bash
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-echo "------ dolphinscheduler start - build -------"
+echo "------ escheduler start - build -------"
 printenv
 
 docker build --build-arg version=$version --build-arg tar_version=$tar_version -t $DOCKER_REPO:$version .
 
-echo "------ dolphinscheduler end - build -------"
+echo "------ escheduler end - build -------"
@ -0,0 +1,16 @@ |
Easy Scheduler Release 1.0.1
===
Easy Scheduler 1.0.1 is the second version in the 1.x series. The updates are as follows:

- 1. Outlook TLS email support
- 2. resolve the servlet and protobuf jar conflict
- 3. create a Linux user at the same time a tenant is created
- 4. fix the re-run time being negative
- 5. both stand-alone and cluster can be deployed with one click of install.sh
- 6. add the queue support interface
- 7. add create_time and update_time fields to escheduler.t_escheduler_queue
@ -0,0 +1,49 @@ |
Easy Scheduler Release 1.0.2
===
Easy Scheduler 1.0.2 is the third version in the 1.x series. This version adds the scheduling open interface, worker grouping (specifying the group of machines on which a task runs), task flow and service monitoring, and support for Oracle, ClickHouse and more. The details are as follows:

New features:
===
- [[EasyScheduler-79](https://github.com/analysys/EasyScheduler/issues/79)] open the scheduling interface through token mode, so scheduling can be operated via the API
- [[EasyScheduler-138](https://github.com/analysys/EasyScheduler/issues/138)] the machine (group) on which a task runs can be specified
- [[EasyScheduler-139](https://github.com/analysys/EasyScheduler/issues/139)] task process monitoring, plus Master, Worker and Zookeeper operation status monitoring
- [[EasyScheduler-140](https://github.com/analysys/EasyScheduler/issues/140)] workflow definition: add a process timeout alarm
- [[EasyScheduler-134](https://github.com/analysys/EasyScheduler/issues/134)] task types support Oracle, ClickHouse, SQL Server and Impala
- [[EasyScheduler-136](https://github.com/analysys/EasyScheduler/issues/136)] sql task nodes can independently select CC mail users
- [[EasyScheduler-141](https://github.com/analysys/EasyScheduler/issues/141)] user management: users can bind queues; the user queue level is higher than the tenant queue level, and if the user queue is empty the tenant queue is used

Enhanced:
===
- [[EasyScheduler-154](https://github.com/analysys/EasyScheduler/issues/154)] tenant codes allow pure numbers or underscores

Repair:
===
- [[EasyScheduler-135](https://github.com/analysys/EasyScheduler/issues/135)] Python tasks can specify a python version

- [[EasyScheduler-125](https://github.com/analysys/EasyScheduler/issues/125)] the phone-number check in user accounts did not recognize China Unicom's newest 166 number prefix

- [[EasyScheduler-178](https://github.com/analysys/EasyScheduler/issues/178)] fix subtle spelling mistakes in ProcessDao

- [[EasyScheduler-129](https://github.com/analysys/EasyScheduler/issues/129)] tenant codes with underscores and other special characters could not pass validation

Thank:
===
Last but not least, no new version would have been born without the contributions of the following partners:

Baoqi, chubbyjiang, coreychen, chgxtony, cmdares, datuzi, dingchao, fanguanqun, 风清扬, gaojun416, googlechorme, hyperknob, hujiang75277381, huanzui, kinssun, ivivi727, jimmy, jiangzhx, kevin5210, lidongdai, lshmouse, lenboo, lyf198972, lgcareer, lzy305, moranrr, millionfor, mazhong8808, programlief, qiaozhanwei, roy110, swxchappy, sherlock111, samz406, qq389401879, vkingnew, William-GuoWei, woniulinux, yyl861, zhangxin1988, yangjiajun2014, yangqinlong, zhzhenqin, zhangluck, zhanghaicheng1, zhuyizhizhi

And many enthusiastic partners in the WeChat group! Thank you very much!
@ -0,0 +1,30 @@ |
Easy Scheduler Release 1.0.3
===
Easy Scheduler 1.0.3 is the fourth version in the 1.x series.

Enhanced:
===
- [[EasyScheduler-482]](https://github.com/analysys/EasyScheduler/issues/482) sql task mail headers add support for custom variables
- [[EasyScheduler-483]](https://github.com/analysys/EasyScheduler/issues/483) if a sql task fails to send mail, the sql task is marked failed
- [[EasyScheduler-484]](https://github.com/analysys/EasyScheduler/issues/484) modify the replacement rule for custom variables in sql tasks, supporting replacement of multiple single and double quotes
- [[EasyScheduler-485]](https://github.com/analysys/EasyScheduler/issues/485) when creating a resource file, verify whether the resource file already exists on hdfs

Repair:
===
- [[EasyScheduler-198]](https://github.com/analysys/EasyScheduler/issues/198) the process definition list is sorted by timing status and update time
- [[EasyScheduler-419]](https://github.com/analysys/EasyScheduler/issues/419) fix online file creation where the hdfs file was not created but success was returned
- [[EasyScheduler-481]](https://github.com/analysys/EasyScheduler/issues/481) fix the problem that the job does not exist at the same time
- [[EasyScheduler-425]](https://github.com/analysys/EasyScheduler/issues/425) also kill a task's child processes when killing the task
- [[EasyScheduler-422]](https://github.com/analysys/EasyScheduler/issues/422) fix an issue where the update time and size were not updated when updating resource files
- [[EasyScheduler-431]](https://github.com/analysys/EasyScheduler/issues/431) fix an issue where deleting a tenant failed if hdfs was not started when the tenant was deleted
- [[EasyScheduler-486]](https://github.com/analysys/EasyScheduler/issues/486) when the shell process exits, wait until the yarn state is final before judging the result

Thank:
===
Last but not least, no new version would have been born without the contributions of the following partners:

Baoqi, jimmy201602, samz406, petersear, millionfor, hyperknob, fanguanqun, yangqinlong, qq389401879,
feloxx, coding-now, hymzcn, nysyxxg, chgxtony

And many enthusiastic partners in the WeChat group! Thank you very much!
@ -0,0 +1,55 @@ |
Easy Scheduler Release 1.1.0
===
Easy Scheduler 1.1.0 is the first release in the 1.1.x series.

New features:
===
- [[EasyScheduler-391](https://github.com/analysys/EasyScheduler/issues/391)] run a process under a specified tenant user
- [[EasyScheduler-288](https://github.com/analysys/EasyScheduler/issues/288)] feature/qiye_weixin (enterprise WeChat support)
- [[EasyScheduler-189](https://github.com/analysys/EasyScheduler/issues/189)] security support such as Kerberos
- [[EasyScheduler-398](https://github.com/analysys/EasyScheduler/issues/398)] an administrator with a tenant (install.sh sets a default tenant) can create resources, projects and data sources (limited to one administrator)
- [[EasyScheduler-293](https://github.com/analysys/EasyScheduler/issues/293)] the parameters selected when running a process previously had no place to be viewed and were not saved; this is now supported
- [[EasyScheduler-401](https://github.com/analysys/EasyScheduler/issues/401)] schedules could too easily be set to fire every second; after a schedule is created, the page can now display the next trigger times
- [[EasyScheduler-493](https://github.com/analysys/EasyScheduler/pull/493)] add datasource kerberos auth, FAQ modifications, and resource upload to S3

Enhanced:
===
- [[EasyScheduler-227](https://github.com/analysys/EasyScheduler/issues/227)] upgrade spring-boot to 2.1.x and spring to 5.x
- [[EasyScheduler-434](https://github.com/analysys/EasyScheduler/issues/434)] the number of worker nodes in zk and mysql was inconsistent
- [[EasyScheduler-435](https://github.com/analysys/EasyScheduler/issues/435)] validation of the mailbox format
- [[EasyScheduler-441](https://github.com/analysys/EasyScheduler/issues/441)] prohibit running nodes from joining completed-node detection
- [[EasyScheduler-400](https://github.com/analysys/EasyScheduler/issues/400)] home page: queue statistics were inconsistent and command statistics had no data
- [[EasyScheduler-395](https://github.com/analysys/EasyScheduler/issues/395)] for fault-tolerant recovery processes, the status cannot be **running**
- [[EasyScheduler-529](https://github.com/analysys/EasyScheduler/issues/529)] optimize polling tasks from zookeeper
- [[EasyScheduler-242](https://github.com/analysys/EasyScheduler/issues/242)] worker-server node task-fetch performance problem
- [[EasyScheduler-352](https://github.com/analysys/EasyScheduler/issues/352)] worker grouping, queue consumption problem
- [[EasyScheduler-461](https://github.com/analysys/EasyScheduler/issues/461)] when viewing data source parameters, encrypt the account and password information
- [[EasyScheduler-396](https://github.com/analysys/EasyScheduler/issues/396)] Dockerfile optimization, and link the Dockerfile with github to achieve automatic image building
- [[EasyScheduler-389](https://github.com/analysys/EasyScheduler/issues/389)] the service monitor could not detect changes of master/worker
- [[EasyScheduler-511](https://github.com/analysys/EasyScheduler/issues/511)] support recovering processes from stop/kill nodes
- [[EasyScheduler-399](https://github.com/analysys/EasyScheduler/issues/399)] HadoopUtils specifies user actions instead of the **deploy user**

Repair:
===
- [[EasyScheduler-394](https://github.com/analysys/EasyScheduler/issues/394)] when master and worker were deployed on the same machine and the master and worker services were restarted, previously scheduled tasks could not be scheduled
- [[EasyScheduler-469](https://github.com/analysys/EasyScheduler/issues/469)] fix naming errors on the monitor page
- [[EasyScheduler-392](https://github.com/analysys/EasyScheduler/issues/392)] feature request: fix email regex check
- [[EasyScheduler-405](https://github.com/analysys/EasyScheduler/issues/405)] on the schedule add/modify page, the start time and end time cannot be the same
- [[EasyScheduler-517](https://github.com/analysys/EasyScheduler/issues/517)] complement - subworkflow - time parameter
- [[EasyScheduler-532](https://github.com/analysys/EasyScheduler/issues/532)] fix the problem that python nodes do not execute
- [[EasyScheduler-543](https://github.com/analysys/EasyScheduler/issues/543)] optimize datasource connection parameter safety
- [[EasyScheduler-569](https://github.com/analysys/EasyScheduler/issues/569)] timed tasks could not really be stopped
- [[EasyScheduler-463](https://github.com/analysys/EasyScheduler/issues/463)] mailbox verification did not support mailboxes with unusual suffixes

Thank:
===
Last but not least, no new version would have been born without the contributions of the following partners:

Baoqi, jimmy201602, samz406, petersear, millionfor, hyperknob, fanguanqun, yangqinlong, qq389401879, chgxtony, Stanfan, lfyee, thisnew, hujiang75277381, sunnyingit, lgbo-ustc, ivivi, lzy305, JackIllkid, telltime, lipengbo2018, wuchunfu

And many enthusiastic partners in the WeChat group! Thank you very much!
@ -0,0 +1,299 @@

# EasyScheduler Proposal

## Abstract

EasyScheduler is a distributed ETL scheduling engine with a powerful DAG visual interface. EasyScheduler focuses on solving the problem of complex task dependencies and triggers in data processing. True to its name, we are dedicated to making the scheduling system `out of the box`.

## Proposal

EasyScheduler provides many easy-to-use features to improve engineers' efficiency on data ETL workflow jobs. We propose the concepts of 'process instance' and 'task instance' to let developers tune their jobs against the running state of a workflow instead of changing the task's template. Its main objectives are as follows:

- Define complex task dependencies and triggers in a DAG by dragging and dropping.
- Support cluster HA.
- Support multi-tenancy and parallel or serial backfilling of data.
- Support automatic retry and recovery of failed jobs.
- Support many data task types as well as process priority, task priority, and relative task timeout alarms.

For now, EasyScheduler has a fairly large community in China.
It is also widely adopted by many [companies and organizations](https://github.com/analysys/EasyScheduler/issues/57) as their ETL scheduling tool.

We believe that bringing EasyScheduler into the ASF could advance the development of a stronger and more diverse open source community.

Analysys submits this proposal to donate EasyScheduler's source code and all related documentation to the Apache Software Foundation.
The code is already under Apache License Version 2.0.

- Code base: https://www.github.com/analysys/easyscheduler
- English Documentation: <https://analysys.github.io/easyscheduler_docs>
- Chinese Documentation: <https://analysys.github.io/easyscheduler_docs_cn>

## Background

We wanted a data processing tool with the following features:

- Easy to use: developers can build an ETL process with simple drag-and-drop operations, so that not only ETL developers but also people who cannot write code, such as system administrators, can use it for ETL operations.
- Solves the problem of "complex task dependencies" and can monitor the running status of the ETL process.
- Supports multi-tenancy.
- Supports many task types: Shell, MR, Spark, SQL (mysql, postgresql, hive, sparksql), Python, Sub_Process, Procedure, etc.
- Supports HA and linear scalability.

For the above reasons, we realized that no existing product met our requirements, so we decided to develop this tool ourselves. We designed EasyScheduler at the end of 2017. The first internal version was completed in May 2018. We then iterated through several internal versions and the system gradually stabilized.

We open sourced EasyScheduler in March 2019. It soon gained the interest of many ETL developers, along with stars on GitHub.

## Rationale

Many organizations (>30) (refer to [Who is using EasyScheduler](https://github.com/analysys/EasyScheduler/issues/57)) already benefit from running EasyScheduler to make their data pipelines easier to manage. More than 100 [feature ideas](https://github.com/analysys/EasyScheduler/projects/1) come from the EasyScheduler community. Some third-party projects also plan to integrate with EasyScheduler through task plugins, such as [Scriptis](https://github.com/WeBankFinTech/Scriptis) and [waterdrop](https://github.com/InterestingLab/waterdrop). These will strengthen the features of EasyScheduler.

## Current Status

### Meritocracy

EasyScheduler was incubated at Analysys in 2017 and open sourced on GitHub in March 2019. Once open sourced, it was quickly adopted by multiple organizations. EasyScheduler has contributors and users from many companies, and we have set up a Committer Team. New contributors are guided and reviewed by existing committers.
Contributions are always welcomed and highly valued.

### Community

We have set up development teams for EasyScheduler at Analysys, and we already have external developers who have contributed code. Our user group has more than 1,000 members.
We hope to grow the base of contributors by inviting all those who offer contributions through The Apache Way.
Right now, we use GitHub for code hosting and Gitter for community communication.

### Core Developers

The core developers, including experienced senior developers, are often guided by mentors.

## Known Risks

### Orphaned products

EasyScheduler is widely adopted in China by many [companies and organizations](https://github.com/analysys/EasyScheduler/issues/57). The core developers of the EasyScheduler team plan to work full time on this project. Currently there are 10 use cases with more than 1,000 active tasks per day using EasyScheduler in users' production environments. There is very little risk of EasyScheduler becoming orphaned, as at least two large companies (Xueqiu, Fengjr) use it widely in production, and developers from these companies have also joined EasyScheduler's team of contributors. EasyScheduler has had eight major releases so far and received 373 pull requests from contributors, which further demonstrates that EasyScheduler is a very active project. We also plan to extend and diversify this community further through Apache.

Thus, it is very unlikely that EasyScheduler becomes orphaned.

### Inexperience with Open Source

EasyScheduler's core developers have been running it as a community-oriented open source project for some time. Several of them already have experience working with open source communities; they are also active in Presto, Alluxio, and other projects. At the same time, we will gain more open source experience by following the Apache Way during our incubator journey.

### Homogeneous Developers

The current developers work across a variety of organizations including Analysys, Guandata, and Hydee;
some individual developers have been accepted as developers of EasyScheduler as well.
Considering that Fengjr and Sefonsoft have shown great interest in EasyScheduler, we plan to encourage them to contribute and invite them as contributors to work together.

### Reliance on Salaried Developers

At present, eight of the core developers are paid by their employers to contribute to the EasyScheduler project.
We also have some other developers and researchers taking part in the project, and we will make efforts to increase the diversity of the contributors and actively lobby for domain experts in the workflow space to contribute.

### Relationships with Other Apache Products

EasyScheduler integrates Apache ZooKeeper as one of its service registration/discovery mechanisms. EasyScheduler is deeply integrated with Apache products: it currently supports many task types such as Apache Hive, Apache Spark, Apache Hadoop, and so on.

### An Excessive Fascination with the Apache Brand

We recognize the value and reputation that the Apache brand would bring to EasyScheduler.
However, our primary interest is that the community provided by the Apache Software Foundation will enable the project to achieve long-term stable development. EasyScheduler is proposing to enter incubation at Apache in order to help diversify the community, not so much to capitalize on the Apache brand.

## Documentation

A complete set of EasyScheduler documentation is provided on GitHub in both English and Simplified Chinese.

- [English](https://github.com/analysys/easyscheduler_docs)
- [Chinese](https://github.com/analysys/easyscheduler_docs_cn)

## Initial Source

The project consists of three distinct codebases: the core and two documentation repositories. The addresses of the existing git repositories are as follows:

- <https://github.com/analysys/easyscheduler>
- <https://github.com/analysys/easyscheduler_docs>
- <https://github.com/analysys/easyscheduler_docs_cn>

## Source and Intellectual Property Submission Plan

As soon as EasyScheduler is approved to join the Apache Incubator, Analysys will provide the Software Grant Agreement (SGA) and the initial committers will submit ICLAs. The code is already licensed under the Apache Software License, version 2.0.

## External Dependencies

As all backend code dependencies are managed using Apache Maven, none of the external libraries need to be packaged in a source distribution.

Most of the dependencies have Apache-compatible licenses, and the core dependencies are as follows:

### Backend Dependency

| Dependency | License | Comments |
| ----------------------------------- | ------------------------------------- | ----------- |
| bonecp-0.8.0.RELEASE.jar | Apache V2.0 | |
| byte-buddy-1.9.10.jar | Apache V2.0 | |
| c3p0-0.9.1.1.jar | GNU LESSER GENERAL PUBLIC LICENSE | will remove |
| curator-*-2.12.0.jar | Apache V2.0 | |
| druid-1.1.14.jar | Apache V2.0 | |
| fastjson-1.2.29.jar | Apache V2.0 | |
| fastutil-6.5.6.jar | Apache V2.0 | |
| grpc-*-1.9.0.jar | Apache V2.0 | |
| gson-2.8.5.jar | Apache V2.0 | |
| guava-20.0.jar | Apache V2.0 | |
| guice-*3.0.jar | Apache V2.0 | |
| hadoop-*-2.7.3.jar | Apache V2.0 | |
| hbase-*-1.1.1.jar | Apache V2.0 | |
| hive-*-2.1.0.jar | Apache V2.0 | |
| instrumentation-api-0.4.3.jar | Apache V2.0 | |
| jackson-*-2.9.8.jar | Apache V2.0 | |
| jackson-jaxrs-1.8.3.jar | LGPL Version 2.1, Apache V2.0 | will remove |
| jackson-xc-1.8.3.jar | LGPL Version 2.1, Apache V2.0 | will remove |
| javax.activation-api-1.2.0.jar | CDDL/GPLv2+CE | will remove |
| javax.annotation-api-1.3.2.jar | CDDL + GPLv2 with classpath exception | will remove |
| javax.servlet-api-3.1.0.jar | CDDL + GPLv2 with classpath exception | will remove |
| jaxb-*.jar | (CDDL 1.1) (GPL2 w/ CPE) | will remove |
| jersey-*-1.9.jar | CDDL+GPLv2 | will remove |
| jetty-*-9.4.14.v20181114.jar | Apache V2.0, EPL 1.0 | |
| jna-4.5.2.jar | Apache V2.0, LGPL 2.1 | will remove |
| jna-platform-4.5.2.jar | Apache V2.0, LGPL 2.1 | will remove |
| jsp-api-2.x.jar | CDDL, GPL 2.0 | will remove |
| log4j-1.2.17.jar | Apache V2.0 | |
| log4j-*-2.11.2.jar | Apache V2.0 | |
| logback-x.jar | dual-license EPL 1.0, LGPL 2.1 | |
| mail-1.4.5.jar | CDDL+GPLv2 | will remove |
| mybatis-3.5.1.jar | Apache V2.0 | |
| mybatis-spring-*2.0.1.jar | Apache V2.0 | |
| mysql-connector-java-5.1.34.jar | GPL 2.0 | will remove |
| netty-*-4.1.33.Final.jar | Apache V2.0 | |
| oshi-core-3.5.0.jar | EPL 1.0 | |
| parquet-hadoop-bundle-1.8.1.jar | Apache V2.0 | |
| postgresql-42.1.4.jar | BSD 2-clause | |
| protobuf-java-*3.5.1.jar | BSD 3-clause | |
| quartz-2.2.3.jar | Apache V2.0 | |
| quartz-jobs-2.2.3.jar | Apache V2.0 | |
| slf4j-api-1.7.5.jar | MIT | |
| spring-*-5.1.5.RELEASE.jar | Apache V2.0 | |
| spring-beans-5.1.5.RELEASE.jar | Apache V2.0 | |
| spring-boot-*2.1.3.RELEASE.jar | Apache V2.0 | |
| springfox-*-2.9.2.jar | Apache V2.0 | |
| stringtemplate-3.2.1.jar | BSD | |
| swagger-annotations-1.5.20.jar | Apache V2.0 | |
| swagger-bootstrap-ui-1.9.3.jar | Apache V2.0 | |
| swagger-models-1.5.20.jar | Apache V2.0 | |
| zookeeper-3.4.8.jar | Apache | |

The front-end UI currently relies on many components, and the core dependencies are as follows:

### UI Dependency

| Dependency | License | Comments |
| ------------------------------------ | ------------------------------------ | ----------- |
| autoprefixer | MIT | |
| babel-core | MIT | |
| babel-eslint | MIT | |
| babel-helper-* | MIT | |
| babel-helpers | MIT | |
| babel-loader | MIT | |
| babel-plugin-syntax-* | MIT | |
| babel-plugin-transform-* | MIT | |
| babel-preset-env | MIT | |
| babel-runtime | MIT | |
| bootstrap | MIT | |
| canvg | MIT | |
| clipboard | MIT | |
| codemirror | MIT | |
| copy-webpack-plugin | MIT | |
| cross-env | MIT | |
| css-loader | MIT | |
| cssnano | MIT | |
| cyclist | MIT | |
| d3 | BSD-3-Clause | |
| dayjs | MIT | |
| echarts | Apache V2.0 | |
| env-parse | ISC | |
| extract-text-webpack-plugin | MIT | |
| file-loader | MIT | |
| globby | MIT | |
| html-loader | MIT | |
| html-webpack-ext-plugin | MIT | |
| html-webpack-plugin | MIT | |
| html2canvas | MIT | |
| jsplumb | (MIT OR GPL-2.0) | |
| lodash | MIT | |
| node-sass | MIT | |
| optimize-css-assets-webpack-plugin | MIT | |
| postcss-loader | MIT | |
| rimraf | ISC | |
| sass-loader | MIT | |
| uglifyjs-webpack-plugin | MIT | |
| url-loader | MIT | |
| util.promisify | MIT | |
| vue | MIT | |
| vue-loader | MIT | |
| vue-style-loader | MIT | |
| vue-template-compiler | MIT | |
| vuex-router-sync | MIT | |
| watchpack | MIT | |
| webpack | MIT | |
| webpack-dev-server | MIT | |
| webpack-merge | MIT | |
| xmldom | MIT, LGPL | will remove |

## Required Resources

### Git Repositories

- <https://github.com/analysys/EasyScheduler.git>
- <https://github.com/analysys/easyscheduler_docs.git>
- <https://github.com/analysys/easyscheduler_docs_cn.git>

### Issue Tracking

The community would like to continue using GitHub Issues.

### Continuous Integration tool

Jenkins

### Mailing Lists

- EasyScheduler-dev: for development discussions
- EasyScheduler-private: for PPMC discussions
- EasyScheduler-notifications: for user notifications

## Initial Committers

- William-GuoWei(guowei20m@outlook.com)
- Lidong Dai(lidong.dai@outlook.com)
- Zhanwei Qiao(qiaozhanwei@outlook.com)
- Liang Bao(baoliang.leon@gmail.com)
- Gang Li(lgcareer2019@outlook.com)
- Zijian Gong(quanquansy@gmail.com)
- Jun Gao(gaojun2048@gmail.com)
- Baoqi Wu(wubaoqi@gmail.com)

## Affiliations

- Analysys Inc: William-GuoWei, Zhanwei Qiao, Liang Bao, Gang Li, Jun Gao, Lidong Dai
- Hydee Inc: Zijian Gong
- Guandata Inc: Baoqi Wu

## Sponsors

### Champion

- Sheng Wu (Apache Incubator PMC, [wusheng@apache.org](mailto:wusheng@apache.org))

### Mentors

- Sheng Wu (Apache Incubator PMC, [wusheng@apache.org](mailto:wusheng@apache.org))
- ShaoFeng Shi (Apache Incubator PMC, [shaofengshi@apache.org](mailto:shaofengshi@apache.org))
- Liang Chen (Apache Software Foundation Member, [chenliang613@apache.org](mailto:chenliang613@apache.org))

### Sponsoring Entity

We expect the Apache Incubator to sponsor this project.

@ -0,0 +1,50 @@

# Summary

* [Instruction](README.md)

* Frontend Deployment
    * [Preparations](frontend-deployment.md#Preparations)
    * [Deployment](frontend-deployment.md#Deployment)
    * [FAQ](frontend-deployment.md#FAQ)

* Backend Deployment
    * [Preparations](backend-deployment.md#Preparations)
    * [Deployment](backend-deployment.md#Deployment)

* [Quick Start](quick-start.md#Quick Start)

* System Use Manual
    * [Operational Guidelines](system-manual.md#Operational Guidelines)
    * [Security](system-manual.md#Security)
    * [Monitor center](system-manual.md#Monitor center)
    * [Task Node Type and Parameter Setting](system-manual.md#Task Node Type and Parameter Setting)
    * [System parameter](system-manual.md#System parameter)

* [Architecture Design](architecture-design.md)

* Front-end development
    * [Development environment](frontend-development.md#Development environment)
    * [Project directory structure](frontend-development.md#Project directory structure)
    * [System function module](frontend-development.md#System function module)
    * [Routing and state management](frontend-development.md#Routing and state management)
    * [specification](frontend-development.md#specification)
    * [interface](frontend-development.md#interface)
    * [Extended development](frontend-development.md#Extended development)

* Backend development documentation
    * [Environmental requirements](backend-development.md#Environmental requirements)
    * [Project compilation](backend-development.md#Project compilation)

* [Interface documentation](http://52.82.13.76:8888/escheduler/doc.html?language=en_US&lang=en)

* FAQ
    * [FAQ](EasyScheduler-FAQ.md)

* EasyScheduler upgrade documentation
    * [upgrade documentation](upgrade.md)

* History release notes
    * [1.1.0 release](1.1.0-release.md)
    * [1.0.5 release](1.0.5-release.md)
    * [1.0.4 release](1.0.4-release.md)
    * [1.0.3 release](1.0.3-release.md)
    * [1.0.2 release](1.0.2-release.md)
    * [1.0.1 release](1.0.1-release.md)
    * [1.0.0 release]

@ -0,0 +1,316 @@

## Architecture Design

Before explaining the architecture of the scheduling system, let us first understand the common terms used in scheduling systems.

### 1. Terminology

**DAG:** Directed Acyclic Graph. Tasks in a workflow are assembled as a directed acyclic graph, which is traversed topologically from the nodes with zero in-degree until no successor nodes remain. For example, the following picture:

<p align="center">
  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/dag_examples_cn.jpg" alt="dag example" width="60%" />
  <p align="center">
        <em>dag example</em>
  </p>
</p>
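
To make the zero-in-degree traversal above concrete, here is a minimal sketch of the topological walk (Kahn's algorithm) over task IDs; the graph shape and IDs are invented for illustration and are not EasyScheduler's actual data structures:

```java
import java.util.*;

// Minimal sketch: topologically traverse a DAG of task ids (Kahn's algorithm).
public class DagTraversal {
    public static List<Integer> topoOrder(Map<Integer, List<Integer>> edges, int nodeCount) {
        int[] inDegree = new int[nodeCount];
        for (List<Integer> successors : edges.values()) {
            for (int s : successors) {
                inDegree[s]++;
            }
        }
        // start from every node whose in-degree is zero
        Deque<Integer> ready = new ArrayDeque<>();
        for (int i = 0; i < nodeCount; i++) {
            if (inDegree[i] == 0) {
                ready.add(i);
            }
        }
        List<Integer> order = new ArrayList<>();
        while (!ready.isEmpty()) {
            int task = ready.poll();
            order.add(task); // "run" the task
            for (int next : edges.getOrDefault(task, Collections.emptyList())) {
                if (--inDegree[next] == 0) {
                    ready.add(next); // all predecessors finished
                }
            }
        }
        return order; // shorter than nodeCount means the graph had a cycle
    }

    public static void main(String[] args) {
        // 0 -> 1, 0 -> 2, 1 -> 3, 2 -> 3
        Map<Integer, List<Integer>> edges = new HashMap<>();
        edges.put(0, Arrays.asList(1, 2));
        edges.put(1, Collections.singletonList(3));
        edges.put(2, Collections.singletonList(3));
        System.out.println(topoOrder(edges, 4)); // e.g. [0, 1, 2, 3]
    }
}
```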

**Process definition**: A visual **DAG** formed by dragging task nodes and establishing the associations between them.

**Process instance**: A process instance is an instantiation of a process definition, generated either by manual start or by scheduling. Each run of a process definition produces a new process instance.

**Task instance**: A task instance is the instantiation of a specific task node when a process instance runs; it reflects the execution status of that task.

**Task type**: Currently SHELL, SQL, SUB_PROCESS (sub-process), PROCEDURE, MR, SPARK, PYTHON, and DEPENDENT (dependency) are supported, with dynamic plug-in extension planned. Note: a **SUB_PROCESS** is itself a separate process definition that can be launched on its own.

**Schedule mode**: The system supports timed schedules based on cron expressions as well as manual scheduling. Supported command types: start workflow, start execution from current node, recover fault-tolerant workflow, resume paused process, start execution from failed node, complement (backfill), timer, rerun, pause, stop, recover waiting thread. Of these, **recover fault-tolerant workflow** and **recover waiting thread** are used internally by the scheduler and cannot be called externally.

**Timed schedule**: The system uses **quartz** as the distributed scheduler and supports visual generation of cron expressions.
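
As an illustration, a quartz cron expression has seven fields (second, minute, hour, day-of-month, month, day-of-week, plus an optional year); the sample below is a hypothetical schedule, not one shipped with EasyScheduler:

```
# second minute hour day-of-month month day-of-week
0 0 2 * * ?    # fire every day at 02:00:00
```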

**Dependency**: The system not only supports simple **DAG** dependencies between predecessor and successor nodes, but also provides **task dependency** nodes, supporting custom task dependencies between processes.

**Priority**: Priorities can be set for process instances and task instances. If none is set, the default is first-in, first-out.

**Mail alert**: Supports emailing **SQL task** query results, and email alerts for process instance run results and fault-tolerance events.

**Failure policy**: For tasks running in parallel, if one of them fails, two failure policies are available. **Continue** means the remaining tasks run in parallel until the process finishes (as failed). **End** means that once a failed task is found, the running parallel tasks are killed and the process ends.

**Complement**: Backfills historical data, supporting two modes over a date interval: **parallel** and **serial**.

### 2. System architecture

#### 2.1 System Architecture Diagram

<p align="center">
  <img src="https://user-images.githubusercontent.com/48329107/62609545-8f973480-b934-11e9-9a58-d8133222f14d.png" alt="System Architecture Diagram" />
  <p align="center">
        <em>System Architecture Diagram</em>
  </p>
</p>

#### 2.2 Architectural description

* **MasterServer**

    MasterServer adopts a distributed, non-centralized design. The MasterServer is mainly responsible for DAG task segmentation, task submission and monitoring, and for watching the health status of the other MasterServers and WorkerServers.
    When the MasterServer service starts, it registers a temporary node with ZooKeeper and listens for ZooKeeper temporary node state changes for fault-tolerance processing.

##### The service mainly contains:

- **Distributed Quartz**, the distributed scheduling component, mainly responsible for starting and stopping scheduled tasks. When quartz picks up a task, an internal thread pool in the Master handles the task's subsequent operations.

- **MasterSchedulerThread** is a scan thread that periodically scans the **command** table in the database and performs different business operations based on the **command type**.

- **MasterExecThread** is mainly responsible for DAG task segmentation, task submission and monitoring, and the logical processing of the various command types.

- **MasterTaskExecThread** is mainly responsible for task persistence.

* **WorkerServer**

    - WorkerServer also adopts a distributed, non-centralized design. The WorkerServer is mainly responsible for task execution and for providing log services. When the WorkerServer service starts, it registers a temporary node with ZooKeeper and maintains a heartbeat.

##### This service contains:

- **FetchTaskThread** is mainly responsible for continuously taking tasks from the **Task Queue** and calling the executor corresponding to the task type via **TaskScheduleThread**.
- **LoggerServer** is an RPC service that provides functions such as log fragment viewing, refreshing, and downloading.

- **ZooKeeper**

    Both the MasterServer and WorkerServer nodes use the ZooKeeper service for cluster management and fault tolerance. In addition, the system performs event monitoring and distributed locking based on ZooKeeper.
    We also implemented queues based on Redis, but since we want EasyScheduler to rely on as few components as possible, we eventually removed the Redis implementation.

- **Task Queue**

    Provides the task queue operations. The queue is currently also implemented on ZooKeeper. Since the queue stores little information per entry, there is no need to worry about too much data in the queue; in fact, we have stress-tested the queue with millions of entries, with no effect on system stability or performance.

- **Alert**

    Provides alarm-related interfaces, mainly covering the storage, query, and notification functions for the two types of alarm data. The notification function has two channels: **mail notification** and **SNMP (not yet implemented)**.

- **API**

    The API interface layer is mainly responsible for processing requests from the front-end UI layer. The service provides a RESTful API to serve external requests.
    Interfaces include workflow creation, definition, query, modification, release, taking offline, manual start, stop, pause, resume, start execution from a given node, and more.

- **UI**

    The front-end page of the system provides the system's visual operation interfaces. For details, see the **[System User Manual](System User Manual.md)** section.

#### 2.3 Architectural Design Ideas

##### I. Decentralization vs. centralization

###### Centralized design

The centralized design concept is relatively simple. The nodes of the distributed cluster are divided into two roles:

<p align="center">
  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/master_slave.png" alt="master-slave role" width="50%" />
</p>

- The Master is mainly responsible for task distribution and for supervising the health of the Slaves. It can dynamically balance tasks across Slaves, so that no Slave node is "busy" while others are "idle".
- The Worker (Slave) is mainly responsible for executing tasks and maintains a heartbeat with the Master so that the Master can assign tasks to it.

Problems with the centralized design:

- Once the Master has a problem, the group has no leader and the entire cluster collapses. To solve this, most Master/Slave architectures adopt an active/standby Master design, which can be hot or cold standby, with automatic or manual switching; more and more new systems can automatically elect and switch the Master to improve availability.
- Another problem is the placement of the Scheduler. If the Scheduler is on the Master, then although different tasks of one DAG can run on different machines, the Master can become overloaded. If the Scheduler is on the Slave, all tasks of a DAG can only be submitted on one machine, and with many parallel tasks the pressure on that Slave may be high.

###### Decentralization

<p align="center">
  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/decentralization.png" alt="decentralized" width="50%" />
</p>

- In a decentralized design there is usually no Master/Slave concept: all roles are the same and have equal status. The global Internet is a typical decentralized distributed system; any node going down affects only a small range of features.
- The core of decentralized design is that there is no "manager" distinct from the other nodes, so there is no single point of failure. However, since there is no "manager", each node needs to communicate with other nodes to obtain the necessary machine information, and the unreliability of distributed communication greatly increases the difficulty of implementing the functions above.
- In fact, truly decentralized distributed systems are rare. Instead, dynamically centralized distributed systems keep emerging. In such architectures, the managers of the cluster are dynamically elected rather than preset, and when the cluster fails, the nodes spontaneously hold "meetings" to elect a new "manager" to preside over the work. The most typical cases are ZooKeeper and Etcd (implemented in Go).
- EasyScheduler's decentralization has the Masters and Workers register with ZooKeeper. The Master cluster and Worker cluster have no center, and ZooKeeper distributed locks are used to elect one Master or Worker as the "manager" to perform a given task.

##### II. Distributed lock practice

EasyScheduler uses ZooKeeper distributed locks to ensure that only one Master executes the Scheduler at a time, and that only one Worker performs task submission at a time.

1. The core flow of acquiring a distributed lock is as follows:

<p align="center">
  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/distributed_lock.png" alt="Get Distributed Lock Process" width="50%" />
</p>

2. Flow chart of the Scheduler thread's distributed lock implementation in EasyScheduler:

<p align="center">
  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/distributed_lock_procss.png" alt="Scheduler Thread Distributed Lock Process" width="50%" />
</p>
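
Since Curator (curator-*-2.12.0) is already among the backend dependencies, the mutual exclusion described above can be sketched with Curator's `InterProcessMutex` recipe; the ZooKeeper address and lock path below are placeholders, not EasyScheduler's actual configuration:

```java
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.recipes.locks.InterProcessMutex;
import org.apache.curator.retry.ExponentialBackoffRetry;

public class SchedulerLockSketch {
    public static void main(String[] args) throws Exception {
        CuratorFramework client = CuratorFrameworkFactory.newClient(
                "127.0.0.1:2181", new ExponentialBackoffRetry(1000, 3));
        client.start();

        // one well-known znode path guards the scheduler section
        InterProcessMutex lock = new InterProcessMutex(client, "/escheduler/lock/masters");
        lock.acquire();            // blocks until this Master is the "manager"
        try {
            // critical section: only one Master runs the Scheduler here
        } finally {
            lock.release();        // let another Master take over
        }
        client.close();
    }
}
```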

##### III. Insufficient threads and circular waiting

- If a DAG has no sub-processes and the number of entries in the Command table exceeds the threshold set for the thread pool, the process directly waits or fails.
- If many sub-processes are nested in a large DAG, the situation in the following figure produces a "deadlocked" state:

<p align="center">
  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/lack_thread.png" alt="Insufficient threads causing circular waiting" width="50%" />
</p>

In the figure above, MainFlowThread waits for SubFlowThread1 to end, SubFlowThread1 waits for SubFlowThread2 to end, SubFlowThread2 waits for SubFlowThread3 to end, and SubFlowThread3 waits for a new thread from the thread pool. The whole DAG can never finish, so no thread is ever released: the child and parent processes wait on each other in a loop. At this point the scheduling cluster is no longer usable unless a new Master is started to add threads and break the stall.

Starting a new Master just to break the deadlock is unsatisfying, so we proposed the following three options to reduce this risk (a sketch of the chosen option follows this list):

1. Calculate the total number of threads across all Masters, then calculate the number of threads each DAG needs, i.e., pre-compute before the DAG process executes. Since the thread pools span multiple Masters, the total thread count is unlikely to be obtainable in real time.
2. Check the single Master's thread pool; if the pool is full, fail the thread directly.
3. Add a "resources insufficient" Command type: if the thread pool is insufficient, suspend the main process. Once the thread pool gains a free thread, the process suspended for insufficient resources can be woken up again.

Note: the Master Scheduler thread fetches Commands in FIFO order.

So we chose the third option to solve the problem of insufficient threads.
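
A simplified sketch of option 3 is below; the class, state, and method names are invented for illustration and do not match EasyScheduler's internals. The idea: when no worker thread is free, park the process instance in a WAITING_THREAD state and write a recovery command instead of holding a thread:

```java
import java.util.concurrent.ThreadPoolExecutor;

// Illustrative only: park a process instead of blocking a scheduler thread.
public class WaitingThreadSketch {

    enum State { RUNNING, WAITING_THREAD }

    static class ProcessInstance {
        long id;
        State state;
    }

    private final ThreadPoolExecutor masterExecPool;

    WaitingThreadSketch(ThreadPoolExecutor masterExecPool) {
        this.masterExecPool = masterExecPool;
    }

    void submit(ProcessInstance process, Runnable execThread) {
        if (masterExecPool.getActiveCount() >= masterExecPool.getMaximumPoolSize()) {
            // no free thread: suspend instead of circular waiting
            process.state = State.WAITING_THREAD;
            saveRecoverCommand(process.id); // picked up later, FIFO, when a thread frees up
            return;
        }
        process.state = State.RUNNING;
        masterExecPool.execute(execThread);
    }

    private void saveRecoverCommand(long processInstanceId) {
        // sketch: write a "recover waiting thread" command row to the command table
    }
}
```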

##### IV. Fault-tolerant design

Fault tolerance is divided into service fault tolerance and task retry. Service fault tolerance has two kinds: Master fault tolerance and Worker fault tolerance.

###### 1. Downtime fault tolerance

The service fault-tolerance design relies on ZooKeeper's Watcher mechanism. The principle is as follows:

<p align="center">
  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/fault-tolerant.png" alt="EasyScheduler Fault-Tolerant Design" width="40%" />
</p>

The Master watches the directories of the other Masters and of the Workers. If a remove event is detected, process instance fault tolerance or task instance fault tolerance is performed according to the specific business logic.
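
As a sketch of the watch itself (Curator is already a dependency; the znode paths here are placeholders), a `PathChildrenCache` can surface the remove events that trigger failover handling:

```java
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.recipes.cache.PathChildrenCache;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent;
import org.apache.curator.retry.ExponentialBackoffRetry;

public class FailoverWatchSketch {
    public static void main(String[] args) throws Exception {
        CuratorFramework client = CuratorFrameworkFactory.newClient(
                "127.0.0.1:2181", new ExponentialBackoffRetry(1000, 3));
        client.start();

        // watch the ephemeral children under the workers directory
        PathChildrenCache cache = new PathChildrenCache(client, "/escheduler/workers", true);
        cache.getListenable().addListener((c, event) -> {
            if (event.getType() == PathChildrenCacheEvent.Type.CHILD_REMOVED) {
                String deadNode = event.getData().getPath();
                // a worker's ephemeral node vanished: run task-instance failover for it
                System.out.println("failover needed for " + deadNode);
            }
        });
        cache.start();
    }
}
```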

- Master fault-tolerance flow chart:

<p align="center">
  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/fault-tolerant_master.png" alt="Master Fault Tolerance Flowchart" width="40%" />
</p>

After a Master failover is detected via ZooKeeper, the Scheduler thread in EasyScheduler reschedules the affected work: it traverses the DAG to find the "running" and "submitted successfully" tasks. For a "running" task it monitors the status of its task instance. For a "submitted successfully" task it needs to determine whether the task already exists in the Task Queue: if it exists, it likewise monitors the task instance status; if not, it resubmits the task instance.

- Worker fault-tolerance flow chart:

<p align="center">
  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/fault-tolerant_worker.png" alt="Worker Fault Tolerance Flowchart" width="40%" />
</p>

Once the Master Scheduler thread finds a task instance marked "needs fault tolerance", it takes over the task and resubmits it.

Note: because "network jitter" may cause a node to lose its ZooKeeper heartbeat for a short time, producing a remove event for the node, we take the simplest approach: once a node's connection to ZooKeeper times out, the Master or Worker service stops itself directly.

###### 2. Task failure retry

First we must distinguish between task failure retry, process failure recovery, and process failure rerun:

- Task failure retry is at the task level and is performed automatically by the scheduling system. For example, if a shell task is configured with 3 retries, the shell task will be retried at most 3 times after a failed run.
- Process failure recovery is at the process level and is done manually; recovery can only be performed **from the failed node** or **from the current node**.
- Process failure rerun is also at the process level and is done manually; a rerun starts from the start node.

Back to the topic: we divide the task nodes in a workflow into two types.

- One is the business node, which corresponds to an actual script or processing statement, such as a Shell node, MR node, Spark node, or dependent node.
- The other is the logical node, which does no actual script or statement processing but handles the logic of the process flow, such as the sub-process node.

Each **business node** can be configured with a number of failure retries. When such a task node fails, it is automatically retried until it succeeds or exceeds the configured retry count. A **logical node** does not support failure retry, but the tasks inside a logical node do.

If a task in the workflow fails and reaches its maximum retry count, the workflow fails and stops; the failed workflow can then be manually rerun or recovered.

##### V. Task priority design

In the early scheduling design, without priority design and with fair scheduling, a task submitted first might complete at the same time as a task submitted later, with no way to set the priority of a process or task. We have redesigned this as follows:

- Processing order: tasks with a **higher process instance priority** come first; within the **same process instance priority**, tasks with a **higher task priority** come first; within the **same process**, tasks are handled in submission order, from high to low.

- Concretely, the priority is resolved from the JSON of the task instance, and then the **process instance priority_process instance id_task priority_task id** string is saved in the ZooKeeper task queue. When taken from the task queue, a plain string comparison yields the task that should execute first.
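
As a hypothetical illustration of that key layout (the field widths and separators here are invented), plain string ordering on the composed key is enough to pop the highest-priority task first. Note the zero-padding of the numeric IDs, which keeps string order consistent with numeric order:

```java
import java.util.SortedSet;
import java.util.TreeSet;

public class PriorityKeySketch {

    // HIGHEST has the smallest ordinal, so it sorts first lexicographically
    enum Priority { HIGHEST, HIGH, MEDIUM, LOW, LOWEST }

    static String queueKey(Priority processPriority, long processInstanceId,
                           Priority taskPriority, long taskId) {
        // process instance priority _ process instance id _ task priority _ task id
        return String.format("%d_%010d_%d_%010d",
                processPriority.ordinal(), processInstanceId,
                taskPriority.ordinal(), taskId);
    }

    public static void main(String[] args) {
        SortedSet<String> queue = new TreeSet<>(); // stands in for the ZooKeeper queue
        queue.add(queueKey(Priority.MEDIUM, 7, Priority.HIGH, 21));
        queue.add(queueKey(Priority.HIGHEST, 9, Priority.LOW, 33));
        queue.add(queueKey(Priority.MEDIUM, 7, Priority.HIGHEST, 18));

        // plain string comparison: the HIGHEST process instance's task comes out first
        System.out.println(queue.first()); // 0_0000000009_3_0000000033
    }
}
```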

- The priority of a process definition means that some processes need to be handled before others. It can be configured when the process is started or scheduled to start. There are 5 levels: HIGHEST, HIGH, MEDIUM, LOW, and LOWEST, as shown below:

<p align="center">
  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/process_priority.png" alt="Process Priority Configuration" width="40%" />
</p>

- The priority of a task is likewise divided into 5 levels: HIGHEST, HIGH, MEDIUM, LOW, and LOWEST, as shown below:

<p align="center">
  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/task_priority.png" alt="Task Priority Configuration" width="35%" />
</p>

##### VI. Logback and gRPC for log access

- Since the Web (UI) and Worker are not necessarily on the same machine, viewing logs is not like reading a local file. There are two options:
  - Put the logs on an ES search engine
  - Obtain remote log information through gRPC communication
- To keep EasyScheduler as lightweight as possible, gRPC was chosen to implement remote log access.

<p align="center">
  <img src="https://analysys.github.io/easyscheduler_docs_cn/images/grpc.png" alt="gRPC remote access" width="50%" />
</p>

- We use a custom Logback FileAppender and Filter to generate one log file per task instance.
- The main part of the FileAppender is as follows:

```java
/**
 * task log appender
 */
public class TaskLogAppender extends FileAppender<ILoggingEvent> {

    ...

    @Override
    protected void append(ILoggingEvent event) {
        if (currentlyActiveFile == null) {
            currentlyActiveFile = getFile();
        }
        String activeFile = currentlyActiveFile;
        // thread name: taskThreadName-processDefineId_processInstanceId_taskInstanceId
        String threadName = event.getThreadName();
        String[] threadNameArr = threadName.split("-");
        // logId = processDefineId_processInstanceId_taskInstanceId
        String logId = threadNameArr[1];
        ...
        super.subAppend(event);
    }
}
```

This generates one log per task instance, at a path of the form `/process definition id/process instance id/task instance id.log`.

- The Filter matches thread names starting with TaskLogInfo:
- TaskLogFilter is implemented as follows:

```java
/**
 * task log filter
 */
public class TaskLogFilter extends Filter<ILoggingEvent> {

    @Override
    public FilterReply decide(ILoggingEvent event) {
        if (event.getThreadName().startsWith("TaskLogInfo-")) {
            return FilterReply.ACCEPT;
        }
        return FilterReply.DENY;
    }
}
```
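
For context, wiring an appender and filter like these together in `logback.xml` would look roughly like the sketch below; the appender name and package are placeholders, not the project's actual configuration:

```xml
<configuration>
  <!-- route task-thread events through the custom appender/filter pair -->
  <appender name="TASKLOGFILE" class="com.example.log.TaskLogAppender">
    <filter class="com.example.log.TaskLogFilter"/>
    <encoder>
      <pattern>%date %level %msg%n</pattern>
    </encoder>
  </appender>

  <root level="INFO">
    <appender-ref ref="TASKLOGFILE"/>
  </root>
</configuration>
```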

### Summary

Starting from scheduling, this paper has introduced the architecture principles and implementation ideas of EasyScheduler, a distributed workflow scheduling system for big data. To be continued.

@ -0,0 +1,207 @@

# Backend Deployment Document

There are two deployment modes for the backend:

- automatic deployment
- compile from source and then deploy

## Preparations

Download the latest version of the installation package from [gitee download](https://gitee.com/easyscheduler/EasyScheduler/attach_files/) or [github download](https://github.com/analysys/EasyScheduler/releases): escheduler-backend-x.x.x.tar.gz (back end, referred to as escheduler-backend) and escheduler-ui-x.x.x.tar.gz (front end, referred to as escheduler-ui).

#### Preparations 1: Install basic software (install the required items yourself)

* [Mysql](http://geek.analysys.cn/topic/124) (5.5+) : Mandatory
* [JDK](https://www.oracle.com/technetwork/java/javase/downloads/index.html) (1.8+) : Mandatory
* [ZooKeeper](https://www.jianshu.com/p/de90172ea680) (3.4.6+) : Mandatory
* [Hadoop](https://blog.csdn.net/Evankaka/article/details/51612437) (2.6+) : Optional; needed if you use the resource upload function or MapReduce task submission (uploaded resource files are currently stored on HDFS)
* [Hive](https://staroon.pro/2017/12/09/HiveInstall/) (1.2.1) : Optional; needed for Hive task submission
* Spark (1.x, 2.x) : Optional; needed for Spark task submission
* PostgreSQL (8.2.15+) : Optional; needed for PostgreSQL stored procedures

```
Note: EasyScheduler itself does not rely on Hadoop, Hive, Spark, or PostgreSQL; it only calls their clients to run the corresponding tasks.
```

#### Preparations 2: Create the deployment user

- Create a deployment user on every machine that scheduling will be deployed to. Because the worker service executes jobs via `sudo -u {linux-user}`, the deployment user needs passwordless sudo privileges.

```
vi /etc/sudoers

# For example, if the deployment user is the escheduler account
escheduler  ALL=(ALL)  NOPASSWD: ALL

# And you need to comment out the Defaults requiretty line
#Defaults    requiretty
```

#### Preparations 3: Passwordless SSH configuration

Configure passwordless SSH login from the deployment machine to the other installation machines. If you install easyscheduler on the deployment machine itself, also configure passwordless login to localhost.

- [Connect the host and other machines SSH](http://geek.analysys.cn/topic/113)
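
As a sketch (run as the deployment user; the hostnames are placeholders), the standard key-based setup is:

```
# generate a key pair once on the deployment machine
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa

# copy the public key to every machine in the cluster (and to localhost)
ssh-copy-id escheduler@192.168.xx.xx
ssh-copy-id escheduler@localhost

# verify: this must log in without prompting for a password
ssh escheduler@192.168.xx.xx "hostname"
```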

#### Preparations 4: Database initialization

* Create the database and account

Execute the following commands to create the database and account:

```
CREATE DATABASE escheduler DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
GRANT ALL PRIVILEGES ON escheduler.* TO '{user}'@'%' IDENTIFIED BY '{password}';
GRANT ALL PRIVILEGES ON escheduler.* TO '{user}'@'localhost' IDENTIFIED BY '{password}';
flush privileges;
```

* Create tables and import basic data

Modify the following properties in ./conf/dao/data_source.properties:

```
spring.datasource.url
spring.datasource.username
spring.datasource.password
```

Execute the script that creates the tables and imports the basic data:

```
sh ./script/create-escheduler.sh
```

#### Preparations 5: Modify the deployment directory permissions and operation parameters

Layout of the escheduler-backend directory:

```
bin : basic service startup scripts
conf : project configuration files
lib : the jar packages the project relies on, including the individual module jars and third-party jars
script : cluster start/stop and service-monitor start/stop scripts
sql : the SQL files the project relies on
install.sh : one-click deployment script
```

- Modify permissions (change 'deployUser' to the corresponding deployment user) so that the deployment user has operational privileges on the escheduler-backend directory

    `sudo chown -R deployUser:deployUser escheduler-backend`

- Modify the `.escheduler_env.sh` environment variable file in the conf/env/ directory

- Modify the deployment parameters (according to your servers and business needs):

    - Modify the parameters in **install.sh**, replacing them with the values your business requires
    - The 'MonitorServerState' switch variable, added in version 1.0.3, controls whether to start the self-restart script (which monitors the master and worker status and restarts them automatically if they go offline). The default value of "false" means the self-restart script is not started; change it to "true" if you need it.
    - The 'hdfsStartupSate' switch variable controls whether to enable HDFS.
      The default value of "false" means HDFS is not enabled.
      Change the variable to 'true' if you want to use HDFS; you also need to create the HDFS root path yourself, i.e., 'hdfsPath' in install.sh.

- If you use HDFS-related functions, you need to copy **hdfs-site.xml** and **core-site.xml** to the conf directory

## Deployment

Automated deployment is recommended; experienced users can also deploy from source.

### Automated Deployment

- Install the zookeeper tools

    `pip install kazoo`

- Switch to the deployment user and run the one-click deployment

    `sh install.sh`

- Use the `jps` command to check whether the services have started (`jps` comes with the `Java JDK`)

```
MasterServer         ----- Master Service
WorkerServer         ----- Worker Service
LoggerServer         ----- Logger Service
ApiApplicationServer ----- API Service
AlertServer          ----- Alert Service
```

If all the services are normal, the automated deployment succeeded.

After successful deployment, the logs are stored in the logs folder:

```
logs/
├── escheduler-alert-server.log
├── escheduler-master-server.log
├── escheduler-worker-server.log
├── escheduler-api-server.log
└── escheduler-logger-server.log
```

### Compile source code to deploy

After downloading the release source package, unzip it and enter the root directory.

* Execute the compile command:

```
mvn -U clean package assembly:assembly -Dmaven.test.skip=true
```

* View the output

After a normal build, ./target/escheduler-{version}/ is generated in the current directory.

### Commonly used start-and-stop service commands (for what each service does, see the System Architecture Design)

* Stop all services in the cluster

    `sh ./bin/stop-all.sh`

* Start all services in the cluster

    `sh ./bin/start-all.sh`

* Start and stop one master server

```
sh ./bin/escheduler-daemon.sh start master-server
sh ./bin/escheduler-daemon.sh stop master-server
```

* Start and stop one worker server

```
sh ./bin/escheduler-daemon.sh start worker-server
sh ./bin/escheduler-daemon.sh stop worker-server
```

* Start and stop the api server

```
sh ./bin/escheduler-daemon.sh start api-server
sh ./bin/escheduler-daemon.sh stop api-server
```

* Start and stop the logger server

```
sh ./bin/escheduler-daemon.sh start logger-server
sh ./bin/escheduler-daemon.sh stop logger-server
```

* Start and stop the alert server

```
sh ./bin/escheduler-daemon.sh start alert-server
sh ./bin/escheduler-daemon.sh stop alert-server
```

## Database Upgrade

Database upgrade is a function added in version 1.0.2. The database can be upgraded automatically by executing the following command:

```
sh ./script/upgrade-escheduler.sh
```

@ -0,0 +1,48 @@

# Backend development documentation

## Environmental requirements

* [Mysql](http://geek.analysys.cn/topic/124) (5.5+) : Must be installed
* [JDK](https://www.oracle.com/technetwork/java/javase/downloads/index.html) (1.8+) : Must be installed
* [ZooKeeper](https://mirrors.tuna.tsinghua.edu.cn/apache/zookeeper) (3.4.6+) : Must be installed
* [Maven](http://maven.apache.org/download.cgi) (3.3+) : Must be installed

Because the escheduler-rpc module in EasyScheduler uses gRPC, you need to use Maven to compile the generated classes.
For those not familiar with Maven, please refer to: [maven in five minutes](http://maven.apache.org/guides/getting-started/maven-in-five-minutes.html) (3.3+)

Installation guide: http://maven.apache.org/install.html

## Project compilation

After importing the EasyScheduler source code into a development tool such as IDEA, first convert it to a Maven project (right click and select "Add Framework Support").

* Execute the compile command:

```
mvn -U clean package assembly:assembly -Dmaven.test.skip=true
```

* View the output

After a normal build, ./target/escheduler-{version}/ is generated in the current directory.

```
bin
conf
lib
script
sql
install.sh
```

- Description

```
bin : basic service startup scripts
conf : project configuration files
lib : the jar packages the project depends on, including the various module jars and third-party jars
script : cluster start, stop, and service-monitor start/stop scripts
sql : the SQL files the project depends on
install.sh : one-click deployment script
```

@ -0,0 +1,23 @@

{
    "title": "EasyScheduler",
    "author": "",
    "description": "Scheduler",
    "language": "en-US",
    "gitbook": "3.2.3",
    "styles": {
        "website": "./styles/website.css"
    },
    "structure": {
        "readme": "README.md"
    },
    "plugins": [
        "expandable-chapters",
        "insert-logo-link"
    ],
    "pluginsConfig": {
        "insert-logo-link": {
            "src": "http://geek.analysys.cn/static/upload/236/2019-03-29/379450b4-7919-4707-877c-4d33300377d4.png",
            "url": "https://github.com/analysys/EasyScheduler"
        }
    }
}
@ -0,0 +1,115 @@

# Frontend Deployment Document

The front end has three deployment modes: automated deployment, manual deployment, and compiled-source deployment.

## Preparations

#### Download the installation package

Please download the latest version of the installation package from [gitee](https://gitee.com/easyscheduler/EasyScheduler/attach_files/).

After downloading escheduler-ui-x.x.x.tar.gz, decompress it with `tar -zxvf escheduler-ui-x.x.x.tar.gz ./` and enter the `escheduler-ui` directory.

## Deployment

Automated deployment is recommended among the following ways.

### Automated Deployment

Edit the installation file `vi install-escheduler-ui.sh` in the `escheduler-ui` directory.

Change the front-end access port and the proxied back-end interface address:

```
# Configure the front-end access port
esc_proxy="8888"

# Configure the proxied back-end interface
esc_proxy_port="http://192.168.xx.xx:12345"
```

> Front-end automated deployment relies on the Linux `yum` tool; please install and update `yum` before deployment.

Under this directory, execute `./install-escheduler-ui.sh`.

### Manual Deployment

Install the epel source: `yum install epel-release -y`

Install Nginx: `yum install nginx -y`

> #### Nginx configuration file address

```
/etc/nginx/conf.d/default.conf
```

> #### Configuration information (modify for your environment)

```
server {
    listen       8888;  # access port
    server_name  localhost;
    #charset koi8-r;
    #access_log  /var/log/nginx/host.access.log  main;
    location / {
        root   /xx/dist;  # the dist directory unpacked from the front-end package above (modify for your environment)
        index  index.html index.htm;
    }
    location /escheduler {
        proxy_pass http://192.168.xx.xx:12345;  # interface address (modify for your environment)
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header x_real_ipP $remote_addr;
        proxy_set_header remote_addr $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_http_version 1.1;
        proxy_connect_timeout 4s;
        proxy_read_timeout 30s;
        proxy_send_timeout 12s;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }
    #error_page  404              /404.html;
    # redirect server error pages to the static page /50x.html
    #
    error_page   500 502 503 504  /50x.html;
    location = /50x.html {
        root   /usr/share/nginx/html;
    }
}
```

> #### Restart the Nginx service

```
systemctl restart nginx
```

#### Nginx commands

- enable: `systemctl enable nginx`
- restart: `systemctl restart nginx`
- status: `systemctl status nginx`

## FAQ

#### Upload file size limit

Edit the configuration file `vi /etc/nginx/nginx.conf`:

```
# change the upload size
client_max_body_size 1024m;
```