Simon
5 years ago
committed by
GitHub
1012 changed files with 67832 additions and 58715 deletions
@ -1,17 +0,0 @@
|
||||
name: CI |
||||
|
||||
on: [push] |
||||
|
||||
jobs: |
||||
build: |
||||
|
||||
runs-on: ubuntu-latest |
||||
|
||||
steps: |
||||
- uses: actions/checkout@v1 |
||||
- name: Set up JDK 1.8 |
||||
uses: actions/setup-java@v1 |
||||
with: |
||||
java-version: 1.8 |
||||
- name: Build with Maven |
||||
run: mvn apache-rat:check --file pom.xml |
@ -0,0 +1,67 @@
|
||||
# |
||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
||||
# contributor license agreements. See the NOTICE file distributed with |
||||
# this work for additional information regarding copyright ownership. |
||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
# (the "License"); you may not use this file except in compliance with |
||||
# the License. You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
# |
||||
|
||||
on: ["push", "pull_request"] |
||||
env: |
||||
DOCKER_DIR: ./docker |
||||
LOG_DIR: /tmp/dolphinscheduler |
||||
|
||||
name: e2e Test |
||||
|
||||
jobs: |
||||
|
||||
build: |
||||
name: Test |
||||
runs-on: ubuntu-latest |
||||
steps: |
||||
|
||||
- uses: actions/checkout@v1 |
||||
with: |
||||
submodules: true |
||||
- uses: actions/cache@v1 |
||||
with: |
||||
path: ~/.m2/repository |
||||
key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }} |
||||
restore-keys: | |
||||
${{ runner.os }}-maven- |
||||
- name: Build Image |
||||
run: | |
||||
export VERSION=`cat $(pwd)/pom.xml| grep "SNAPSHOT</version>" | awk -F "-SNAPSHOT" '{print $1}' | awk -F ">" '{print $2}'` |
||||
sh ./dockerfile/hooks/build |
||||
- name: Docker Run |
||||
run: | |
||||
VERSION=`cat $(pwd)/pom.xml| grep "SNAPSHOT</version>" | awk -F "-SNAPSHOT" '{print $1}' | awk -F ">" '{print $2}'` |
||||
docker run -dit -e POSTGRESQL_USERNAME=test -e POSTGRESQL_PASSWORD=test -p 8888:8888 dolphinscheduler:$VERSION all |
||||
- name: Check Server Status |
||||
run: sh ./dockerfile/hooks/check |
||||
- name: Prepare e2e env |
||||
run: | |
||||
sudo apt-get install -y libxss1 libappindicator1 libindicator7 xvfb unzip |
||||
wget https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb |
||||
sudo dpkg -i google-chrome*.deb |
||||
sudo apt-get install -f -y |
||||
wget -N https://chromedriver.storage.googleapis.com/80.0.3987.106/chromedriver_linux64.zip |
||||
unzip chromedriver_linux64.zip |
||||
sudo mv -f chromedriver /usr/local/share/chromedriver |
||||
sudo ln -s /usr/local/share/chromedriver /usr/local/bin/chromedriver |
||||
- name: Run e2e Test |
||||
run: cd ./e2e && mvn -B clean test |
||||
- name: Collect logs |
||||
run: | |
||||
mkdir -p ${LOG_DIR} |
||||
docker logs dolphinscheduler > ${LOG_DIR}/dolphinscheduler.txt |
||||
continue-on-error: true |
@ -0,0 +1,71 @@
|
||||
# |
||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
||||
# contributor license agreements. See the NOTICE file distributed with |
||||
# this work for additional information regarding copyright ownership. |
||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
# (the "License"); you may not use this file except in compliance with |
||||
# the License. You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
# |
||||
|
||||
on: ["push", "pull_request"] |
||||
env: |
||||
DOCKER_DIR: ./docker |
||||
LOG_DIR: /tmp/dolphinscheduler |
||||
|
||||
name: Unit Test |
||||
|
||||
jobs: |
||||
|
||||
build: |
||||
name: Build |
||||
runs-on: ubuntu-latest |
||||
steps: |
||||
|
||||
- uses: actions/checkout@v1 |
||||
with: |
||||
submodules: true |
||||
- uses: actions/cache@v1 |
||||
with: |
||||
path: ~/.m2/repository |
||||
key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }} |
||||
restore-keys: | |
||||
${{ runner.os }}-maven- |
||||
- name: Bootstrap database |
||||
run: cd ${DOCKER_DIR} && docker-compose up -d |
||||
- name: Set up JDK 1.8 |
||||
uses: actions/setup-java@v1 |
||||
with: |
||||
java-version: 1.8 |
||||
- name: Compile |
||||
run: | |
||||
export MAVEN_OPTS='-Dmaven.repo.local=.m2/repository -XX:+TieredCompilation -XX:TieredStopAtLevel=1 -XX:+CMSClassUnloadingEnabled -XX:+UseConcMarkSweepGC -XX:-UseGCOverheadLimit -Xmx3g' |
||||
mvn test -B -Dmaven.test.skip=false |
||||
CODECOV_TOKEN="09c2663f-b091-4258-8a47-c981827eb29a" bash <(curl -s https://codecov.io/bash) |
||||
- name: Run SonarCloud Analysis |
||||
run: > |
||||
mvn verify --batch-mode |
||||
org.sonarsource.scanner.maven:sonar-maven-plugin:3.6.1.1688:sonar |
||||
-Dsonar.coverage.jacoco.xmlReportPaths=target/site/jacoco/jacoco.xml |
||||
-Dmaven.test.skip=true |
||||
-Dsonar.host.url=https://sonarcloud.io |
||||
-Dsonar.organization=apache |
||||
-Dsonar.core.codeCoveragePlugin=jacoco |
||||
-Dsonar.projectKey=apache-dolphinscheduler |
||||
-Dsonar.login=e4058004bc6be89decf558ac819aa1ecbee57682 |
||||
env: |
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} |
||||
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} |
||||
- name: Collect logs |
||||
run: | |
||||
mkdir -p ${LOG_DIR} |
||||
cd ${DOCKER_DIR} |
||||
docker-compose logs db > ${LOG_DIR}/db.txt |
||||
continue-on-error: true |
@ -0,0 +1,158 @@
|
||||
{ |
||||
"DOLPHIN": { |
||||
"service": [], |
||||
"DOLPHIN_API": [ |
||||
{ |
||||
"name": "dolphin_api_port_check", |
||||
"label": "dolphin_api_port_check", |
||||
"description": "dolphin_api_port_check.", |
||||
"interval": 10, |
||||
"scope": "ANY", |
||||
"source": { |
||||
"type": "PORT", |
||||
"uri": "{{dolphin-application-api/server.port}}", |
||||
"default_port": 12345, |
||||
"reporting": { |
||||
"ok": { |
||||
"text": "TCP OK - {0:.3f}s response on port {1}" |
||||
}, |
||||
"warning": { |
||||
"text": "TCP OK - {0:.3f}s response on port {1}", |
||||
"value": 1.5 |
||||
}, |
||||
"critical": { |
||||
"text": "Connection failed: {0} to {1}:{2}", |
||||
"value": 5.0 |
||||
} |
||||
} |
||||
} |
||||
} |
||||
], |
||||
"DOLPHIN_LOGGER": [ |
||||
{ |
||||
"name": "dolphin_logger_port_check", |
||||
"label": "dolphin_logger_port_check", |
||||
"description": "dolphin_logger_port_check.", |
||||
"interval": 10, |
||||
"scope": "ANY", |
||||
"source": { |
||||
"type": "PORT", |
||||
"uri": "{{dolphin-common/loggerserver.rpc.port}}", |
||||
"default_port": 50051, |
||||
"reporting": { |
||||
"ok": { |
||||
"text": "TCP OK - {0:.3f}s response on port {1}" |
||||
}, |
||||
"warning": { |
||||
"text": "TCP OK - {0:.3f}s response on port {1}", |
||||
"value": 1.5 |
||||
}, |
||||
"critical": { |
||||
"text": "Connection failed: {0} to {1}:{2}", |
||||
"value": 5.0 |
||||
} |
||||
} |
||||
} |
||||
} |
||||
], |
||||
"DOLPHIN_MASTER": [ |
||||
{ |
||||
"name": "DOLPHIN_MASTER_CHECK", |
||||
"label": "check dolphin scheduler master status", |
||||
"description": "", |
||||
"interval":10, |
||||
"scope": "HOST", |
||||
"enabled": true, |
||||
"source": { |
||||
"type": "SCRIPT", |
||||
"path": "DOLPHIN/1.2.1/package/alerts/alert_dolphin_scheduler_status.py", |
||||
"parameters": [ |
||||
|
||||
{ |
||||
"name": "connection.timeout", |
||||
"display_name": "Connection Timeout", |
||||
"value": 5.0, |
||||
"type": "NUMERIC", |
||||
"description": "The maximum time before this alert is considered to be CRITICAL", |
||||
"units": "seconds", |
||||
"threshold": "CRITICAL" |
||||
}, |
||||
{ |
||||
"name": "alertName", |
||||
"display_name": "alertName", |
||||
"value": "DOLPHIN_MASTER", |
||||
"type": "STRING", |
||||
"description": "alert name" |
||||
} |
||||
] |
||||
} |
||||
} |
||||
], |
||||
"DOLPHIN_WORKER": [ |
||||
{ |
||||
"name": "DOLPHIN_WORKER_CHECK", |
||||
"label": "check dolphin scheduler worker status", |
||||
"description": "", |
||||
"interval":10, |
||||
"scope": "HOST", |
||||
"enabled": true, |
||||
"source": { |
||||
"type": "SCRIPT", |
||||
"path": "DOLPHIN/1.2.1/package/alerts/alert_dolphin_scheduler_status.py", |
||||
"parameters": [ |
||||
|
||||
{ |
||||
"name": "connection.timeout", |
||||
"display_name": "Connection Timeout", |
||||
"value": 5.0, |
||||
"type": "NUMERIC", |
||||
"description": "The maximum time before this alert is considered to be CRITICAL", |
||||
"units": "seconds", |
||||
"threshold": "CRITICAL" |
||||
}, |
||||
{ |
||||
"name": "alertName", |
||||
"display_name": "alertName", |
||||
"value": "DOLPHIN_WORKER", |
||||
"type": "STRING", |
||||
"description": "alert name" |
||||
} |
||||
] |
||||
} |
||||
} |
||||
], |
||||
"DOLPHIN_ALERT": [ |
||||
{ |
||||
"name": "DOLPHIN_DOLPHIN_ALERT_CHECK", |
||||
"label": "check dolphin scheduler alert status", |
||||
"description": "", |
||||
"interval":10, |
||||
"scope": "HOST", |
||||
"enabled": true, |
||||
"source": { |
||||
"type": "SCRIPT", |
||||
"path": "DOLPHIN/1.2.1/package/alerts/alert_dolphin_scheduler_status.py", |
||||
"parameters": [ |
||||
|
||||
{ |
||||
"name": "connection.timeout", |
||||
"display_name": "Connection Timeout", |
||||
"value": 5.0, |
||||
"type": "NUMERIC", |
||||
"description": "The maximum time before this alert is considered to be CRITICAL", |
||||
"units": "seconds", |
||||
"threshold": "CRITICAL" |
||||
}, |
||||
{ |
||||
"name": "alertName", |
||||
"display_name": "alertName", |
||||
"value": "DOLPHIN_ALERT", |
||||
"type": "STRING", |
||||
"description": "alert name" |
||||
} |
||||
] |
||||
} |
||||
} |
||||
] |
||||
} |
||||
} |
@ -0,0 +1,144 @@
|
||||
<!-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
<configuration> |
||||
<property> |
||||
<name>alert.type</name> |
||||
<value>EMAIL</value> |
||||
<description>alert type is EMAIL/SMS</description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>mail.protocol</name> |
||||
<value>SMTP</value> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>mail.server.host</name> |
||||
<value>xxx.xxx.com</value> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>mail.server.port</name> |
||||
<value>25</value> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>mail.sender</name> |
||||
<value>admin</value> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>mail.user</name> |
||||
<value>admin</value> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>mail.passwd</name> |
||||
<value>000000</value> |
||||
<description></description> |
||||
<property-type>PASSWORD</property-type> |
||||
<value-attributes> |
||||
<type>password</type> |
||||
</value-attributes> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
|
||||
<property> |
||||
<name>mail.smtp.starttls.enable</name> |
||||
<value>true</value> |
||||
<value-attributes> |
||||
<type>boolean</type> |
||||
</value-attributes> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>mail.smtp.ssl.enable</name> |
||||
<value>true</value> |
||||
<value-attributes> |
||||
<type>boolean</type> |
||||
</value-attributes> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>mail.smtp.ssl.trust</name> |
||||
<value>xxx.xxx.com</value> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
|
||||
<property> |
||||
<name>xls.file.path</name> |
||||
<value>/tmp/xls</value> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
|
||||
<property> |
||||
<name>enterprise.wechat.enable</name> |
||||
<value>false</value> |
||||
<description></description> |
||||
<value-attributes> |
||||
<type>value-list</type> |
||||
<entries> |
||||
<entry> |
||||
<value>true</value> |
||||
<label>Enabled</label> |
||||
</entry> |
||||
<entry> |
||||
<value>false</value> |
||||
<label>Disabled</label> |
||||
</entry> |
||||
</entries> |
||||
<selection-cardinality>1</selection-cardinality> |
||||
</value-attributes> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>enterprise.wechat.corp.id</name> |
||||
<value>wechatId</value> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>enterprise.wechat.secret</name> |
||||
<value>secret</value> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>enterprise.wechat.agent.id</name> |
||||
<value>agentId</value> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>enterprise.wechat.users</name> |
||||
<value>wechatUsers</value> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
</configuration> |
@ -0,0 +1,71 @@
|
||||
<!-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
<configuration> |
||||
<property> |
||||
<name>server.port</name> |
||||
<value>12345</value> |
||||
<description> |
||||
server port |
||||
</description> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
</property> |
||||
<property> |
||||
<name>server.servlet.session.timeout</name> |
||||
<value>7200</value> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<description> |
||||
</description> |
||||
</property> |
||||
<property> |
||||
<name>spring.servlet.multipart.max-file-size</name> |
||||
<value>1024</value> |
||||
<value-attributes> |
||||
<unit>MB</unit> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<description> |
||||
</description> |
||||
</property> |
||||
<property> |
||||
<name>spring.servlet.multipart.max-request-size</name> |
||||
<value>1024</value> |
||||
<value-attributes> |
||||
<unit>MB</unit> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<description> |
||||
</description> |
||||
</property> |
||||
<property> |
||||
<name>server.jetty.max-http-post-size</name> |
||||
<value>5000000</value> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<description> |
||||
</description> |
||||
</property> |
||||
<property> |
||||
<name>spring.messages.encoding</name> |
||||
<value>UTF-8</value> |
||||
<description></description> |
||||
</property> |
||||
</configuration> |
@ -0,0 +1,467 @@
|
||||
<!-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
<configuration> |
||||
<property> |
||||
<name>spring.datasource.initialSize</name> |
||||
<value>5</value> |
||||
<description> |
||||
Init connection number |
||||
</description> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>spring.datasource.minIdle</name> |
||||
<value>5</value> |
||||
<description> |
||||
Min connection number |
||||
</description> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>spring.datasource.maxActive</name> |
||||
<value>50</value> |
||||
<description> |
||||
Max connection number |
||||
</description> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>spring.datasource.maxWait</name> |
||||
<value>60000</value> |
||||
<description> |
||||
Max wait time for get a connection in milliseconds. |
||||
If configuring maxWait, fair locks are enabled by default and concurrency efficiency decreases. |
||||
If necessary, unfair locks can be used by configuring the useUnfairLock attribute to true. |
||||
</description> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>spring.datasource.timeBetweenEvictionRunsMillis</name> |
||||
<value>60000</value> |
||||
<description> |
||||
Milliseconds for check to close free connections |
||||
</description> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>spring.datasource.timeBetweenConnectErrorMillis</name> |
||||
<value>60000</value> |
||||
<description> |
||||
The Destroy thread detects the connection interval and closes the physical connection in milliseconds |
||||
if the connection idle time is greater than or equal to minEvictableIdleTimeMillis. |
||||
</description> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>spring.datasource.minEvictableIdleTimeMillis</name> |
||||
<value>300000</value> |
||||
<description> |
||||
The longest time a connection remains idle without being evicted, in milliseconds |
||||
</description> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>spring.datasource.validationQuery</name> |
||||
<value>SELECT 1</value> |
||||
<description> |
||||
The SQL used to check whether the connection is valid requires a query statement. |
||||
If validation Query is null, testOnBorrow, testOnReturn, and testWhileIdle will not work. |
||||
</description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>spring.datasource.validationQueryTimeout</name> |
||||
<value>3</value> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<description> |
||||
Check whether the connection is valid for timeout, in seconds |
||||
</description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>spring.datasource.testWhileIdle</name> |
||||
<value>true</value> |
||||
<value-attributes> |
||||
<type>boolean</type> |
||||
</value-attributes> |
||||
<description> |
||||
When applying for a connection, |
||||
if it is detected that the connection is idle longer than time Between Eviction Runs Millis, |
||||
validation Query is performed to check whether the connection is valid |
||||
</description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>spring.datasource.testOnBorrow</name> |
||||
<value>true</value> |
||||
<value-attributes> |
||||
<type>boolean</type> |
||||
</value-attributes> |
||||
<description> |
||||
Execute validation to check if the connection is valid when applying for a connection |
||||
</description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>spring.datasource.testOnReturn</name> |
||||
<value>false</value> |
||||
<value-attributes> |
||||
<type>boolean</type> |
||||
</value-attributes> |
||||
<description> |
||||
Execute validation to check if the connection is valid when the connection is returned |
||||
</description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>spring.datasource.defaultAutoCommit</name> |
||||
<value>true</value> |
||||
<value-attributes> |
||||
<type>boolean</type> |
||||
</value-attributes> |
||||
<description> |
||||
</description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>spring.datasource.keepAlive</name> |
||||
<value>false</value> |
||||
<value-attributes> |
||||
<type>boolean</type> |
||||
</value-attributes> |
||||
<description> |
||||
</description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
|
||||
<property> |
||||
<name>spring.datasource.poolPreparedStatements</name> |
||||
<value>true</value> |
||||
<value-attributes> |
||||
<type>boolean</type> |
||||
</value-attributes> |
||||
<description> |
||||
Open PSCache, specify count PSCache for every connection |
||||
</description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>spring.datasource.maxPoolPreparedStatementPerConnectionSize</name> |
||||
<value>20</value> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>spring.datasource.spring.datasource.filters</name> |
||||
<value>stat,wall,log4j</value> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>spring.datasource.connectionProperties</name> |
||||
<value>druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000</value> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
|
||||
<property> |
||||
<name>mybatis-plus.mapper-locations</name> |
||||
<value>classpath*:/org.apache.dolphinscheduler.dao.mapper/*.xml</value> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>mybatis-plus.typeEnumsPackage</name> |
||||
<value>org.apache.dolphinscheduler.*.enums</value> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>mybatis-plus.typeAliasesPackage</name> |
||||
<value>org.apache.dolphinscheduler.dao.entity</value> |
||||
<description> |
||||
Entity scan, where multiple packages are separated by a comma or semicolon |
||||
</description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>mybatis-plus.global-config.db-config.id-type</name> |
||||
<value>AUTO</value> |
||||
<value-attributes> |
||||
<type>value-list</type> |
||||
<entries> |
||||
<entry> |
||||
<value>AUTO</value> |
||||
<label>AUTO</label> |
||||
</entry> |
||||
<entry> |
||||
<value>INPUT</value> |
||||
<label>INPUT</label> |
||||
</entry> |
||||
<entry> |
||||
<value>ID_WORKER</value> |
||||
<label>ID_WORKER</label> |
||||
</entry> |
||||
<entry> |
||||
<value>UUID</value> |
||||
<label>UUID</label> |
||||
</entry> |
||||
</entries> |
||||
<selection-cardinality>1</selection-cardinality> |
||||
</value-attributes> |
||||
<description> |
||||
Primary key type AUTO:" database ID AUTO ", |
||||
INPUT:" user INPUT ID", |
||||
ID_WORKER:" global unique ID (numeric type unique ID)", |
||||
UUID:" global unique ID UUID"; |
||||
</description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>mybatis-plus.global-config.db-config.field-strategy</name> |
||||
<value>NOT_NULL</value> |
||||
<value-attributes> |
||||
<type>value-list</type> |
||||
<entries> |
||||
<entry> |
||||
<value>IGNORED</value> |
||||
<label>IGNORED</label> |
||||
</entry> |
||||
<entry> |
||||
<value>NOT_NULL</value> |
||||
<label>NOT_NULL</label> |
||||
</entry> |
||||
<entry> |
||||
<value>NOT_EMPTY</value> |
||||
<label>NOT_EMPTY</label> |
||||
</entry> |
||||
</entries> |
||||
<selection-cardinality>1</selection-cardinality> |
||||
</value-attributes> |
||||
<description> |
||||
Field policy IGNORED:" ignore judgment ", |
||||
NOT_NULL:" not NULL judgment "), |
||||
NOT_EMPTY:" not NULL judgment" |
||||
</description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>mybatis-plus.global-config.db-config.column-underline</name> |
||||
<value>true</value> |
||||
<value-attributes> |
||||
<type>boolean</type> |
||||
</value-attributes> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>mybatis-plus.global-config.db-config.logic-delete-value</name> |
||||
<value>1</value> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>mybatis-plus.global-config.db-config.logic-not-delete-value</name> |
||||
<value>0</value> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>mybatis-plus.global-config.db-config.banner</name> |
||||
<value>true</value> |
||||
<value-attributes> |
||||
<type>boolean</type> |
||||
</value-attributes> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
|
||||
<property> |
||||
<name>mybatis-plus.configuration.map-underscore-to-camel-case</name> |
||||
<value>true</value> |
||||
<value-attributes> |
||||
<type>boolean</type> |
||||
</value-attributes> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>mybatis-plus.configuration.cache-enabled</name> |
||||
<value>false</value> |
||||
<value-attributes> |
||||
<type>boolean</type> |
||||
</value-attributes> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>mybatis-plus.configuration.call-setters-on-nulls</name> |
||||
<value>true</value> |
||||
<value-attributes> |
||||
<type>boolean</type> |
||||
</value-attributes> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>mybatis-plus.configuration.jdbc-type-for-null</name> |
||||
<value>null</value> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>master.exec.threads</name> |
||||
<value>100</value> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>master.exec.task.num</name> |
||||
<value>20</value> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>master.heartbeat.interval</name> |
||||
<value>10</value> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>master.task.commit.retryTimes</name> |
||||
<value>5</value> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>master.task.commit.interval</name> |
||||
<value>1000</value> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>master.max.cpuload.avg</name> |
||||
<value>100</value> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>master.reserved.memory</name> |
||||
<value>0.1</value> |
||||
<value-attributes> |
||||
<type>float</type> |
||||
</value-attributes> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>worker.exec.threads</name> |
||||
<value>100</value> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>worker.heartbeat.interval</name> |
||||
<value>10</value> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>worker.fetch.task.num</name> |
||||
<value>3</value> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>worker.max.cpuload.avg</name> |
||||
<value>100</value> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>worker.reserved.memory</name> |
||||
<value>0.1</value> |
||||
<value-attributes> |
||||
<type>float</type> |
||||
</value-attributes> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
|
||||
</configuration> |
@ -0,0 +1,232 @@
|
||||
<!-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
<configuration> |
||||
<property> |
||||
<name>dolphinscheduler.queue.impl</name> |
||||
<value>zookeeper</value> |
||||
<description> |
||||
Task queue implementation, default "zookeeper" |
||||
</description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>zookeeper.dolphinscheduler.root</name> |
||||
<value>/dolphinscheduler</value> |
||||
<description> |
||||
dolphinscheduler root directory |
||||
</description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>zookeeper.session.timeout</name> |
||||
<value>300</value> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<description> |
||||
</description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>zookeeper.connection.timeout</name> |
||||
<value>300</value> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<description> |
||||
</description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>zookeeper.retry.base.sleep</name> |
||||
<value>100</value> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<description> |
||||
</description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>zookeeper.retry.max.sleep</name> |
||||
<value>30000</value> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<description> |
||||
</description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>zookeeper.retry.maxtime</name> |
||||
<value>5</value> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<description> |
||||
</description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
|
||||
<property> |
||||
<name>res.upload.startup.type</name> |
||||
<display-name>Choose Resource Upload Startup Type</display-name> |
||||
<description> |
||||
Resource upload startup type : HDFS,S3,NONE |
||||
</description> |
||||
<value>NONE</value> |
||||
<value-attributes> |
||||
<type>value-list</type> |
||||
<entries> |
||||
<entry> |
||||
<value>HDFS</value> |
||||
<label>HDFS</label> |
||||
</entry> |
||||
<entry> |
||||
<value>S3</value> |
||||
<label>S3</label> |
||||
</entry> |
||||
<entry> |
||||
<value>NONE</value> |
||||
<label>NONE</label> |
||||
</entry> |
||||
</entries> |
||||
<selection-cardinality>1</selection-cardinality> |
||||
</value-attributes> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>hdfs.root.user</name> |
||||
<value>hdfs</value> |
||||
<description> |
||||
Users who have permission to create directories under the HDFS root path |
||||
</description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>data.store2hdfs.basepath</name> |
||||
<value>/dolphinscheduler</value> |
||||
<description> |
||||
Data base dir, resource file will store to this hadoop hdfs path, self configuration, |
||||
please make sure the directory exists on hdfs and have read write permissions。 |
||||
"/dolphinscheduler" is recommended |
||||
</description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>data.basedir.path</name> |
||||
<value>/tmp/dolphinscheduler</value> |
||||
<description> |
||||
User data directory path, self configuration, |
||||
please make sure the directory exists and have read write permissions |
||||
</description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>hadoop.security.authentication.startup.state</name> |
||||
<value>false</value> |
||||
<value-attributes> |
||||
<type>value-list</type> |
||||
<entries> |
||||
<entry> |
||||
<value>true</value> |
||||
<label>Enabled</label> |
||||
</entry> |
||||
<entry> |
||||
<value>false</value> |
||||
<label>Disabled</label> |
||||
</entry> |
||||
</entries> |
||||
<selection-cardinality>1</selection-cardinality> |
||||
</value-attributes> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>java.security.krb5.conf.path</name> |
||||
<value>/opt/krb5.conf</value> |
||||
<description> |
||||
java.security.krb5.conf path |
||||
</description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>login.user.keytab.username</name> |
||||
<value>hdfs-mycluster@ESZ.COM</value> |
||||
<description> |
||||
LoginUserFromKeytab user |
||||
</description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>login.user.keytab.path</name> |
||||
<value>/opt/hdfs.headless.keytab</value> |
||||
<description> |
||||
LoginUserFromKeytab path |
||||
</description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>resource.view.suffixs</name> |
||||
<value>txt,log,sh,conf,cfg,py,java,sql,hql,xml,properties</value> |
||||
<description></description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>fs.defaultFS</name> |
||||
<value>hdfs://mycluster:8020</value> |
||||
<description> |
||||
HA or single namenode, |
||||
If namenode ha needs to copy core-site.xml and hdfs-site.xml to the conf directory, |
||||
support s3,for example : s3a://dolphinscheduler |
||||
</description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>fs.s3a.endpoint</name> |
||||
<value>http://host:9010</value> |
||||
<description> |
||||
s3 need,s3 endpoint |
||||
</description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>fs.s3a.access.key</name> |
||||
<value>A3DXS30FO22544RE</value> |
||||
<description> |
||||
s3 need,s3 access key |
||||
</description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>fs.s3a.secret.key</name> |
||||
<value>OloCLq3n+8+sdPHUhJ21XrSxTC+JK</value> |
||||
<description> |
||||
s3 need,s3 secret key |
||||
</description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>loggerserver.rpc.port</name> |
||||
<value>50051</value> |
||||
<value-attributes> |
||||
<type>int</type>F |
||||
</value-attributes> |
||||
<description> |
||||
</description> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
</configuration> |
@ -0,0 +1,123 @@
|
||||
<!-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
<configuration> |
||||
<property> |
||||
<name>dolphin.database.type</name> |
||||
<value>mysql</value> |
||||
<description>Dolphin Scheduler DataBase Type Which Is Select</description> |
||||
<display-name>Dolphin Database Type</display-name> |
||||
<value-attributes> |
||||
<type>value-list</type> |
||||
<entries> |
||||
<entry> |
||||
<value>mysql</value> |
||||
<label>Mysql</label> |
||||
</entry> |
||||
<entry> |
||||
<value>postgresql</value> |
||||
<label>Postgresql</label> |
||||
</entry> |
||||
</entries> |
||||
<selection-cardinality>1</selection-cardinality> |
||||
</value-attributes> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
|
||||
<property> |
||||
<name>dolphin.database.host</name> |
||||
<value></value> |
||||
<display-name>Dolphin Database Host</display-name> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
|
||||
<property> |
||||
<name>dolphin.database.port</name> |
||||
<value></value> |
||||
<display-name>Dolphin Database Port</display-name> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
|
||||
<property> |
||||
<name>dolphin.database.username</name> |
||||
<value></value> |
||||
<display-name>Dolphin Database Username</display-name> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
|
||||
<property> |
||||
<name>dolphin.database.password</name> |
||||
<value></value> |
||||
<display-name>Dolphin Database Password</display-name> |
||||
<property-type>PASSWORD</property-type> |
||||
<value-attributes> |
||||
<type>password</type> |
||||
</value-attributes> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
|
||||
<property> |
||||
<name>dolphin.user</name> |
||||
<value></value> |
||||
<description>Which user to install and admin dolphin scheduler</description> |
||||
<display-name>Deploy User</display-name> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
<property> |
||||
<name>dolphin.group</name> |
||||
<value></value> |
||||
<description>Which user to install and admin dolphin scheduler</description> |
||||
<display-name>Deploy Group</display-name> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
|
||||
<property> |
||||
<name>dolphinscheduler-env-content</name> |
||||
<display-name>Dolphinscheduler Env template</display-name> |
||||
<description>This is the jinja template for dolphinscheduler.env.sh file</description> |
||||
<value># |
||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
||||
# contributor license agreements. See the NOTICE file distributed with |
||||
# this work for additional information regarding copyright ownership. |
||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
# (the "License"); you may not use this file except in compliance with |
||||
# the License. You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
# |
||||
|
||||
export HADOOP_HOME=/opt/soft/hadoop |
||||
export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop |
||||
export SPARK_HOME1=/opt/soft/spark1 |
||||
export SPARK_HOME2=/opt/soft/spark2 |
||||
export PYTHON_HOME=/opt/soft/python |
||||
export JAVA_HOME=/opt/soft/java |
||||
export HIVE_HOME=/opt/soft/hive |
||||
export FLINK_HOME=/opt/soft/flink</value> |
||||
<value-attributes> |
||||
<type>content</type> |
||||
<empty-value-valid>false</empty-value-valid> |
||||
<show-property-name>false</show-property-name> |
||||
</value-attributes> |
||||
<on-ambari-upgrade add="true"/> |
||||
</property> |
||||
</configuration> |
@ -0,0 +1,131 @@
|
||||
<!-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
<configuration> |
||||
<property> |
||||
<name>org.quartz.scheduler.instanceName</name> |
||||
<value>DolphinScheduler</value> |
||||
<description></description> |
||||
</property> |
||||
<property> |
||||
<!-- 列举枚举值 --> |
||||
<name>org.quartz.scheduler.instanceId</name> |
||||
<value>AUTO</value> |
||||
<description></description> |
||||
</property> |
||||
<property> |
||||
<name>org.quartz.scheduler.makeSchedulerThreadDaemon</name> |
||||
<value>true</value> |
||||
<value-attributes> |
||||
<type>boolean</type> |
||||
</value-attributes> |
||||
<description></description> |
||||
</property> |
||||
<property> |
||||
<name>org.quartz.jobStore.useProperties</name> |
||||
<value>false</value> |
||||
<value-attributes> |
||||
<type>boolean</type> |
||||
</value-attributes> |
||||
<description></description> |
||||
</property> |
||||
<property> |
||||
<name>org.quartz.threadPool.class</name> |
||||
<value>org.quartz.simpl.SimpleThreadPool</value> |
||||
<description></description> |
||||
</property> |
||||
<property> |
||||
<name>org.quartz.threadPool.makeThreadsDaemons</name> |
||||
<value>true</value> |
||||
<value-attributes> |
||||
<type>boolean</type> |
||||
</value-attributes> |
||||
<description></description> |
||||
</property> |
||||
<property> |
||||
<name>org.quartz.threadPool.threadCount</name> |
||||
<value>25</value> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<description></description> |
||||
</property> |
||||
<property> |
||||
<name>org.quartz.threadPool.threadPriority</name> |
||||
<value>5</value> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<description></description> |
||||
</property> |
||||
<property> |
||||
<name>org.quartz.jobStore.class</name> |
||||
<value>org.quartz.impl.jdbcjobstore.JobStoreTX</value> |
||||
<description></description> |
||||
</property> |
||||
<property> |
||||
<name>org.quartz.jobStore.tablePrefix</name> |
||||
<value>QRTZ_</value> |
||||
<description></description> |
||||
</property> |
||||
<property> |
||||
<name>org.quartz.jobStore.isClustered</name> |
||||
<value>true</value> |
||||
<value-attributes> |
||||
<type>boolean</type> |
||||
</value-attributes> |
||||
<description></description> |
||||
</property> |
||||
<property> |
||||
<name>org.quartz.jobStore.misfireThreshold</name> |
||||
<value>60000</value> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<description></description> |
||||
</property> |
||||
<property> |
||||
<name>org.quartz.jobStore.clusterCheckinInterval</name> |
||||
<value>5000</value> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<description></description> |
||||
</property> |
||||
<property> |
||||
<name>org.quartz.jobStore.dataSource</name> |
||||
<value>myDs</value> |
||||
<description></description> |
||||
</property> |
||||
<property> |
||||
<name>org.quartz.dataSource.myDs.connectionProvider.class</name> |
||||
<value>org.apache.dolphinscheduler.server.quartz.DruidConnectionProvider</value> |
||||
<description></description> |
||||
</property> |
||||
<property> |
||||
<name>org.quartz.dataSource.myDs.maxConnections</name> |
||||
<value>10</value> |
||||
<value-attributes> |
||||
<type>int</type> |
||||
</value-attributes> |
||||
<description></description> |
||||
</property> |
||||
<property> |
||||
<name>org.quartz.dataSource.myDs.validationQuery</name> |
||||
<value>select 1</value> |
||||
<description></description> |
||||
</property> |
||||
</configuration> |
@ -0,0 +1,137 @@
|
||||
<?xml version="1.0"?> |
||||
<!-- |
||||
Licensed to the Apache Software Foundation (ASF) under one or more |
||||
contributor license agreements. See the NOTICE file distributed with |
||||
this work for additional information regarding copyright ownership. |
||||
The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
(the "License"); you may not use this file except in compliance with |
||||
the License. You may obtain a copy of the License at |
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0 |
||||
|
||||
Unless required by applicable law or agreed to in writing, software |
||||
distributed under the License is distributed on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
See the License for the specific language governing permissions and |
||||
limitations under the License. |
||||
--> |
||||
<metainfo> |
||||
<schemaVersion>2.0</schemaVersion> |
||||
<services> |
||||
<service> |
||||
<name>DOLPHIN</name> |
||||
<displayName>Dolphin Scheduler</displayName> |
||||
<comment>分布式易扩展的可视化DAG工作流任务调度系统</comment> |
||||
<version>1.2.1</version> |
||||
<components> |
||||
<component> |
||||
<name>DOLPHIN_MASTER</name> |
||||
<displayName>DS Master</displayName> |
||||
<category>MASTER</category> |
||||
<cardinality>1+</cardinality> |
||||
<commandScript> |
||||
<script>scripts/dolphin_master_service.py</script> |
||||
<scriptType>PYTHON</scriptType> |
||||
<timeout>600</timeout> |
||||
</commandScript> |
||||
</component> |
||||
|
||||
<component> |
||||
<name>DOLPHIN_LOGGER</name> |
||||
<displayName>DS Logger</displayName> |
||||
<category>SLAVE</category> |
||||
<cardinality>1+</cardinality> |
||||
<commandScript> |
||||
<script>scripts/dolphin_logger_service.py</script> |
||||
<scriptType>PYTHON</scriptType> |
||||
<timeout>600</timeout> |
||||
</commandScript> |
||||
</component> |
||||
|
||||
<component> |
||||
<name>DOLPHIN_WORKER</name> |
||||
<displayName>DS Worker</displayName> |
||||
<category>SLAVE</category> |
||||
<cardinality>1+</cardinality> |
||||
<dependencies> |
||||
<dependency> |
||||
<name>DOLPHIN/DOLPHIN_LOGGER</name> |
||||
<scope>host</scope> |
||||
<auto-deploy> |
||||
<enabled>true</enabled> |
||||
</auto-deploy> |
||||
</dependency> |
||||
</dependencies> |
||||
<commandScript> |
||||
<script>scripts/dolphin_worker_service.py</script> |
||||
<scriptType>PYTHON</scriptType> |
||||
<timeout>600</timeout> |
||||
</commandScript> |
||||
</component> |
||||
|
||||
<component> |
||||
<name>DOLPHIN_ALERT</name> |
||||
<displayName>DS Alert</displayName> |
||||
<category>SLAVE</category> |
||||
<cardinality>1</cardinality> |
||||
<commandScript> |
||||
<script>scripts/dolphin_alert_service.py</script> |
||||
<scriptType>PYTHON</scriptType> |
||||
<timeout>600</timeout> |
||||
</commandScript> |
||||
</component> |
||||
|
||||
<component> |
||||
<name>DOLPHIN_API</name> |
||||
<displayName>DS_Api</displayName> |
||||
<category>SLAVE</category> |
||||
<cardinality>1</cardinality> |
||||
<commandScript> |
||||
<script>scripts/dolphin_api_service.py</script> |
||||
<scriptType>PYTHON</scriptType> |
||||
<timeout>600</timeout> |
||||
</commandScript> |
||||
</component> |
||||
</components> |
||||
|
||||
<requiredServices> |
||||
<service>ZOOKEEPER</service> |
||||
</requiredServices> |
||||
|
||||
<osSpecifics> |
||||
<osSpecific> |
||||
<osFamily>any</osFamily> |
||||
<packages> |
||||
<package> |
||||
<name>apache-dolphinscheduler-incubating-1.2.1*</name> |
||||
</package> |
||||
</packages> |
||||
</osSpecific> |
||||
</osSpecifics> |
||||
|
||||
<configuration-dependencies> |
||||
<config-type>dolphin-alert</config-type> |
||||
<config-type>dolphin-app-api</config-type> |
||||
<config-type>dolphin-app-dao</config-type> |
||||
<config-type>dolphin-common</config-type> |
||||
<config-type>dolphin-env</config-type> |
||||
<config-type>dolphin-quartz</config-type> |
||||
</configuration-dependencies> |
||||
|
||||
<themes> |
||||
<theme> |
||||
<fileName>theme.json</fileName> |
||||
<default>true</default> |
||||
</theme> |
||||
</themes> |
||||
|
||||
<quickLinksConfigurations-dir>quicklinks</quickLinksConfigurations-dir> |
||||
<quickLinksConfigurations> |
||||
<quickLinksConfiguration> |
||||
<fileName>quicklinks.json</fileName> |
||||
<default>true</default> |
||||
</quickLinksConfiguration> |
||||
</quickLinksConfigurations> |
||||
</service> |
||||
</services> |
||||
</metainfo> |
@ -0,0 +1,124 @@
|
||||
""" |
||||
Licensed to the Apache Software Foundation (ASF) under one |
||||
or more contributor license agreements. See the NOTICE file |
||||
distributed with this work for additional information |
||||
regarding copyright ownership. The ASF licenses this file |
||||
to you under the Apache License, Version 2.0 (the |
||||
"License"); you may not use this file except in compliance |
||||
with the License. You may obtain a copy of the License at |
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0 |
||||
|
||||
Unless required by applicable law or agreed to in writing, software |
||||
distributed under the License is distributed on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
See the License for the specific language governing permissions and |
||||
limitations under the License. |
||||
""" |
||||
|
||||
import socket |
||||
import urllib2 |
||||
import os |
||||
import logging |
||||
import ambari_simplejson as json |
||||
from resource_management.libraries.script.script import Script |
||||
import sys |
||||
reload(sys) |
||||
sys.setdefaultencoding('utf-8') |
||||
|
||||
logger = logging.getLogger('ambari_alerts') |
||||
|
||||
config = Script.get_config() |
||||
|
||||
|
||||
def get_tokens(): |
||||
""" |
||||
Returns a tuple of tokens in the format {{site/property}} that will be used |
||||
to build the dictionary passed into execute |
||||
|
||||
:rtype tuple |
||||
""" |
||||
|
||||
def get_info(url, connection_timeout): |
||||
response = None |
||||
|
||||
try: |
||||
response = urllib2.urlopen(url, timeout=connection_timeout) |
||||
json_data = response.read() |
||||
return json_data |
||||
finally: |
||||
if response is not None: |
||||
try: |
||||
response.close() |
||||
except: |
||||
pass |
||||
|
||||
|
||||
def execute(configurations={}, parameters={}, host_name=None): |
||||
""" |
||||
Returns a tuple containing the result code and a pre-formatted result label |
||||
|
||||
Keyword arguments: |
||||
configurations : a mapping of configuration key to value |
||||
parameters : a mapping of script parameter key to value |
||||
host_name : the name of this host where the alert is running |
||||
|
||||
:type configurations dict |
||||
:type parameters dict |
||||
:type host_name str |
||||
""" |
||||
|
||||
alert_name = parameters['alertName'] |
||||
|
||||
dolphin_pidfile_dir = "/opt/soft/run/dolphinscheduler" |
||||
|
||||
pid = "0" |
||||
|
||||
|
||||
from resource_management.core import sudo |
||||
|
||||
is_running = True |
||||
pid_file_path = "" |
||||
if alert_name == 'DOLPHIN_MASTER': |
||||
pid_file_path = dolphin_pidfile_dir + "/master-server.pid" |
||||
elif alert_name == 'DOLPHIN_WORKER': |
||||
pid_file_path = dolphin_pidfile_dir + "/worker-server.pid" |
||||
elif alert_name == 'DOLPHIN_ALERT': |
||||
pid_file_path = dolphin_pidfile_dir + "/alert-server.pid" |
||||
elif alert_name == 'DOLPHIN_LOGGER': |
||||
pid_file_path = dolphin_pidfile_dir + "/logger-server.pid" |
||||
elif alert_name == 'DOLPHIN_API': |
||||
pid_file_path = dolphin_pidfile_dir + "/api-server.pid" |
||||
|
||||
if not pid_file_path or not os.path.isfile(pid_file_path): |
||||
is_running = False |
||||
|
||||
try: |
||||
pid = int(sudo.read_file(pid_file_path)) |
||||
except: |
||||
is_running = False |
||||
|
||||
try: |
||||
# Kill will not actually kill the process |
||||
# From the doc: |
||||
# If sig is 0, then no signal is sent, but error checking is still |
||||
# performed; this can be used to check for the existence of a |
||||
# process ID or process group ID. |
||||
sudo.kill(pid, 0) |
||||
except OSError: |
||||
is_running = False |
||||
|
||||
if host_name is None: |
||||
host_name = socket.getfqdn() |
||||
|
||||
if not is_running: |
||||
result_code = "CRITICAL" |
||||
else: |
||||
result_code = "OK" |
||||
|
||||
label = "The comment {0} of DOLPHIN_SCHEDULER on {1} is {2}".format(alert_name, host_name, result_code) |
||||
|
||||
return ((result_code, [label])) |
||||
|
||||
if __name__ == "__main__": |
||||
pass |
@ -0,0 +1,61 @@
|
||||
""" |
||||
Licensed to the Apache Software Foundation (ASF) under one |
||||
or more contributor license agreements. See the NOTICE file |
||||
distributed with this work for additional information |
||||
regarding copyright ownership. The ASF licenses this file |
||||
to you under the Apache License, Version 2.0 (the |
||||
"License"); you may not use this file except in compliance |
||||
with the License. You may obtain a copy of the License at |
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0 |
||||
|
||||
Unless required by applicable law or agreed to in writing, software |
||||
distributed under the License is distributed on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
See the License for the specific language governing permissions and |
||||
limitations under the License. |
||||
""" |
||||
import time |
||||
from resource_management import * |
||||
|
||||
from dolphin_env import dolphin_env |
||||
|
||||
|
||||
class DolphinAlertService(Script): |
||||
def install(self, env): |
||||
import params |
||||
env.set_params(params) |
||||
self.install_packages(env) |
||||
Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True) |
||||
|
||||
def configure(self, env): |
||||
import params |
||||
params.pika_slave = True |
||||
env.set_params(params) |
||||
|
||||
dolphin_env() |
||||
|
||||
def start(self, env): |
||||
import params |
||||
env.set_params(params) |
||||
self.configure(env) |
||||
no_op_test = format("ls {dolphin_pidfile_dir}/alert-server.pid >/dev/null 2>&1 && ps `cat {dolphin_pidfile_dir}/alert-server.pid` | grep `cat {dolphin_pidfile_dir}/alert-server.pid` >/dev/null 2>&1") |
||||
|
||||
start_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh start alert-server") |
||||
Execute(start_cmd, user=params.dolphin_user, not_if=no_op_test) |
||||
|
||||
def stop(self, env): |
||||
import params |
||||
env.set_params(params) |
||||
stop_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh stop alert-server") |
||||
Execute(stop_cmd, user=params.dolphin_user) |
||||
time.sleep(5) |
||||
|
||||
def status(self, env): |
||||
import status_params |
||||
env.set_params(status_params) |
||||
check_process_status(status_params.dolphin_run_dir + "alert-server.pid") |
||||
|
||||
|
||||
if __name__ == "__main__": |
||||
DolphinAlertService().execute() |
@ -0,0 +1,70 @@
|
||||
""" |
||||
Licensed to the Apache Software Foundation (ASF) under one |
||||
or more contributor license agreements. See the NOTICE file |
||||
distributed with this work for additional information |
||||
regarding copyright ownership. The ASF licenses this file |
||||
to you under the Apache License, Version 2.0 (the |
||||
"License"); you may not use this file except in compliance |
||||
with the License. You may obtain a copy of the License at |
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0 |
||||
|
||||
Unless required by applicable law or agreed to in writing, software |
||||
distributed under the License is distributed on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
See the License for the specific language governing permissions and |
||||
limitations under the License. |
||||
""" |
||||
import time |
||||
from resource_management import * |
||||
|
||||
from dolphin_env import dolphin_env |
||||
|
||||
|
||||
class DolphinApiService(Script): |
||||
def install(self, env): |
||||
import params |
||||
env.set_params(params) |
||||
self.install_packages(env) |
||||
Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True) |
||||
|
||||
def configure(self, env): |
||||
import params |
||||
params.pika_slave = True |
||||
env.set_params(params) |
||||
|
||||
dolphin_env() |
||||
|
||||
def start(self, env): |
||||
import params |
||||
env.set_params(params) |
||||
self.configure(env) |
||||
|
||||
#init |
||||
init_cmd=format("sh " + params.dolphin_home + "/script/create-dolphinscheduler.sh") |
||||
Execute(init_cmd, user=params.dolphin_user) |
||||
|
||||
#upgrade |
||||
upgrade_cmd=format("sh " + params.dolphin_home + "/script/upgrade-dolphinscheduler.sh") |
||||
Execute(upgrade_cmd, user=params.dolphin_user) |
||||
|
||||
no_op_test = format("ls {dolphin_pidfile_dir}/api-server.pid >/dev/null 2>&1 && ps `cat {dolphin_pidfile_dir}/api-server.pid` | grep `cat {dolphin_pidfile_dir}/api-server.pid` >/dev/null 2>&1") |
||||
|
||||
start_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh start api-server") |
||||
Execute(start_cmd, user=params.dolphin_user, not_if=no_op_test) |
||||
|
||||
def stop(self, env): |
||||
import params |
||||
env.set_params(params) |
||||
stop_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh stop api-server") |
||||
Execute(stop_cmd, user=params.dolphin_user) |
||||
time.sleep(5) |
||||
|
||||
def status(self, env): |
||||
import status_params |
||||
env.set_params(status_params) |
||||
check_process_status(status_params.dolphin_run_dir + "api-server.pid") |
||||
|
||||
|
||||
if __name__ == "__main__": |
||||
DolphinApiService().execute() |
@ -0,0 +1,121 @@
|
||||
""" |
||||
Licensed to the Apache Software Foundation (ASF) under one |
||||
or more contributor license agreements. See the NOTICE file |
||||
distributed with this work for additional information |
||||
regarding copyright ownership. The ASF licenses this file |
||||
to you under the Apache License, Version 2.0 (the |
||||
"License"); you may not use this file except in compliance |
||||
with the License. You may obtain a copy of the License at |
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0 |
||||
|
||||
Unless required by applicable law or agreed to in writing, software |
||||
distributed under the License is distributed on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
See the License for the specific language governing permissions and |
||||
limitations under the License. |
||||
|
||||
""" |
||||
|
||||
from resource_management import * |
||||
|
||||
|
||||
def dolphin_env():
    """Create DolphinScheduler's runtime directories and render its
    configuration files.

    ``Directory``, ``File``, ``Template``, ``InlineTemplate`` and
    ``format`` are Ambari resource_management primitives; resources are
    applied in declaration order.  The ``0777``/``0755`` literals are
    Python 2 octal modes — Ambari stack scripts run under Python 2.
    All paths and property maps come from the ``params`` module.
    """
    import params

    # Runtime directories: pid files, logs, and the conf tree.
    # NOTE(review): mode 0777 (world-writable) on every directory below is
    # very permissive — presumably for cross-daemon sharing; consider
    # tightening once ownership requirements are confirmed.
    Directory(params.dolphin_pidfile_dir,
              mode=0777,
              owner=params.dolphin_user,
              group=params.dolphin_group,
              create_parents=True
              )
    Directory(params.dolphin_log_dir,
              mode=0777,
              owner=params.dolphin_user,
              group=params.dolphin_group,
              create_parents=True
              )
    Directory(params.dolphin_conf_dir,
              mode=0777,
              owner=params.dolphin_user,
              group=params.dolphin_group,
              create_parents=True
              )

    # Data directories taken from the rendered property maps: alert xlsx
    # output, shared data root, download area, and task execution area.
    Directory(params.dolphin_alert_map['xls.file.path'],
              mode=0777,
              owner=params.dolphin_user,
              group=params.dolphin_group,
              create_parents=True
              )
    Directory(params.dolphin_common_map['data.basedir.path'],
              mode=0777,
              owner=params.dolphin_user,
              group=params.dolphin_group,
              create_parents=True
              )
    Directory(params.dolphin_common_map['data.download.basedir.path'],
              mode=0777,
              owner=params.dolphin_user,
              group=params.dolphin_group,
              create_parents=True
              )
    Directory(params.dolphin_common_map['process.exec.basepath'],
              mode=0777,
              owner=params.dolphin_user,
              group=params.dolphin_group,
              create_parents=True
              )

    # dolphinscheduler_env.sh rendered inline from the Ambari-provided
    # env-content property (not from a packaged template file).
    File(format(params.dolphin_env_path),
         mode=0777,
         content=InlineTemplate(params.dolphin_env_content),
         owner=params.dolphin_user,
         group=params.dolphin_group
         )

    # Daemon start/stop wrapper rendered from the packaged Jinja2 template.
    File(format(params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh"),
         mode=0755,
         content=Template("dolphin-daemon.j2"),
         owner=params.dolphin_user,
         group=params.dolphin_group
         )

    # Property files, each rendered from its .j2 template into conf/.
    File(format(params.dolphin_conf_dir + "/alert.properties"),
         mode=0755,
         content=Template("alert.properties.j2"),
         owner=params.dolphin_user,
         group=params.dolphin_group
         )

    File(format(params.dolphin_conf_dir + "/application.properties"),
         mode=0755,
         content=Template("application.properties.j2"),
         owner=params.dolphin_user,
         group=params.dolphin_group
         )

    File(format(params.dolphin_conf_dir + "/application-api.properties"),
         mode=0755,
         content=Template("application-api.properties.j2"),
         owner=params.dolphin_user,
         group=params.dolphin_group
         )

    File(format(params.dolphin_conf_dir + "/common.properties"),
         mode=0755,
         content=Template("common.properties.j2"),
         owner=params.dolphin_user,
         group=params.dolphin_group
         )

    File(format(params.dolphin_conf_dir + "/quartz.properties"),
         mode=0755,
         content=Template("quartz.properties.j2"),
         owner=params.dolphin_user,
         group=params.dolphin_group
         )
@ -0,0 +1,61 @@
|
||||
""" |
||||
Licensed to the Apache Software Foundation (ASF) under one |
||||
or more contributor license agreements. See the NOTICE file |
||||
distributed with this work for additional information |
||||
regarding copyright ownership. The ASF licenses this file |
||||
to you under the Apache License, Version 2.0 (the |
||||
"License"); you may not use this file except in compliance |
||||
with the License. You may obtain a copy of the License at |
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0 |
||||
|
||||
Unless required by applicable law or agreed to in writing, software |
||||
distributed under the License is distributed on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
See the License for the specific language governing permissions and |
||||
limitations under the License. |
||||
""" |
||||
import time |
||||
from resource_management import * |
||||
|
||||
from dolphin_env import dolphin_env |
||||
|
||||
|
||||
class DolphinLoggerService(Script):
    """Ambari lifecycle handler for the DolphinScheduler logger-server."""

    def install(self, env):
        """Install packages, then open up the install tree."""
        import params
        env.set_params(params)
        self.install_packages(env)
        # sudo because the tree may be root-owned right after install.
        # NOTE(review): 777 on the whole install tree is very permissive.
        Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True)

    def configure(self, env):
        """Render config files and create runtime directories."""
        import params
        # NOTE(review): pika_slave is set but not read in this file —
        # verify whether any template still depends on it.
        params.pika_slave = True
        env.set_params(params)

        dolphin_env()

    def start(self, env):
        """Start logger-server unless a live process already matches the pid file."""
        import params
        env.set_params(params)
        self.configure(env)
        # Guard command: succeeds only when the pid file exists AND ps
        # shows a process with that pid (format() interpolates params).
        no_op_test = format("ls {dolphin_pidfile_dir}/logger-server.pid >/dev/null 2>&1 && ps `cat {dolphin_pidfile_dir}/logger-server.pid` | grep `cat {dolphin_pidfile_dir}/logger-server.pid` >/dev/null 2>&1")

        start_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh start logger-server")
        Execute(start_cmd, user=params.dolphin_user, not_if=no_op_test)

    def stop(self, env):
        """Stop logger-server via the daemon wrapper script."""
        import params
        env.set_params(params)
        stop_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh stop logger-server")
        Execute(stop_cmd, user=params.dolphin_user)
        # Give the JVM a moment to exit before Ambari re-checks status.
        time.sleep(5)

    def status(self, env):
        """Report logger-server liveness from its pid file."""
        import status_params
        env.set_params(status_params)
        check_process_status(status_params.dolphin_run_dir + "logger-server.pid")


# Ambari entry point: the agent invokes this module directly.
if __name__ == "__main__":
    DolphinLoggerService().execute()
@ -0,0 +1,61 @@
|
||||
# -*- coding: utf-8 -*- |
||||
""" |
||||
Licensed to the Apache Software Foundation (ASF) under one |
||||
or more contributor license agreements. See the NOTICE file |
||||
distributed with this work for additional information |
||||
regarding copyright ownership. The ASF licenses this file |
||||
to you under the Apache License, Version 2.0 (the |
||||
"License"); you may not use this file except in compliance |
||||
with the License. You may obtain a copy of the License at |
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0 |
||||
|
||||
Unless required by applicable law or agreed to in writing, software |
||||
distributed under the License is distributed on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
See the License for the specific language governing permissions and |
||||
limitations under the License. |
||||
""" |
||||
import time |
||||
from resource_management import * |
||||
|
||||
from dolphin_env import dolphin_env |
||||
|
||||
|
||||
class DolphinMasterService(Script):
    """Ambari lifecycle handler for the DolphinScheduler master-server."""

    def install(self, env):
        """Install packages, then open up the install tree."""
        import params
        env.set_params(params)
        self.install_packages(env)
        # sudo because the tree may be root-owned right after install.
        # NOTE(review): 777 on the whole install tree is very permissive.
        Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True)

    def configure(self, env):
        """Render config files and create runtime directories."""
        import params
        # NOTE(review): pika_slave is set but not read in this file —
        # verify whether any template still depends on it.
        params.pika_slave = True
        env.set_params(params)

        dolphin_env()

    def start(self, env):
        """Start master-server unless a live process already matches the pid file."""
        import params
        env.set_params(params)
        self.configure(env)
        # Guard command: succeeds only when the pid file exists AND ps
        # shows a process with that pid (format() interpolates params).
        no_op_test = format("ls {dolphin_pidfile_dir}/master-server.pid >/dev/null 2>&1 && ps `cat {dolphin_pidfile_dir}/master-server.pid` | grep `cat {dolphin_pidfile_dir}/master-server.pid` >/dev/null 2>&1")
        start_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh start master-server")
        Execute(start_cmd, user=params.dolphin_user, not_if=no_op_test)

    def stop(self, env):
        """Stop master-server via the daemon wrapper script."""
        import params
        env.set_params(params)
        stop_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh stop master-server")
        Execute(stop_cmd, user=params.dolphin_user)
        # Give the JVM a moment to exit before Ambari re-checks status.
        time.sleep(5)

    def status(self, env):
        """Report master-server liveness from its pid file."""
        import status_params
        env.set_params(status_params)
        check_process_status(status_params.dolphin_run_dir + "master-server.pid")


# Ambari entry point: the agent invokes this module directly.
if __name__ == "__main__":
    DolphinMasterService().execute()
@ -0,0 +1,60 @@
|
||||
""" |
||||
Licensed to the Apache Software Foundation (ASF) under one |
||||
or more contributor license agreements. See the NOTICE file |
||||
distributed with this work for additional information |
||||
regarding copyright ownership. The ASF licenses this file |
||||
to you under the Apache License, Version 2.0 (the |
||||
"License"); you may not use this file except in compliance |
||||
with the License. You may obtain a copy of the License at |
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0 |
||||
|
||||
Unless required by applicable law or agreed to in writing, software |
||||
distributed under the License is distributed on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
See the License for the specific language governing permissions and |
||||
limitations under the License. |
||||
""" |
||||
import time |
||||
from resource_management import * |
||||
|
||||
from dolphin_env import dolphin_env |
||||
|
||||
|
||||
class DolphinWorkerService(Script):
    """Ambari lifecycle handler for the DolphinScheduler worker-server."""

    def install(self, env):
        """Install packages, then open up the install tree."""
        import params
        env.set_params(params)
        self.install_packages(env)
        # sudo because the tree may be root-owned right after install.
        # NOTE(review): 777 on the whole install tree is very permissive.
        Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True)

    def configure(self, env):
        """Render config files and create runtime directories."""
        import params
        # NOTE(review): pika_slave is set but not read in this file —
        # verify whether any template still depends on it.
        params.pika_slave = True
        env.set_params(params)

        dolphin_env()

    def start(self, env):
        """Start worker-server unless a live process already matches the pid file."""
        import params
        env.set_params(params)
        self.configure(env)
        # Guard command: succeeds only when the pid file exists AND ps
        # shows a process with that pid (format() interpolates params).
        no_op_test = format("ls {dolphin_pidfile_dir}/worker-server.pid >/dev/null 2>&1 && ps `cat {dolphin_pidfile_dir}/worker-server.pid` | grep `cat {dolphin_pidfile_dir}/worker-server.pid` >/dev/null 2>&1")
        start_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh start worker-server")
        Execute(start_cmd, user=params.dolphin_user, not_if=no_op_test)

    def stop(self, env):
        """Stop worker-server via the daemon wrapper script."""
        import params
        env.set_params(params)
        stop_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh stop worker-server")
        Execute(stop_cmd, user=params.dolphin_user)
        # Give the JVM a moment to exit before Ambari re-checks status.
        time.sleep(5)

    def status(self, env):
        """Report worker-server liveness from its pid file."""
        import status_params
        env.set_params(status_params)
        check_process_status(status_params.dolphin_run_dir + "worker-server.pid")


# Ambari entry point: the agent invokes this module directly.
if __name__ == "__main__":
    DolphinWorkerService().execute()
@ -0,0 +1,150 @@
|
||||
""" |
||||
Licensed to the Apache Software Foundation (ASF) under one |
||||
or more contributor license agreements. See the NOTICE file |
||||
distributed with this work for additional information |
||||
regarding copyright ownership. The ASF licenses this file |
||||
to you under the Apache License, Version 2.0 (the |
||||
"License"); you may not use this file except in compliance |
||||
with the License. You may obtain a copy of the License at |
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
||||
|
||||
Unless required by applicable law or agreed to in writing, software |
||||
distributed under the License is distributed on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
See the License for the specific language governing permissions and |
||||
limitations under the License. |
||||
|
||||
""" |
||||
|
||||
import sys |
||||
from resource_management import * |
||||
from resource_management.core.logger import Logger |
||||
from resource_management.libraries.functions import default |
||||
|
||||
# Initialise Ambari's logger and force UTF-8 as the process default codec.
# reload(sys)/setdefaultencoding is a Python 2-only idiom — these stack
# scripts run under Ambari's Python 2 interpreter.
Logger.initialize_logger()
reload(sys)
sys.setdefaultencoding('utf-8')

# Cluster/command configuration supplied by the Ambari agent.
config = Script.get_config()

# Fixed installation layout.
# NOTE(review): the prefix is hard-coded to the 1.2.1 tarball location —
# it must be kept in sync with the packaged DolphinScheduler version.
dolphin_home = "/opt/soft/apache-dolphinscheduler-incubating-1.2.1"
dolphin_conf_dir = dolphin_home + "/conf"
dolphin_log_dir = dolphin_home + "/logs"
dolphin_bin_dir = dolphin_home + "/bin"
dolphin_lib_jars = dolphin_home + "/lib/*"
dolphin_pidfile_dir = "/opt/soft/run/dolphinscheduler"
||||
|
||||
# YARN ResourceManager hosts (empty list when YARN is absent).
rmHosts = default("/clusterHostInfo/rm_host", [])

# dolphin-env: user-facing settings from Ambari's dolphin-env config type.
dolphin_env_map = {}
dolphin_env_map.update(config['configurations']['dolphin-env'])

# User/group that owns and runs DolphinScheduler.
dolphin_user = dolphin_env_map['dolphin.user']
dolphin_group = dolphin_env_map['dolphin.group']

# dolphinscheduler_env.sh: target path and the raw content to render into it.
dolphin_env_path = dolphin_conf_dir + '/env/dolphinscheduler_env.sh'
dolphin_env_content = dolphin_env_map['dolphinscheduler-env-content']
||||
|
||||
# Database connection settings, copied out of dolphin-env.
dolphin_database_config = {}
dolphin_database_config['dolphin_database_type'] = dolphin_env_map['dolphin.database.type']
dolphin_database_config['dolphin_database_host'] = dolphin_env_map['dolphin.database.host']
dolphin_database_config['dolphin_database_port'] = dolphin_env_map['dolphin.database.port']
dolphin_database_config['dolphin_database_username'] = dolphin_env_map['dolphin.database.username']
dolphin_database_config['dolphin_database_password'] = dolphin_env_map['dolphin.database.password']

# JDBC driver, Quartz delegate and URL depend on the database flavour.
# NOTE(review): anything other than 'mysql' falls through to PostgreSQL.
if 'mysql' == dolphin_database_config['dolphin_database_type']:
    dolphin_database_config['dolphin_database_driver'] = 'com.mysql.jdbc.Driver'
    dolphin_database_config['driverDelegateClass'] = 'org.quartz.impl.jdbcjobstore.StdJDBCDelegate'
    dolphin_database_config['dolphin_database_url'] = 'jdbc:mysql://' + dolphin_env_map['dolphin.database.host'] \
                                                      + ':' + dolphin_env_map['dolphin.database.port'] \
                                                      + '/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8'
else:
    dolphin_database_config['dolphin_database_driver'] = 'org.postgresql.Driver'
    dolphin_database_config['driverDelegateClass'] = 'org.quartz.impl.jdbcjobstore.PostgreSQLDelegate'
    dolphin_database_config['dolphin_database_url'] = 'jdbc:postgresql://' + dolphin_env_map['dolphin.database.host'] \
                                                      + ':' + dolphin_env_map['dolphin.database.port'] \
                                                      + '/dolphinscheduler'
||||
|
||||
# application-alert.properties: enterprise WeChat endpoints and message
# payload templates ($token/$corpId/$secret/$msg placeholders are
# substituted at runtime by the alert server, not here).
dolphin_alert_map = {}
wechat_push_url = 'https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token'
wechat_token_url = 'https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret'
wechat_team_send_msg = '{\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}'
wechat_user_send_msg = '{\"touser\":\"$toUser\",\"agentid\":\"$agentId\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"$msg\"}}'

# NOTE(review): 'enterprise.wechat.push.ur' looks like a typo for
# '...push.url', but DolphinScheduler 1.2.x may actually read the
# truncated key — confirm against the alert module's constants before
# renaming it here.
dolphin_alert_map['enterprise.wechat.push.ur'] = wechat_push_url
dolphin_alert_map['enterprise.wechat.token.url'] = wechat_token_url
dolphin_alert_map['enterprise.wechat.team.send.msg'] = wechat_team_send_msg
dolphin_alert_map['enterprise.wechat.user.send.msg'] = wechat_user_send_msg
# Site-level dolphin-alert overrides win over the defaults above.
dolphin_alert_map.update(config['configurations']['dolphin-alert'])
||||
|
||||
# application-api.properties: API-server Spring settings plus overrides.
dolphin_app_api_map = {}
dolphin_app_api_map['logging.config'] = 'classpath:apiserver_logback.xml'
dolphin_app_api_map['spring.messages.basename'] = 'i18n/messages'
dolphin_app_api_map['server.servlet.context-path'] = '/dolphinscheduler/'
dolphin_app_api_map.update(config['configurations']['dolphin-application-api'])

# application(-dao).properties: Spring datasource wired to the database
# resolved in dolphin_database_config above.
dolphin_application_map = {}
dolphin_application_map['spring.datasource.type'] = 'com.alibaba.druid.pool.DruidDataSource'
dolphin_application_map['spring.datasource.driver-class-name'] = dolphin_database_config['dolphin_database_driver']
dolphin_application_map['spring.datasource.url'] = dolphin_database_config['dolphin_database_url']
dolphin_application_map['spring.datasource.username'] = dolphin_database_config['dolphin_database_username']
dolphin_application_map['spring.datasource.password'] = dolphin_database_config['dolphin_database_password']
dolphin_application_map.update(config['configurations']['dolphin-application'])
||||
|
||||
# common.properties: settings shared by all DolphinScheduler daemons.
dolphin_common_map = {}

# YARN application-status endpoint, derived from yarn-site when present
# (%s is filled with the application id by the consumer at runtime).
if 'yarn-site' in config['configurations'] and \
        'yarn.resourcemanager.webapp.address' in config['configurations']['yarn-site']:
    yarn_resourcemanager_webapp_address = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address']
    yarn_application_status_address = 'http://' + yarn_resourcemanager_webapp_address + '/ws/v1/cluster/apps/%s'
    dolphin_common_map['yarn.application.status.address'] = yarn_application_status_address
||||
|
||||
# ResourceManager HA ids: expose the RM host list (already resolved once
# near the top of this module via the identical default() lookup — the
# redundant re-read that used to live here has been removed) only when
# more than one RM exists; single-RM clusters get an empty value.
if len(rmHosts) > 1:
    dolphin_common_map['yarn.resourcemanager.ha.rm.ids'] = ','.join(rmHosts)
else:
    dolphin_common_map['yarn.resourcemanager.ha.rm.ids'] = ''
||||
|
||||
# Derive the exec/download working directories from data.basedir.path.
dolphin_common_map_tmp = config['configurations']['dolphin-common']
data_basedir_path = dolphin_common_map_tmp['data.basedir.path']
process_exec_basepath = data_basedir_path + '/exec'
data_download_basedir_path = data_basedir_path + '/download'
dolphin_common_map['process.exec.basepath'] = process_exec_basepath
dolphin_common_map['data.download.basedir.path'] = data_download_basedir_path
dolphin_common_map['dolphinscheduler.env.path'] = dolphin_env_path

# ZooKeeper quorum as "host1:port,host2:port,...".
# NOTE(review): assumes clientPort is a string — an int here would raise
# TypeError on concatenation; verify the zoo.cfg value type.
zookeeperHosts = default("/clusterHostInfo/zookeeper_hosts", [])
if len(zookeeperHosts) > 0 and "clientPort" in config['configurations']['zoo.cfg']:
    clientPort = config['configurations']['zoo.cfg']['clientPort']
    zookeeperPort = ":" + clientPort + ","
    dolphin_common_map['zookeeper.quorum'] = zookeeperPort.join(zookeeperHosts) + ":" + clientPort

# Site-level dolphin-common overrides win over everything derived above.
dolphin_common_map.update(config['configurations']['dolphin-common'])
||||
|
||||
# quartz.properties: point Quartz's JDBC job store at the same database
# resolved in dolphin_database_config above.
dolphin_quartz_map = {}
dolphin_quartz_map['org.quartz.jobStore.driverDelegateClass'] = dolphin_database_config['driverDelegateClass']
dolphin_quartz_map['org.quartz.dataSource.myDs.driver'] = dolphin_database_config['dolphin_database_driver']
dolphin_quartz_map['org.quartz.dataSource.myDs.URL'] = dolphin_database_config['dolphin_database_url']
dolphin_quartz_map['org.quartz.dataSource.myDs.user'] = dolphin_database_config['dolphin_database_username']
dolphin_quartz_map['org.quartz.dataSource.myDs.password'] = dolphin_database_config['dolphin_database_password']
# Site-level dolphin-quartz overrides win over the defaults above.
dolphin_quartz_map.update(config['configurations']['dolphin-quartz'])
||||
|
||||
# if 'ganglia_server_host' in config['clusterHostInfo'] and \ |
||||
# len(config['clusterHostInfo']['ganglia_server_host'])>0: |
||||
# ganglia_installed = True |
||||
# ganglia_server = config['clusterHostInfo']['ganglia_server_host'][0] |
||||
# ganglia_report_interval = 60 |
||||
# else: |
||||
# ganglia_installed = False |
@ -0,0 +1,31 @@
|
||||
""" |
||||
Licensed to the Apache Software Foundation (ASF) under one |
||||
or more contributor license agreements. See the NOTICE file |
||||
distributed with this work for additional information |
||||
regarding copyright ownership. The ASF licenses this file |
||||
to you under the Apache License, Version 2.0 (the |
||||
"License"); you may not use this file except in compliance |
||||
with the License. You may obtain a copy of the License at |
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0 |
||||
|
||||
Unless required by applicable law or agreed to in writing, software |
||||
distributed under the License is distributed on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
See the License for the specific language governing permissions and |
||||
limitations under the License. |
||||
|
||||
""" |
||||
|
||||
from resource_management import * |
||||
from resource_management.libraries.functions import get_unique_id_and_date |
||||
|
||||
class ServiceCheck(Script):
    """Ambari service-check hook for DolphinScheduler.

    Currently a stub: it performs no real probe.
    """

    def service_check(self, env):
        import params
        # NOTE(review): both the param binding and the actual check are
        # commented out — this service check always reports success.
        #env.set_params(params)

        # Execute(format("which pika_server"))

# Ambari entry point: the agent invokes this module directly.
if __name__ == "__main__":
    ServiceCheck().execute()
@ -0,0 +1,23 @@
|
||||
""" |
||||
Licensed to the Apache Software Foundation (ASF) under one |
||||
or more contributor license agreements. See the NOTICE file |
||||
distributed with this work for additional information |
||||
regarding copyright ownership. The ASF licenses this file |
||||
to you under the Apache License, Version 2.0 (the |
||||
"License"); you may not use this file except in compliance |
||||
with the License. You may obtain a copy of the License at |
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0 |
||||
|
||||
Unless required by applicable law or agreed to in writing, software |
||||
distributed under the License is distributed on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
See the License for the specific language governing permissions and |
||||
limitations under the License. |
||||
""" |
||||
|
||||
from resource_management import * |
||||
|
||||
# Cluster/command configuration supplied by the Ambari agent.
config = Script.get_config()

# Directory holding the daemons' pid files.  The trailing slash is relied
# upon by callers that append "<component>.pid" with plain concatenation.
dolphin_run_dir = "/opt/soft/run/dolphinscheduler/"
@ -0,0 +1,20 @@
|
||||
# |
||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
||||
# contributor license agreements. See the NOTICE file distributed with |
||||
# this work for additional information regarding copyright ownership. |
||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
# (the "License"); you may not use this file except in compliance with |
||||
# the License. You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
# |
||||
|
||||
{# Emit every key=value pair from params.dolphin_application_map.
   iteritems() is a Python 2 dict method — Ambari renders this template
   under its Python 2 interpreter. #}
{% for key, value in dolphin_application_map.iteritems() -%}
{{key}}={{value}}
{% endfor %}
@ -0,0 +1,119 @@
|
||||
#!/bin/sh
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Generic start/stop wrapper for every DolphinScheduler daemon.
# This file is rendered by Ambari from a Jinja2 template: the
# {{...}} placeholders are substituted from params.py at install time.

usage="Usage: dolphinscheduler-daemon.sh (start|stop) <command> "

# if no args specified, show usage
if [ $# -le 1 ]; then
  echo $usage
  exit 1
fi

# First arg is the action, second is the component to act on.
startStop=$1
shift
command=$1
shift

echo "Begin $startStop $command......"

# Resolve the install root relative to this script's location.
BIN_DIR=`dirname $0`
BIN_DIR=`cd "$BIN_DIR"; pwd`
DOLPHINSCHEDULER_HOME=$BIN_DIR/..

export HOSTNAME=`hostname`

DOLPHINSCHEDULER_LIB_JARS={{dolphin_lib_jars}}

# NOTE(review): the JVM heap (-Xmx16g -Xms4g) is hard-coded and applied
# to every daemon — consider making it configurable per component.
DOLPHINSCHEDULER_OPTS="-server -Xmx16g -Xms4g -Xss512k -XX:+DisableExplicitGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:LargePageSizeInBytes=128m -XX:+UseFastAccessorMethods -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70"
# Seconds to wait after SIGTERM before escalating to kill -9.
STOP_TIMEOUT=5

log={{dolphin_log_dir}}/dolphinscheduler-$command-$HOSTNAME.out
pid={{dolphin_pidfile_dir}}/$command.pid

cd $DOLPHINSCHEDULER_HOME

# Map the component name to its main class and per-component JVM flags.
# NOTE(review): logger-server sets no LOG_FILE, so that variable stays
# empty (or inherits nothing) for it — confirm that is intentional.
if [ "$command" = "api-server" ]; then
  LOG_FILE="-Dlogging.config={{dolphin_conf_dir}}/apiserver_logback.xml -Dspring.profiles.active=api"
  CLASS=org.apache.dolphinscheduler.api.ApiApplicationServer
elif [ "$command" = "master-server" ]; then
  LOG_FILE="-Dlogging.config={{dolphin_conf_dir}}/master_logback.xml -Ddruid.mysql.usePingMethod=false"
  CLASS=org.apache.dolphinscheduler.server.master.MasterServer
elif [ "$command" = "worker-server" ]; then
  LOG_FILE="-Dlogging.config={{dolphin_conf_dir}}/worker_logback.xml -Ddruid.mysql.usePingMethod=false"
  CLASS=org.apache.dolphinscheduler.server.worker.WorkerServer
elif [ "$command" = "alert-server" ]; then
  LOG_FILE="-Dlogback.configurationFile={{dolphin_conf_dir}}/alert_logback.xml"
  CLASS=org.apache.dolphinscheduler.alert.AlertServer
elif [ "$command" = "logger-server" ]; then
  CLASS=org.apache.dolphinscheduler.server.rpc.LoggerServer
elif [ "$command" = "combined-server" ]; then
  LOG_FILE="-Dlogging.config={{dolphin_conf_dir}}/combined_logback.xml -Dspring.profiles.active=api -Dserver.is-combined-server=true"
  CLASS=org.apache.dolphinscheduler.api.CombinedApplicationServer
else
  echo "Error: No command named \`$command' was found."
  exit 1
fi

case $startStop in
  (start)

    # Refuse to start when a live process already owns the pid file.
    if [ -f $pid ]; then
      if kill -0 `cat $pid` > /dev/null 2>&1; then
        echo $command running as process `cat $pid`.  Stop it first.
        exit 1
      fi
    fi

    echo starting $command, logging to $log

    # Launch detached, capture stdout/stderr to $log, record the pid.
    exec_command="$LOG_FILE $DOLPHINSCHEDULER_OPTS -classpath {{dolphin_conf_dir}}:{{dolphin_lib_jars}} $CLASS"

    echo "nohup java $exec_command > $log 2>&1 < /dev/null &"
    nohup java $exec_command > $log 2>&1 < /dev/null &
    echo $! > $pid
    ;;

  (stop)

    # Graceful SIGTERM first; escalate to kill -9 after STOP_TIMEOUT.
    if [ -f $pid ]; then
      TARGET_PID=`cat $pid`
      if kill -0 $TARGET_PID > /dev/null 2>&1; then
        echo stopping $command
        kill $TARGET_PID
        sleep $STOP_TIMEOUT
        if kill -0 $TARGET_PID > /dev/null 2>&1; then
          echo "$command did not stop gracefully after $STOP_TIMEOUT seconds: killing with kill -9"
          kill -9 $TARGET_PID
        fi
      else
        echo no $command to stop
      fi
      rm -f $pid
    else
      echo no $command to stop
    fi
    ;;

  (*)
    echo $usage
    exit 1
    ;;

esac

echo "End $startStop $command."
@ -0,0 +1,26 @@
|
||||
{ |
||||
"name": "default", |
||||
"description": "default quick links configuration", |
||||
"configuration": { |
||||
"protocol": |
||||
{ |
||||
"type":"http" |
||||
}, |
||||
|
||||
"links": [ |
||||
{ |
||||
"name": "dolphin-application-ui", |
||||
"label": "DolphinApplication UI", |
||||
"requires_user_name": "false", |
||||
"component_name": "DOLPHIN_API", |
||||
"url": "%@://%@:%@/dolphinscheduler/ui/view/login/index.html", |
||||
"port":{ |
||||
"http_property": "server.port", |
||||
"http_default_port": "12345", |
||||
"regex": "^(\\d+)$", |
||||
"site": "dolphin-application-api" |
||||
} |
||||
} |
||||
] |
||||
} |
||||
} |
@ -0,0 +1,605 @@
|
||||
{ |
||||
"name": "default", |
||||
"description": "Default theme for Dolphin Scheduler service", |
||||
"configuration": { |
||||
"layouts": [ |
||||
{ |
||||
"name": "default", |
||||
"tabs": [ |
||||
{ |
||||
"name": "settings", |
||||
"display-name": "Settings", |
||||
"layout": { |
||||
"tab-rows": "3", |
||||
"tab-columns": "3", |
||||
"sections": [ |
||||
{ |
||||
"name": "dolphin-env-config", |
||||
"display-name": "Dolphin Env Config", |
||||
"row-index": "0", |
||||
"column-index": "0", |
||||
"row-span": "1", |
||||
"column-span": "2", |
||||
"section-rows": "1", |
||||
"section-columns": "2", |
||||
"subsections": [ |
||||
{ |
||||
"name": "env-row1-col1", |
||||
"display-name": "Deploy User Info", |
||||
"row-index": "0", |
||||
"column-index": "0", |
||||
"row-span": "1", |
||||
"column-span": "1" |
||||
}, |
||||
{ |
||||
"name": "env-row1-col2", |
||||
"display-name": "System Env Optimization", |
||||
"row-index": "0", |
||||
"column-index": "1", |
||||
"row-span": "1", |
||||
"column-span": "1" |
||||
} |
||||
] |
||||
}, |
||||
{ |
||||
"name": "dolphin-database-config", |
||||
"display-name": "Database Config", |
||||
"row-index": "1", |
||||
"column-index": "0", |
||||
"row-span": "1", |
||||
"column-span": "2", |
||||
"section-rows": "1", |
||||
"section-columns": "3", |
||||
"subsections": [ |
||||
{ |
||||
"name": "database-row1-col1", |
||||
"row-index": "0", |
||||
"column-index": "0", |
||||
"row-span": "1", |
||||
"column-span": "1" |
||||
}, |
||||
{ |
||||
"name": "database-row1-col2", |
||||
"row-index": "0", |
||||
"column-index": "1", |
||||
"row-span": "1", |
||||
"column-span": "1" |
||||
}, |
||||
{ |
||||
"name": "database-row1-col3", |
||||
"row-index": "0", |
||||
"column-index": "2", |
||||
"row-span": "1", |
||||
"column-span": "1" |
||||
} |
||||
] |
||||
}, |
||||
{ |
||||
"name": "dynamic-config", |
||||
"row-index": "2", |
||||
"column-index": "0", |
||||
"row-span": "1", |
||||
"column-span": "2", |
||||
"section-rows": "1", |
||||
"section-columns": "3", |
||||
"subsections": [ |
||||
{ |
||||
"name": "dynamic-row1-col1", |
||||
"display-name": "Resource FS Config", |
||||
"row-index": "0", |
||||
"column-index": "0", |
||||
"row-span": "1", |
||||
"column-span": "1" |
||||
}, |
||||
{ |
||||
"name": "dynamic-row1-col2", |
||||
"display-name": "Kerberos Info", |
||||
"row-index": "0", |
||||
"column-index": "1", |
||||
"row-span": "1", |
||||
"column-span": "1" |
||||
}, |
||||
{ |
||||
"name": "dynamic-row1-col3", |
||||
"display-name": "Wechat Info", |
||||
"row-index": "0", |
||||
"column-index": "1", |
||||
"row-span": "1", |
||||
"column-span": "1" |
||||
} |
||||
] |
||||
} |
||||
] |
||||
} |
||||
} |
||||
] |
||||
} |
||||
], |
||||
"placement": { |
||||
"configuration-layout": "default", |
||||
"configs": [ |
||||
{ |
||||
"config": "dolphin-env/dolphin.database.type", |
||||
"subsection-name": "database-row1-col1" |
||||
}, |
||||
{ |
||||
"config": "dolphin-env/dolphin.database.host", |
||||
"subsection-name": "database-row1-col2" |
||||
}, |
||||
{ |
||||
"config": "dolphin-env/dolphin.database.port", |
||||
"subsection-name": "database-row1-col2" |
||||
}, |
||||
{ |
||||
"config": "dolphin-env/dolphin.database.username", |
||||
"subsection-name": "database-row1-col3" |
||||
}, |
||||
{ |
||||
"config": "dolphin-env/dolphin.database.password", |
||||
"subsection-name": "database-row1-col3" |
||||
}, |
||||
{ |
||||
"config": "dolphin-env/dolphin.user", |
||||
"subsection-name": "env-row1-col1" |
||||
}, |
||||
{ |
||||
"config": "dolphin-env/dolphin.group", |
||||
"subsection-name": "env-row1-col1" |
||||
}, |
||||
{ |
||||
"config": "dolphin-env/dolphinscheduler-env-content", |
||||
"subsection-name": "env-row1-col2" |
||||
}, |
||||
{ |
||||
"config": "dolphin-common/res.upload.startup.type", |
||||
"subsection-name": "dynamic-row1-col1" |
||||
}, |
||||
{ |
||||
"config": "dolphin-common/hdfs.root.user", |
||||
"subsection-name": "dynamic-row1-col1", |
||||
"depends-on": [ |
||||
{ |
||||
"configs":[ |
||||
"dolphin-common/res.upload.startup.type" |
||||
], |
||||
"if": "${dolphin-common/res.upload.startup.type} === HDFS", |
||||
"then": { |
||||
"property_value_attributes": { |
||||
"visible": true |
||||
} |
||||
}, |
||||
"else": { |
||||
"property_value_attributes": { |
||||
"visible": false |
||||
} |
||||
} |
||||
} |
||||
] |
||||
}, |
||||
{ |
||||
"config": "dolphin-common/data.store2hdfs.basepath", |
||||
"subsection-name": "dynamic-row1-col1", |
||||
"depends-on": [ |
||||
{ |
||||
"configs":[ |
||||
"dolphin-common/res.upload.startup.type" |
||||
], |
||||
"if": "${dolphin-common/res.upload.startup.type} === HDFS", |
||||
"then": { |
||||
"property_value_attributes": { |
||||
"visible": true |
||||
} |
||||
}, |
||||
"else": { |
||||
"property_value_attributes": { |
||||
"visible": false |
||||
} |
||||
} |
||||
} |
||||
] |
||||
}, |
||||
{ |
||||
"config": "dolphin-common/fs.defaultFS", |
||||
"subsection-name": "dynamic-row1-col1", |
||||
"depends-on": [ |
||||
{ |
||||
"configs":[ |
||||
"dolphin-common/res.upload.startup.type" |
||||
], |
||||
"if": "${dolphin-common/res.upload.startup.type} === HDFS", |
||||
"then": { |
||||
"property_value_attributes": { |
||||
"visible": true |
||||
} |
||||
}, |
||||
"else": { |
||||
"property_value_attributes": { |
||||
"visible": false |
||||
} |
||||
} |
||||
} |
||||
] |
||||
}, |
||||
{ |
||||
"config": "dolphin-common/fs.s3a.endpoint", |
||||
"subsection-name": "dynamic-row1-col1", |
||||
"depends-on": [ |
||||
{ |
||||
"configs":[ |
||||
"dolphin-common/res.upload.startup.type" |
||||
], |
||||
"if": "${dolphin-common/res.upload.startup.type} === S3", |
||||
"then": { |
||||
"property_value_attributes": { |
||||
"visible": true |
||||
} |
||||
}, |
||||
"else": { |
||||
"property_value_attributes": { |
||||
"visible": false |
||||
} |
||||
} |
||||
} |
||||
] |
||||
}, |
||||
{ |
||||
"config": "dolphin-common/fs.s3a.access.key", |
||||
"subsection-name": "dynamic-row1-col1", |
||||
"depends-on": [ |
||||
{ |
||||
"configs":[ |
||||
"dolphin-common/res.upload.startup.type" |
||||
], |
||||
"if": "${dolphin-common/res.upload.startup.type} === S3", |
||||
"then": { |
||||
"property_value_attributes": { |
||||
"visible": true |
||||
} |
||||
}, |
||||
"else": { |
||||
"property_value_attributes": { |
||||
"visible": false |
||||
} |
||||
} |
||||
} |
||||
] |
||||
}, |
||||
{ |
||||
"config": "dolphin-common/fs.s3a.secret.key", |
||||
"subsection-name": "dynamic-row1-col1", |
||||
"depends-on": [ |
||||
{ |
||||
"configs":[ |
||||
"dolphin-common/res.upload.startup.type" |
||||
], |
||||
"if": "${dolphin-common/res.upload.startup.type} === S3", |
||||
"then": { |
||||
"property_value_attributes": { |
||||
"visible": true |
||||
} |
||||
}, |
||||
"else": { |
||||
"property_value_attributes": { |
||||
"visible": false |
||||
} |
||||
} |
||||
} |
||||
] |
||||
}, |
||||
{ |
||||
"config": "dolphin-common/hadoop.security.authentication.startup.state", |
||||
"subsection-name": "dynamic-row1-col2" |
||||
}, |
||||
{ |
||||
"config": "dolphin-common/java.security.krb5.conf.path", |
||||
"subsection-name": "dynamic-row1-col2", |
||||
"depends-on": [ |
||||
{ |
||||
"configs":[ |
||||
"dolphin-common/hadoop.security.authentication.startup.state" |
||||
], |
||||
"if": "${dolphin-common/hadoop.security.authentication.startup.state}", |
||||
"then": { |
||||
"property_value_attributes": { |
||||
"visible": true |
||||
} |
||||
}, |
||||
"else": { |
||||
"property_value_attributes": { |
||||
"visible": false |
||||
} |
||||
} |
||||
} |
||||
] |
||||
}, |
||||
{ |
||||
"config": "dolphin-common/login.user.keytab.username", |
||||
"subsection-name": "dynamic-row1-col2", |
||||
"depends-on": [ |
||||
{ |
||||
"configs":[ |
||||
"dolphin-common/hadoop.security.authentication.startup.state" |
||||
], |
||||
"if": "${dolphin-common/hadoop.security.authentication.startup.state}", |
||||
"then": { |
||||
"property_value_attributes": { |
||||
"visible": true |
||||
} |
||||
}, |
||||
"else": { |
||||
"property_value_attributes": { |
||||
"visible": false |
||||
} |
||||
} |
||||
} |
||||
] |
||||
}, |
||||
{ |
||||
"config": "dolphin-common/login.user.keytab.path", |
||||
"subsection-name": "dynamic-row1-col2", |
||||
"depends-on": [ |
||||
{ |
||||
"configs":[ |
||||
"dolphin-common/hadoop.security.authentication.startup.state" |
||||
], |
||||
"if": "${dolphin-common/hadoop.security.authentication.startup.state}", |
||||
"then": { |
||||
"property_value_attributes": { |
||||
"visible": true |
||||
} |
||||
}, |
||||
"else": { |
||||
"property_value_attributes": { |
||||
"visible": false |
||||
} |
||||
} |
||||
} |
||||
] |
||||
}, |
||||
{ |
||||
"config": "dolphin-alert/enterprise.wechat.enable", |
||||
"subsection-name": "dynamic-row1-col3" |
||||
}, |
||||
{ |
||||
"config": "dolphin-alert/enterprise.wechat.corp.id", |
||||
"subsection-name": "dynamic-row1-col3", |
||||
"depends-on": [ |
||||
{ |
||||
"configs":[ |
||||
"dolphin-alert/enterprise.wechat.enable" |
||||
], |
||||
"if": "${dolphin-alert/enterprise.wechat.enable}", |
||||
"then": { |
||||
"property_value_attributes": { |
||||
"visible": true |
||||
} |
||||
}, |
||||
"else": { |
||||
"property_value_attributes": { |
||||
"visible": false |
||||
} |
||||
} |
||||
} |
||||
] |
||||
}, |
||||
{ |
||||
"config": "dolphin-alert/enterprise.wechat.secret", |
||||
"subsection-name": "dynamic-row1-col3", |
||||
"depends-on": [ |
||||
{ |
||||
"configs":[ |
||||
"dolphin-alert/enterprise.wechat.enable" |
||||
], |
||||
"if": "${dolphin-alert/enterprise.wechat.enable}", |
||||
"then": { |
||||
"property_value_attributes": { |
||||
"visible": true |
||||
} |
||||
}, |
||||
"else": { |
||||
"property_value_attributes": { |
||||
"visible": false |
||||
} |
||||
} |
||||
} |
||||
] |
||||
}, |
||||
{ |
||||
"config": "dolphin-alert/enterprise.wechat.agent.id", |
||||
"subsection-name": "dynamic-row1-col3", |
||||
"depends-on": [ |
||||
{ |
||||
"configs":[ |
||||
"dolphin-alert/enterprise.wechat.enable" |
||||
], |
||||
"if": "${dolphin-alert/enterprise.wechat.enable}", |
||||
"then": { |
||||
"property_value_attributes": { |
||||
"visible": true |
||||
} |
||||
}, |
||||
"else": { |
||||
"property_value_attributes": { |
||||
"visible": false |
||||
} |
||||
} |
||||
} |
||||
] |
||||
}, |
||||
{ |
||||
"config": "dolphin-alert/enterprise.wechat.users", |
||||
"subsection-name": "dynamic-row1-col3", |
||||
"depends-on": [ |
||||
{ |
||||
"configs":[ |
||||
"dolphin-alert/enterprise.wechat.enable" |
||||
], |
||||
"if": "${dolphin-alert/enterprise.wechat.enable}", |
||||
"then": { |
||||
"property_value_attributes": { |
||||
"visible": true |
||||
} |
||||
}, |
||||
"else": { |
||||
"property_value_attributes": { |
||||
"visible": false |
||||
} |
||||
} |
||||
} |
||||
] |
||||
} |
||||
] |
||||
}, |
||||
"widgets": [ |
||||
{ |
||||
"config": "dolphin-env/dolphin.database.type", |
||||
"widget": { |
||||
"type": "combo" |
||||
} |
||||
}, |
||||
{ |
||||
"config": "dolphin-env/dolphin.database.host", |
||||
"widget": { |
||||
"type": "text-field" |
||||
} |
||||
}, |
||||
{ |
||||
"config": "dolphin-env/dolphin.database.port", |
||||
"widget": { |
||||
"type": "text-field", |
||||
"units": [ |
||||
{ |
||||
"unit-name": "int" |
||||
} |
||||
] |
||||
} |
||||
}, |
||||
{ |
||||
"config": "dolphin-env/dolphin.database.username", |
||||
"widget": { |
||||
"type": "text-field" |
||||
} |
||||
}, |
||||
{ |
||||
"config": "dolphin-env/dolphin.database.password", |
||||
"widget": { |
||||
"type": "password" |
||||
} |
||||
}, |
||||
{ |
||||
"config": "dolphin-env/dolphin.user", |
||||
"widget": { |
||||
"type": "text-field" |
||||
} |
||||
}, |
||||
{ |
||||
"config": "dolphin-env/dolphin.group", |
||||
"widget": { |
||||
"type": "text-field" |
||||
} |
||||
}, |
||||
{ |
||||
"config": "dolphin-env/dolphinscheduler-env-content", |
||||
"widget": { |
||||
"type": "text-area" |
||||
} |
||||
}, |
||||
{ |
||||
"config": "dolphin-common/res.upload.startup.type", |
||||
"widget": { |
||||
"type": "combo" |
||||
} |
||||
}, |
||||
{ |
||||
"config": "dolphin-common/hdfs.root.user", |
||||
"widget": { |
||||
"type": "text-field" |
||||
} |
||||
}, |
||||
{ |
||||
"config": "dolphin-common/data.store2hdfs.basepath", |
||||
"widget": { |
||||
"type": "text-field" |
||||
} |
||||
}, |
||||
{ |
||||
"config": "dolphin-common/fs.defaultFS", |
||||
"widget": { |
||||
"type": "text-field" |
||||
} |
||||
}, |
||||
{ |
||||
"config": "dolphin-common/fs.s3a.endpoint", |
||||
"widget": { |
||||
"type": "text-field" |
||||
} |
||||
}, |
||||
{ |
||||
"config": "dolphin-common/fs.s3a.access.key", |
||||
"widget": { |
||||
"type": "text-field" |
||||
} |
||||
}, |
||||
{ |
||||
"config": "dolphin-common/fs.s3a.secret.key", |
||||
"widget": { |
||||
"type": "text-field" |
||||
} |
||||
}, |
||||
{ |
||||
"config": "dolphin-common/hadoop.security.authentication.startup.state", |
||||
"widget": { |
||||
"type": "toggle" |
||||
} |
||||
}, |
||||
{ |
||||
"config": "dolphin-common/java.security.krb5.conf.path", |
||||
"widget": { |
||||
"type": "text-field" |
||||
} |
||||
}, |
||||
{ |
||||
"config": "dolphin-common/login.user.keytab.username", |
||||
"widget": { |
||||
"type": "text-field" |
||||
} |
||||
}, |
||||
{ |
||||
"config": "dolphin-common/login.user.keytab.path", |
||||
"widget": { |
||||
"type": "text-field" |
||||
} |
||||
}, |
||||
{ |
||||
"config": "dolphin-alert/enterprise.wechat.enable", |
||||
"widget": { |
||||
"type": "toggle" |
||||
} |
||||
}, |
||||
{ |
||||
"config": "dolphin-alert/enterprise.wechat.corp.id", |
||||
"widget": { |
||||
"type": "text-field" |
||||
} |
||||
}, |
||||
{ |
||||
"config": "dolphin-alert/enterprise.wechat.secret", |
||||
"widget": { |
||||
"type": "text-field" |
||||
} |
||||
}, |
||||
{ |
||||
"config": "dolphin-alert/enterprise.wechat.agent.id", |
||||
"widget": { |
||||
"type": "text-field" |
||||
} |
||||
}, |
||||
{ |
||||
"config": "dolphin-alert/enterprise.wechat.users", |
||||
"widget": { |
||||
"type": "text-field" |
||||
} |
||||
} |
||||
] |
||||
} |
||||
} |
Binary file not shown.
@ -0,0 +1,26 @@
|
||||
<?xml version="1.0"?> |
||||
<!-- |
||||
Licensed to the Apache Software Foundation (ASF) under one or more |
||||
contributor license agreements. See the NOTICE file distributed with |
||||
this work for additional information regarding copyright ownership. |
||||
The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
(the "License"); you may not use this file except in compliance with |
||||
the License. You may obtain a copy of the License at |
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0 |
||||
|
||||
Unless required by applicable law or agreed to in writing, software |
||||
distributed under the License is distributed on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
See the License for the specific language governing permissions and |
||||
limitations under the License. |
||||
--> |
||||
<metainfo> |
||||
<schemaVersion>2.0</schemaVersion> |
||||
<services> |
||||
<service> |
||||
<name>DOLPHIN</name> |
||||
<extends>common-services/DOLPHIN/1.2.1</extends> |
||||
</service> |
||||
</services> |
||||
</metainfo> |
@ -0,0 +1,24 @@
|
||||
version: '2' |
||||
services: |
||||
zookeeper: |
||||
image: zookeeper |
||||
restart: always |
||||
container_name: zookeeper |
||||
ports: |
||||
- "2181:2181" |
||||
environment: |
||||
ZOO_MY_ID: 1 |
||||
db: |
||||
image: postgres |
||||
container_name: postgres |
||||
environment: |
||||
- POSTGRES_USER=test |
||||
- POSTGRES_PASSWORD=test |
||||
- POSTGRES_DB=dolphinscheduler |
||||
ports: |
||||
- "5432:5432" |
||||
volumes: |
||||
- pgdata:/var/lib/postgresql/data |
||||
- ./postgres/docker-entrypoint-initdb:/docker-entrypoint-initdb.d |
||||
volumes: |
||||
pgdata: |
@ -0,0 +1,771 @@
|
||||
/* |
||||
* Licensed to the Apache Software Foundation (ASF) under one or more |
||||
* contributor license agreements. See the NOTICE file distributed with |
||||
* this work for additional information regarding copyright ownership. |
||||
* The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
* (the "License"); you may not use this file except in compliance with |
||||
* the License. You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0 |
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
DROP TABLE IF EXISTS QRTZ_FIRED_TRIGGERS; |
||||
DROP TABLE IF EXISTS QRTZ_PAUSED_TRIGGER_GRPS; |
||||
DROP TABLE IF EXISTS QRTZ_SCHEDULER_STATE; |
||||
DROP TABLE IF EXISTS QRTZ_LOCKS; |
||||
DROP TABLE IF EXISTS QRTZ_SIMPLE_TRIGGERS; |
||||
DROP TABLE IF EXISTS QRTZ_SIMPROP_TRIGGERS; |
||||
DROP TABLE IF EXISTS QRTZ_CRON_TRIGGERS; |
||||
DROP TABLE IF EXISTS QRTZ_BLOB_TRIGGERS; |
||||
DROP TABLE IF EXISTS QRTZ_TRIGGERS; |
||||
DROP TABLE IF EXISTS QRTZ_JOB_DETAILS; |
||||
DROP TABLE IF EXISTS QRTZ_CALENDARS; |
||||
|
||||
CREATE TABLE QRTZ_JOB_DETAILS( |
||||
SCHED_NAME character varying(120) NOT NULL, |
||||
JOB_NAME character varying(200) NOT NULL, |
||||
JOB_GROUP character varying(200) NOT NULL, |
||||
DESCRIPTION character varying(250) NULL, |
||||
JOB_CLASS_NAME character varying(250) NOT NULL, |
||||
IS_DURABLE boolean NOT NULL, |
||||
IS_NONCONCURRENT boolean NOT NULL, |
||||
IS_UPDATE_DATA boolean NOT NULL, |
||||
REQUESTS_RECOVERY boolean NOT NULL, |
||||
JOB_DATA bytea NULL); |
||||
alter table QRTZ_JOB_DETAILS add primary key(SCHED_NAME,JOB_NAME,JOB_GROUP); |
||||
|
||||
CREATE TABLE QRTZ_TRIGGERS ( |
||||
SCHED_NAME character varying(120) NOT NULL, |
||||
TRIGGER_NAME character varying(200) NOT NULL, |
||||
TRIGGER_GROUP character varying(200) NOT NULL, |
||||
JOB_NAME character varying(200) NOT NULL, |
||||
JOB_GROUP character varying(200) NOT NULL, |
||||
DESCRIPTION character varying(250) NULL, |
||||
NEXT_FIRE_TIME BIGINT NULL, |
||||
PREV_FIRE_TIME BIGINT NULL, |
||||
PRIORITY INTEGER NULL, |
||||
TRIGGER_STATE character varying(16) NOT NULL, |
||||
TRIGGER_TYPE character varying(8) NOT NULL, |
||||
START_TIME BIGINT NOT NULL, |
||||
END_TIME BIGINT NULL, |
||||
CALENDAR_NAME character varying(200) NULL, |
||||
MISFIRE_INSTR SMALLINT NULL, |
||||
JOB_DATA bytea NULL) ; |
||||
alter table QRTZ_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); |
||||
|
||||
CREATE TABLE QRTZ_SIMPLE_TRIGGERS ( |
||||
SCHED_NAME character varying(120) NOT NULL, |
||||
TRIGGER_NAME character varying(200) NOT NULL, |
||||
TRIGGER_GROUP character varying(200) NOT NULL, |
||||
REPEAT_COUNT BIGINT NOT NULL, |
||||
REPEAT_INTERVAL BIGINT NOT NULL, |
||||
TIMES_TRIGGERED BIGINT NOT NULL) ; |
||||
alter table QRTZ_SIMPLE_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); |
||||
|
||||
CREATE TABLE QRTZ_CRON_TRIGGERS ( |
||||
SCHED_NAME character varying(120) NOT NULL, |
||||
TRIGGER_NAME character varying(200) NOT NULL, |
||||
TRIGGER_GROUP character varying(200) NOT NULL, |
||||
CRON_EXPRESSION character varying(120) NOT NULL, |
||||
TIME_ZONE_ID character varying(80)) ; |
||||
alter table QRTZ_CRON_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); |
||||
|
||||
CREATE TABLE QRTZ_SIMPROP_TRIGGERS |
||||
( |
||||
SCHED_NAME character varying(120) NOT NULL, |
||||
TRIGGER_NAME character varying(200) NOT NULL, |
||||
TRIGGER_GROUP character varying(200) NOT NULL, |
||||
STR_PROP_1 character varying(512) NULL, |
||||
STR_PROP_2 character varying(512) NULL, |
||||
STR_PROP_3 character varying(512) NULL, |
||||
INT_PROP_1 INT NULL, |
||||
INT_PROP_2 INT NULL, |
||||
LONG_PROP_1 BIGINT NULL, |
||||
LONG_PROP_2 BIGINT NULL, |
||||
DEC_PROP_1 NUMERIC(13,4) NULL, |
||||
DEC_PROP_2 NUMERIC(13,4) NULL, |
||||
BOOL_PROP_1 boolean NULL, |
||||
BOOL_PROP_2 boolean NULL) ; |
||||
alter table QRTZ_SIMPROP_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); |
||||
|
||||
CREATE TABLE QRTZ_BLOB_TRIGGERS ( |
||||
SCHED_NAME character varying(120) NOT NULL, |
||||
TRIGGER_NAME character varying(200) NOT NULL, |
||||
TRIGGER_GROUP character varying(200) NOT NULL, |
||||
BLOB_DATA bytea NULL) ; |
||||
alter table QRTZ_BLOB_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); |
||||
|
||||
CREATE TABLE QRTZ_CALENDARS ( |
||||
SCHED_NAME character varying(120) NOT NULL, |
||||
CALENDAR_NAME character varying(200) NOT NULL, |
||||
CALENDAR bytea NOT NULL) ; |
||||
alter table QRTZ_CALENDARS add primary key(SCHED_NAME,CALENDAR_NAME); |
||||
|
||||
CREATE TABLE QRTZ_PAUSED_TRIGGER_GRPS ( |
||||
SCHED_NAME character varying(120) NOT NULL, |
||||
TRIGGER_GROUP character varying(200) NOT NULL) ; |
||||
alter table QRTZ_PAUSED_TRIGGER_GRPS add primary key(SCHED_NAME,TRIGGER_GROUP); |
||||
|
||||
CREATE TABLE QRTZ_FIRED_TRIGGERS ( |
||||
SCHED_NAME character varying(120) NOT NULL, |
||||
ENTRY_ID character varying(95) NOT NULL, |
||||
TRIGGER_NAME character varying(200) NOT NULL, |
||||
TRIGGER_GROUP character varying(200) NOT NULL, |
||||
INSTANCE_NAME character varying(200) NOT NULL, |
||||
FIRED_TIME BIGINT NOT NULL, |
||||
SCHED_TIME BIGINT NOT NULL, |
||||
PRIORITY INTEGER NOT NULL, |
||||
STATE character varying(16) NOT NULL, |
||||
JOB_NAME character varying(200) NULL, |
||||
JOB_GROUP character varying(200) NULL, |
||||
IS_NONCONCURRENT boolean NULL, |
||||
REQUESTS_RECOVERY boolean NULL) ; |
||||
alter table QRTZ_FIRED_TRIGGERS add primary key(SCHED_NAME,ENTRY_ID); |
||||
|
||||
CREATE TABLE QRTZ_SCHEDULER_STATE ( |
||||
SCHED_NAME character varying(120) NOT NULL, |
||||
INSTANCE_NAME character varying(200) NOT NULL, |
||||
LAST_CHECKIN_TIME BIGINT NOT NULL, |
||||
CHECKIN_INTERVAL BIGINT NOT NULL) ; |
||||
alter table QRTZ_SCHEDULER_STATE add primary key(SCHED_NAME,INSTANCE_NAME); |
||||
|
||||
CREATE TABLE QRTZ_LOCKS ( |
||||
SCHED_NAME character varying(120) NOT NULL, |
||||
LOCK_NAME character varying(40) NOT NULL) ; |
||||
alter table QRTZ_LOCKS add primary key(SCHED_NAME,LOCK_NAME); |
||||
|
||||
CREATE INDEX IDX_QRTZ_J_REQ_RECOVERY ON QRTZ_JOB_DETAILS(SCHED_NAME,REQUESTS_RECOVERY); |
||||
CREATE INDEX IDX_QRTZ_J_GRP ON QRTZ_JOB_DETAILS(SCHED_NAME,JOB_GROUP); |
||||
|
||||
CREATE INDEX IDX_QRTZ_T_J ON QRTZ_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP); |
||||
CREATE INDEX IDX_QRTZ_T_JG ON QRTZ_TRIGGERS(SCHED_NAME,JOB_GROUP); |
||||
CREATE INDEX IDX_QRTZ_T_C ON QRTZ_TRIGGERS(SCHED_NAME,CALENDAR_NAME); |
||||
CREATE INDEX IDX_QRTZ_T_G ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP); |
||||
CREATE INDEX IDX_QRTZ_T_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE); |
||||
CREATE INDEX IDX_QRTZ_T_N_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_STATE); |
||||
CREATE INDEX IDX_QRTZ_T_N_G_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP,TRIGGER_STATE); |
||||
CREATE INDEX IDX_QRTZ_T_NEXT_FIRE_TIME ON QRTZ_TRIGGERS(SCHED_NAME,NEXT_FIRE_TIME); |
||||
CREATE INDEX IDX_QRTZ_T_NFT_ST ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE,NEXT_FIRE_TIME); |
||||
CREATE INDEX IDX_QRTZ_T_NFT_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME); |
||||
CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE); |
||||
CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE_GRP ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE); |
||||
|
||||
CREATE INDEX IDX_QRTZ_FT_TRIG_INST_NAME ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME); |
||||
CREATE INDEX IDX_QRTZ_FT_INST_JOB_REQ_RCVRY ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME,REQUESTS_RECOVERY); |
||||
CREATE INDEX IDX_QRTZ_FT_J_G ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP); |
||||
CREATE INDEX IDX_QRTZ_FT_JG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_GROUP); |
||||
CREATE INDEX IDX_QRTZ_FT_T_G ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); |
||||
CREATE INDEX IDX_QRTZ_FT_TG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_GROUP); |
||||
|
||||
|
||||
-- |
||||
-- Table structure for table t_ds_access_token |
||||
-- |
||||
|
||||
DROP TABLE IF EXISTS t_ds_access_token; |
||||
CREATE TABLE t_ds_access_token ( |
||||
id int NOT NULL , |
||||
user_id int DEFAULT NULL , |
||||
token varchar(64) DEFAULT NULL , |
||||
expire_time timestamp DEFAULT NULL , |
||||
create_time timestamp DEFAULT NULL , |
||||
update_time timestamp DEFAULT NULL , |
||||
PRIMARY KEY (id) |
||||
) ; |
||||
|
||||
-- |
||||
-- Table structure for table t_ds_alert |
||||
-- |
||||
|
||||
DROP TABLE IF EXISTS t_ds_alert; |
||||
CREATE TABLE t_ds_alert ( |
||||
id int NOT NULL , |
||||
title varchar(64) DEFAULT NULL , |
||||
show_type int DEFAULT NULL , |
||||
content text , |
||||
alert_type int DEFAULT NULL , |
||||
alert_status int DEFAULT '0' , |
||||
log text , |
||||
alertgroup_id int DEFAULT NULL , |
||||
receivers text , |
||||
receivers_cc text , |
||||
create_time timestamp DEFAULT NULL , |
||||
update_time timestamp DEFAULT NULL , |
||||
PRIMARY KEY (id) |
||||
) ; |
||||
-- |
||||
-- Table structure for table t_ds_alertgroup |
||||
-- |
||||
|
||||
DROP TABLE IF EXISTS t_ds_alertgroup; |
||||
CREATE TABLE t_ds_alertgroup ( |
||||
id int NOT NULL , |
||||
group_name varchar(255) DEFAULT NULL , |
||||
group_type int DEFAULT NULL , |
||||
description varchar(255) DEFAULT NULL , |
||||
create_time timestamp DEFAULT NULL , |
||||
update_time timestamp DEFAULT NULL , |
||||
PRIMARY KEY (id) |
||||
) ; |
||||
|
||||
-- |
||||
-- Table structure for table t_ds_command |
||||
-- |
||||
|
||||
DROP TABLE IF EXISTS t_ds_command; |
||||
CREATE TABLE t_ds_command ( |
||||
id int NOT NULL , |
||||
command_type int DEFAULT NULL , |
||||
process_definition_id int DEFAULT NULL , |
||||
command_param text , |
||||
task_depend_type int DEFAULT NULL , |
||||
failure_strategy int DEFAULT '0' , |
||||
warning_type int DEFAULT '0' , |
||||
warning_group_id int DEFAULT NULL , |
||||
schedule_time timestamp DEFAULT NULL , |
||||
start_time timestamp DEFAULT NULL , |
||||
executor_id int DEFAULT NULL , |
||||
dependence varchar(255) DEFAULT NULL , |
||||
update_time timestamp DEFAULT NULL , |
||||
process_instance_priority int DEFAULT NULL , |
||||
worker_group_id int DEFAULT '-1' , |
||||
PRIMARY KEY (id) |
||||
) ; |
||||
|
||||
-- |
||||
-- Table structure for table t_ds_datasource |
||||
-- |
||||
|
||||
DROP TABLE IF EXISTS t_ds_datasource; |
||||
CREATE TABLE t_ds_datasource ( |
||||
id int NOT NULL , |
||||
name varchar(64) NOT NULL , |
||||
note varchar(256) DEFAULT NULL , |
||||
type int NOT NULL , |
||||
user_id int NOT NULL , |
||||
connection_params text NOT NULL , |
||||
create_time timestamp NOT NULL , |
||||
update_time timestamp DEFAULT NULL , |
||||
PRIMARY KEY (id) |
||||
) ; |
||||
|
||||
-- |
||||
-- Table structure for table t_ds_error_command |
||||
-- |
||||
|
||||
DROP TABLE IF EXISTS t_ds_error_command; |
||||
CREATE TABLE t_ds_error_command ( |
||||
id int NOT NULL , |
||||
command_type int DEFAULT NULL , |
||||
executor_id int DEFAULT NULL , |
||||
process_definition_id int DEFAULT NULL , |
||||
command_param text , |
||||
task_depend_type int DEFAULT NULL , |
||||
failure_strategy int DEFAULT '0' , |
||||
warning_type int DEFAULT '0' , |
||||
warning_group_id int DEFAULT NULL , |
||||
schedule_time timestamp DEFAULT NULL , |
||||
start_time timestamp DEFAULT NULL , |
||||
update_time timestamp DEFAULT NULL , |
||||
dependence text , |
||||
process_instance_priority int DEFAULT NULL , |
||||
worker_group_id int DEFAULT '-1' , |
||||
message text , |
||||
PRIMARY KEY (id) |
||||
); |
||||
-- |
||||
-- Table structure for table t_ds_master_server |
||||
-- |
||||
|
||||
DROP TABLE IF EXISTS t_ds_master_server; |
||||
CREATE TABLE t_ds_master_server ( |
||||
id int NOT NULL , |
||||
host varchar(45) DEFAULT NULL , |
||||
port int DEFAULT NULL , |
||||
zk_directory varchar(64) DEFAULT NULL , |
||||
res_info varchar(256) DEFAULT NULL , |
||||
create_time timestamp DEFAULT NULL , |
||||
last_heartbeat_time timestamp DEFAULT NULL , |
||||
PRIMARY KEY (id) |
||||
) ; |
||||
|
||||
-- |
||||
-- Table structure for table t_ds_process_definition |
||||
-- |
||||
|
||||
DROP TABLE IF EXISTS t_ds_process_definition; |
||||
CREATE TABLE t_ds_process_definition ( |
||||
id int NOT NULL , |
||||
name varchar(255) DEFAULT NULL , |
||||
version int DEFAULT NULL , |
||||
release_state int DEFAULT NULL , |
||||
project_id int DEFAULT NULL , |
||||
user_id int DEFAULT NULL , |
||||
process_definition_json text , |
||||
description text , |
||||
global_params text , |
||||
flag int DEFAULT NULL , |
||||
locations text , |
||||
connects text , |
||||
receivers text , |
||||
receivers_cc text , |
||||
create_time timestamp DEFAULT NULL , |
||||
timeout int DEFAULT '0' , |
||||
tenant_id int NOT NULL DEFAULT '-1' , |
||||
update_time timestamp DEFAULT NULL , |
||||
PRIMARY KEY (id) |
||||
) ; |
||||
|
||||
create index process_definition_index on t_ds_process_definition (project_id,id); |
||||
|
||||
-- |
||||
-- Table structure for table t_ds_process_instance |
||||
-- |
||||
|
||||
DROP TABLE IF EXISTS t_ds_process_instance; |
||||
CREATE TABLE t_ds_process_instance ( |
||||
id int NOT NULL , |
||||
name varchar(255) DEFAULT NULL , |
||||
process_definition_id int DEFAULT NULL , |
||||
state int DEFAULT NULL , |
||||
recovery int DEFAULT NULL , |
||||
start_time timestamp DEFAULT NULL , |
||||
end_time timestamp DEFAULT NULL , |
||||
run_times int DEFAULT NULL , |
||||
host varchar(45) DEFAULT NULL , |
||||
command_type int DEFAULT NULL , |
||||
command_param text , |
||||
task_depend_type int DEFAULT NULL , |
||||
max_try_times int DEFAULT '0' , |
||||
failure_strategy int DEFAULT '0' , |
||||
warning_type int DEFAULT '0' , |
||||
warning_group_id int DEFAULT NULL , |
||||
schedule_time timestamp DEFAULT NULL , |
||||
command_start_time timestamp DEFAULT NULL , |
||||
global_params text , |
||||
process_instance_json text , |
||||
flag int DEFAULT '1' , |
||||
update_time timestamp NULL , |
||||
is_sub_process int DEFAULT '0' , |
||||
executor_id int NOT NULL , |
||||
locations text , |
||||
connects text , |
||||
history_cmd text , |
||||
dependence_schedule_times text , |
||||
process_instance_priority int DEFAULT NULL , |
||||
worker_group_id int DEFAULT '-1' , |
||||
timeout int DEFAULT '0' , |
||||
tenant_id int NOT NULL DEFAULT '-1' , |
||||
PRIMARY KEY (id) |
||||
) ; |
||||
create index process_instance_index on t_ds_process_instance (process_definition_id,id); |
||||
create index start_time_index on t_ds_process_instance (start_time); |
||||
|
||||
-- |
||||
-- Table structure for table t_ds_project |
||||
-- |
||||
|
||||
DROP TABLE IF EXISTS t_ds_project; |
||||
CREATE TABLE t_ds_project ( |
||||
id int NOT NULL , |
||||
name varchar(100) DEFAULT NULL , |
||||
description varchar(200) DEFAULT NULL , |
||||
user_id int DEFAULT NULL , |
||||
flag int DEFAULT '1' , |
||||
create_time timestamp DEFAULT CURRENT_TIMESTAMP , |
||||
update_time timestamp DEFAULT CURRENT_TIMESTAMP , |
||||
PRIMARY KEY (id) |
||||
) ; |
||||
create index user_id_index on t_ds_project (user_id); |
||||
|
||||
-- |
||||
-- Table structure for table t_ds_queue |
||||
-- |
||||
|
||||
DROP TABLE IF EXISTS t_ds_queue; |
||||
CREATE TABLE t_ds_queue ( |
||||
id int NOT NULL , |
||||
queue_name varchar(64) DEFAULT NULL , |
||||
queue varchar(64) DEFAULT NULL , |
||||
create_time timestamp DEFAULT NULL , |
||||
update_time timestamp DEFAULT NULL , |
||||
PRIMARY KEY (id) |
||||
); |
||||
|
||||
|
||||
-- |
||||
-- Table structure for table t_ds_relation_datasource_user |
||||
-- |
||||
|
||||
DROP TABLE IF EXISTS t_ds_relation_datasource_user; |
||||
CREATE TABLE t_ds_relation_datasource_user ( |
||||
id int NOT NULL , |
||||
user_id int NOT NULL , |
||||
datasource_id int DEFAULT NULL , |
||||
perm int DEFAULT '1' , |
||||
create_time timestamp DEFAULT NULL , |
||||
update_time timestamp DEFAULT NULL , |
||||
PRIMARY KEY (id) |
||||
) ; |
||||
; |
||||
|
||||
-- |
||||
-- Table structure for table t_ds_relation_process_instance |
||||
-- |
||||
|
||||
DROP TABLE IF EXISTS t_ds_relation_process_instance; |
||||
CREATE TABLE t_ds_relation_process_instance ( |
||||
id int NOT NULL , |
||||
parent_process_instance_id int DEFAULT NULL , |
||||
parent_task_instance_id int DEFAULT NULL , |
||||
process_instance_id int DEFAULT NULL , |
||||
PRIMARY KEY (id) |
||||
) ; |
||||
|
||||
|
||||
-- |
||||
-- Table structure for table t_ds_relation_project_user |
||||
-- |
||||
|
||||
DROP TABLE IF EXISTS t_ds_relation_project_user; |
||||
CREATE TABLE t_ds_relation_project_user ( |
||||
id int NOT NULL , |
||||
user_id int NOT NULL , |
||||
project_id int DEFAULT NULL , |
||||
perm int DEFAULT '1' , |
||||
create_time timestamp DEFAULT NULL , |
||||
update_time timestamp DEFAULT NULL , |
||||
PRIMARY KEY (id) |
||||
) ; |
||||
create index relation_project_user_id_index on t_ds_relation_project_user (user_id); |
||||
|
||||
-- |
||||
-- Table structure for table t_ds_relation_resources_user |
||||
-- |
||||
|
||||
DROP TABLE IF EXISTS t_ds_relation_resources_user; |
||||
CREATE TABLE t_ds_relation_resources_user ( |
||||
id int NOT NULL , |
||||
user_id int NOT NULL , |
||||
resources_id int DEFAULT NULL , |
||||
perm int DEFAULT '1' , |
||||
create_time timestamp DEFAULT NULL , |
||||
update_time timestamp DEFAULT NULL , |
||||
PRIMARY KEY (id) |
||||
) ; |
||||
|
||||
-- |
||||
-- Table structure for table t_ds_relation_udfs_user |
||||
-- |
||||
|
||||
DROP TABLE IF EXISTS t_ds_relation_udfs_user; |
||||
CREATE TABLE t_ds_relation_udfs_user ( |
||||
id int NOT NULL , |
||||
user_id int NOT NULL , |
||||
udf_id int DEFAULT NULL , |
||||
perm int DEFAULT '1' , |
||||
create_time timestamp DEFAULT NULL , |
||||
update_time timestamp DEFAULT NULL , |
||||
PRIMARY KEY (id) |
||||
) ; |
||||
; |
||||
|
||||
-- |
||||
-- Table structure for table t_ds_relation_user_alertgroup |
||||
-- |
||||
|
||||
DROP TABLE IF EXISTS t_ds_relation_user_alertgroup; |
||||
CREATE TABLE t_ds_relation_user_alertgroup ( |
||||
id int NOT NULL, |
||||
alertgroup_id int DEFAULT NULL, |
||||
user_id int DEFAULT NULL, |
||||
create_time timestamp DEFAULT NULL, |
||||
update_time timestamp DEFAULT NULL, |
||||
PRIMARY KEY (id) |
||||
); |
||||
|
||||
-- |
||||
-- Table structure for table t_ds_resources |
||||
-- |
||||
|
||||
DROP TABLE IF EXISTS t_ds_resources; |
||||
CREATE TABLE t_ds_resources ( |
||||
id int NOT NULL , |
||||
alias varchar(64) DEFAULT NULL , |
||||
file_name varchar(64) DEFAULT NULL , |
||||
description varchar(256) DEFAULT NULL , |
||||
user_id int DEFAULT NULL , |
||||
type int DEFAULT NULL , |
||||
size bigint DEFAULT NULL , |
||||
create_time timestamp DEFAULT NULL , |
||||
update_time timestamp DEFAULT NULL , |
||||
PRIMARY KEY (id) |
||||
) ; |
||||
; |
||||
|
||||
-- |
||||
-- Table structure for table t_ds_schedules |
||||
-- |
||||
|
||||
DROP TABLE IF EXISTS t_ds_schedules; |
||||
CREATE TABLE t_ds_schedules ( |
||||
id int NOT NULL , |
||||
process_definition_id int NOT NULL , |
||||
start_time timestamp NOT NULL , |
||||
end_time timestamp NOT NULL , |
||||
crontab varchar(256) NOT NULL , |
||||
failure_strategy int NOT NULL , |
||||
user_id int NOT NULL , |
||||
release_state int NOT NULL , |
||||
warning_type int NOT NULL , |
||||
warning_group_id int DEFAULT NULL , |
||||
process_instance_priority int DEFAULT NULL , |
||||
worker_group_id int DEFAULT '-1' , |
||||
create_time timestamp NOT NULL , |
||||
update_time timestamp NOT NULL , |
||||
PRIMARY KEY (id) |
||||
); |
||||
|
||||
-- |
||||
-- Table structure for table t_ds_session |
||||
-- |
||||
|
||||
DROP TABLE IF EXISTS t_ds_session; |
||||
CREATE TABLE t_ds_session ( |
||||
id varchar(64) NOT NULL , |
||||
user_id int DEFAULT NULL , |
||||
ip varchar(45) DEFAULT NULL , |
||||
last_login_time timestamp DEFAULT NULL , |
||||
PRIMARY KEY (id) |
||||
); |
||||
|
||||
-- |
||||
-- Table structure for table t_ds_task_instance |
||||
-- |
||||
|
||||
DROP TABLE IF EXISTS t_ds_task_instance; |
||||
CREATE TABLE t_ds_task_instance ( |
||||
id int NOT NULL , |
||||
name varchar(255) DEFAULT NULL , |
||||
task_type varchar(64) DEFAULT NULL , |
||||
process_definition_id int DEFAULT NULL , |
||||
process_instance_id int DEFAULT NULL , |
||||
task_json text , |
||||
state int DEFAULT NULL , |
||||
submit_time timestamp DEFAULT NULL , |
||||
start_time timestamp DEFAULT NULL , |
||||
end_time timestamp DEFAULT NULL , |
||||
host varchar(45) DEFAULT NULL , |
||||
execute_path varchar(200) DEFAULT NULL , |
||||
log_path varchar(200) DEFAULT NULL , |
||||
alert_flag int DEFAULT NULL , |
||||
retry_times int DEFAULT '0' , |
||||
pid int DEFAULT NULL , |
||||
app_link varchar(255) DEFAULT NULL , |
||||
flag int DEFAULT '1' , |
||||
retry_interval int DEFAULT NULL , |
||||
max_retry_times int DEFAULT NULL , |
||||
task_instance_priority int DEFAULT NULL , |
||||
worker_group_id int DEFAULT '-1' , |
||||
PRIMARY KEY (id) |
||||
) ; |
||||
|
||||
-- |
||||
-- Table structure for table t_ds_tenant |
||||
-- |
||||
|
||||
DROP TABLE IF EXISTS t_ds_tenant; |
||||
CREATE TABLE t_ds_tenant ( |
||||
id int NOT NULL , |
||||
tenant_code varchar(64) DEFAULT NULL , |
||||
tenant_name varchar(64) DEFAULT NULL , |
||||
description varchar(256) DEFAULT NULL , |
||||
queue_id int DEFAULT NULL , |
||||
create_time timestamp DEFAULT NULL , |
||||
update_time timestamp DEFAULT NULL , |
||||
PRIMARY KEY (id) |
||||
) ; |
||||
|
||||
-- |
||||
-- Table structure for table t_ds_udfs |
||||
-- |
||||
|
||||
-- UDF (user-defined function) registry.
DROP TABLE IF EXISTS t_ds_udfs;
CREATE TABLE t_ds_udfs (
  id int NOT NULL ,                          -- surrogate key; default is wired to a sequence later in this script
  user_id int NOT NULL ,
  func_name varchar(100) NOT NULL ,
  class_name varchar(255) NOT NULL ,
  type int NOT NULL ,
  arg_types varchar(255) DEFAULT NULL ,
  database varchar(255) DEFAULT NULL ,       -- NOTE(review): "database" is reserved in some dialects (e.g. MySQL); verify against the target DB
  description varchar(255) DEFAULT NULL ,
  resource_id int NOT NULL ,
  resource_name varchar(255) NOT NULL ,
  create_time timestamp NOT NULL ,
  update_time timestamp NOT NULL ,
  PRIMARY KEY (id)
) ;
||||
|
||||
-- |
||||
-- Table structure for table t_ds_user |
||||
-- |
||||
|
||||
-- User account table.
DROP TABLE IF EXISTS t_ds_user;
CREATE TABLE t_ds_user (
  id int NOT NULL ,                          -- surrogate key; default is wired to a sequence later in this script
  user_name varchar(64) DEFAULT NULL ,
  user_password varchar(64) DEFAULT NULL ,   -- stored as a hash (see the seed INSERT at the end of this script)
  user_type int DEFAULT NULL ,
  email varchar(64) DEFAULT NULL ,
  phone varchar(11) DEFAULT NULL ,
  tenant_id int DEFAULT NULL ,
  create_time timestamp DEFAULT NULL ,
  update_time timestamp DEFAULT NULL ,
  queue varchar(64) DEFAULT NULL ,
  PRIMARY KEY (id)
);
||||
|
||||
-- |
||||
-- Table structure for table t_ds_version |
||||
-- |
||||
|
||||
-- Schema version bookkeeping (used by upgrade tooling).
DROP TABLE IF EXISTS t_ds_version;
CREATE TABLE t_ds_version (
  id int NOT NULL ,                          -- surrogate key; default is wired to a sequence later in this script
  version varchar(200) NOT NULL,
  PRIMARY KEY (id)
) ;
create index version_index on t_ds_version(version);
||||
|
||||
-- |
||||
-- Table structure for table t_ds_worker_group |
||||
-- |
||||
|
||||
-- Worker group table.
DROP TABLE IF EXISTS t_ds_worker_group;
CREATE TABLE t_ds_worker_group (
  id bigint NOT NULL ,                       -- surrogate key; default is wired to a sequence later in this script
  name varchar(256) DEFAULT NULL ,
  ip_list varchar(256) DEFAULT NULL ,
  create_time timestamp DEFAULT NULL ,
  update_time timestamp DEFAULT NULL ,
  PRIMARY KEY (id)
) ;
||||
|
||||
-- |
||||
-- Table structure for table t_ds_worker_server |
||||
-- |
||||
|
||||
-- Worker server registration/heartbeat table.
DROP TABLE IF EXISTS t_ds_worker_server;
CREATE TABLE t_ds_worker_server (
  id int NOT NULL ,                          -- surrogate key; default is wired to a sequence later in this script
  host varchar(45) DEFAULT NULL ,
  port int DEFAULT NULL ,
  zk_directory varchar(64) DEFAULT NULL ,
  res_info varchar(255) DEFAULT NULL ,
  create_time timestamp DEFAULT NULL ,
  last_heartbeat_time timestamp DEFAULT NULL ,
  PRIMARY KEY (id)
) ;
||||
|
||||
|
||||
-- Auto-increment wiring: for each table, (re)create a dedicated sequence
-- and make NEXTVAL(sequence) the default of its id column.
DROP SEQUENCE IF EXISTS t_ds_access_token_id_sequence;
CREATE SEQUENCE t_ds_access_token_id_sequence;
ALTER TABLE t_ds_access_token ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_access_token_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_alert_id_sequence;
CREATE SEQUENCE t_ds_alert_id_sequence;
ALTER TABLE t_ds_alert ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_alert_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_alertgroup_id_sequence;
CREATE SEQUENCE t_ds_alertgroup_id_sequence;
ALTER TABLE t_ds_alertgroup ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_alertgroup_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_command_id_sequence;
CREATE SEQUENCE t_ds_command_id_sequence;
ALTER TABLE t_ds_command ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_command_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_datasource_id_sequence;
CREATE SEQUENCE t_ds_datasource_id_sequence;
ALTER TABLE t_ds_datasource ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_datasource_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_master_server_id_sequence;
CREATE SEQUENCE t_ds_master_server_id_sequence;
ALTER TABLE t_ds_master_server ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_master_server_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_process_definition_id_sequence;
CREATE SEQUENCE t_ds_process_definition_id_sequence;
ALTER TABLE t_ds_process_definition ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_process_instance_id_sequence;
CREATE SEQUENCE t_ds_process_instance_id_sequence;
ALTER TABLE t_ds_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_instance_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_project_id_sequence;
CREATE SEQUENCE t_ds_project_id_sequence;
ALTER TABLE t_ds_project ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_project_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_queue_id_sequence;
CREATE SEQUENCE t_ds_queue_id_sequence;
ALTER TABLE t_ds_queue ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_queue_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_relation_datasource_user_id_sequence;
CREATE SEQUENCE t_ds_relation_datasource_user_id_sequence;
ALTER TABLE t_ds_relation_datasource_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_datasource_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_process_instance_id_sequence;
CREATE SEQUENCE t_ds_relation_process_instance_id_sequence;
ALTER TABLE t_ds_relation_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_process_instance_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_project_user_id_sequence;
CREATE SEQUENCE t_ds_relation_project_user_id_sequence;
ALTER TABLE t_ds_relation_project_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_project_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_resources_user_id_sequence;
CREATE SEQUENCE t_ds_relation_resources_user_id_sequence;
ALTER TABLE t_ds_relation_resources_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_resources_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_udfs_user_id_sequence;
CREATE SEQUENCE t_ds_relation_udfs_user_id_sequence;
ALTER TABLE t_ds_relation_udfs_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_udfs_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_user_alertgroup_id_sequence;
CREATE SEQUENCE t_ds_relation_user_alertgroup_id_sequence;
ALTER TABLE t_ds_relation_user_alertgroup ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_user_alertgroup_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_resources_id_sequence;
CREATE SEQUENCE t_ds_resources_id_sequence;
ALTER TABLE t_ds_resources ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_resources_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_schedules_id_sequence;
CREATE SEQUENCE t_ds_schedules_id_sequence;
ALTER TABLE t_ds_schedules ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_schedules_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_task_instance_id_sequence;
CREATE SEQUENCE t_ds_task_instance_id_sequence;
ALTER TABLE t_ds_task_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_task_instance_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_tenant_id_sequence;
CREATE SEQUENCE t_ds_tenant_id_sequence;
ALTER TABLE t_ds_tenant ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_tenant_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_udfs_id_sequence;
CREATE SEQUENCE t_ds_udfs_id_sequence;
ALTER TABLE t_ds_udfs ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_udfs_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_user_id_sequence;
CREATE SEQUENCE t_ds_user_id_sequence;
ALTER TABLE t_ds_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_user_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_version_id_sequence;
CREATE SEQUENCE t_ds_version_id_sequence;
ALTER TABLE t_ds_version ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_version_id_sequence');

DROP SEQUENCE IF EXISTS t_ds_worker_group_id_sequence;
CREATE SEQUENCE t_ds_worker_group_id_sequence;
ALTER TABLE t_ds_worker_group ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_worker_group_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_worker_server_id_sequence;
CREATE SEQUENCE t_ds_worker_server_id_sequence;
ALTER TABLE t_ds_worker_server ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_worker_server_id_sequence');
||||
|
||||
|
||||
-- Seed records of t_ds_user; user: admin, password: dolphinscheduler123 (stored hashed below)
INSERT INTO t_ds_user(user_name,user_password,user_type,email,phone,tenant_id,create_time,update_time) VALUES ('admin', '7ad2410b2f4c074479a8937a28a22b8f', '0', 'xxx@qq.com', 'xx', '0', '2018-03-27 15:48:50', '2018-10-24 17:40:22');

-- Seed records of t_ds_alertgroup: default "dolphinscheduler warning group", linked to the admin user
INSERT INTO t_ds_alertgroup(group_name,group_type,description,create_time,update_time) VALUES ('dolphinscheduler warning group', '0', 'dolphinscheduler warning group','2018-11-29 10:20:39', '2018-11-29 10:20:39');
INSERT INTO t_ds_relation_user_alertgroup(alertgroup_id,user_id,create_time,update_time) VALUES ( '1', '1', '2018-11-29 10:22:33', '2018-11-29 10:22:33');

-- Seed records of t_ds_queue; default queue name: default
INSERT INTO t_ds_queue(queue_name,queue,create_time,update_time) VALUES ('default', 'default','2018-11-29 10:22:33', '2018-11-29 10:22:33');

-- Seed record of t_ds_version: the schema version installed by this script
INSERT INTO t_ds_version(version) VALUES ('1.2.0');
@ -1,11 +1,306 @@
|
||||
## Build Image |
||||
## What is Dolphin Scheduler? |
||||
|
||||
Dolphin Scheduler is a distributed and easy-to-expand visual DAG workflow scheduling system, dedicated to solving the complex dependencies in data processing, making the scheduling system out of the box for data processing. |
||||
|
||||
Github URL: https://github.com/apache/incubator-dolphinscheduler |
||||
|
||||
Official Website: https://dolphinscheduler.apache.org |
||||
|
||||
![Dolphin Scheduler](https://dolphinscheduler.apache.org/img/hlogo_colorful.svg) |
||||
|
||||
[![EN doc](https://img.shields.io/badge/document-English-blue.svg)](README.md) |
||||
[![CN doc](https://img.shields.io/badge/文档-中文版-blue.svg)](README_zh_CN.md) |
||||
|
||||
## How to use this docker image |
||||
|
||||
#### You can start a dolphinscheduler instance |
||||
``` |
||||
$ docker run -dit --name dolphinscheduler \ |
||||
-e POSTGRESQL_USERNAME=test -e POSTGRESQL_PASSWORD=test \ |
||||
-p 8888:8888 \ |
||||
dolphinscheduler all |
||||
``` |
||||
|
||||
The default postgres user `root`, postgres password `root` and database `dolphinscheduler` are created in the `startup.sh`. |
||||
|
||||
The default zookeeper is created in the `startup.sh`. |
||||
|
||||
#### Or via Environment Variables **`POSTGRESQL_HOST`** **`POSTGRESQL_PORT`** **`ZOOKEEPER_QUORUM`** |
||||
|
||||
You can specify **existing postgres service**. Example: |
||||
|
||||
``` |
||||
$ docker run -dit --name dolphinscheduler \ |
||||
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" \ |
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \ |
||||
-p 8888:8888 \ |
||||
dolphinscheduler all |
||||
``` |
||||
|
||||
You can specify **existing zookeeper service**. Example: |
||||
|
||||
``` |
||||
$ docker run -dit --name dolphinscheduler \ |
||||
-e ZOOKEEPER_QUORUM="192.168.x.x:2181" \ |
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \ |
||||
-p 8888:8888 \ |
||||
dolphinscheduler all |
||||
``` |
||||
|
||||
#### Or start a standalone dolphinscheduler server |
||||
|
||||
You can start a standalone dolphinscheduler server. |
||||
|
||||
* Start a **master server**, For example: |
||||
|
||||
``` |
||||
$ docker run -dit --name dolphinscheduler \ |
||||
-e ZOOKEEPER_QUORUM="192.168.x.x:2181" \ |
||||
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" \ |
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \ |
||||
dolphinscheduler master-server |
||||
``` |
||||
|
||||
* Start a **worker server**, For example: |
||||
|
||||
``` |
||||
$ docker run -dit --name dolphinscheduler \ |
||||
-e ZOOKEEPER_QUORUM="192.168.x.x:2181" \ |
||||
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" \ |
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \ |
||||
dolphinscheduler worker-server |
||||
``` |
||||
|
||||
* Start a **api server**, For example: |
||||
|
||||
``` |
||||
$ docker run -dit --name dolphinscheduler \ |
||||
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" \ |
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \ |
||||
-p 12345:12345 \ |
||||
dolphinscheduler api-server |
||||
``` |
||||
|
||||
* Start a **alert server**, For example: |
||||
|
||||
``` |
||||
$ docker run -dit --name dolphinscheduler \ |
||||
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" \ |
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \ |
||||
dolphinscheduler alert-server |
||||
``` |
||||
|
||||
* Start a **frontend**, For example: |
||||
|
||||
``` |
||||
$ docker run -dit --name dolphinscheduler \ |
||||
-e FRONTEND_API_SERVER_HOST="192.168.x.x" -e FRONTEND_API_SERVER_PORT="12345" \ |
||||
-p 8888:8888 \ |
||||
dolphinscheduler frontend |
||||
``` |
||||
|
||||
**Note**: You must specify `POSTGRESQL_HOST`, `POSTGRESQL_PORT` and `ZOOKEEPER_QUORUM` when starting a standalone dolphinscheduler server. |
||||
|
||||
## How to build a docker image |
||||
|
||||
You can build a docker image in A Unix-like operating system, You can also build it in Windows operating system. |
||||
|
||||
In Unix-Like, Example: |
||||
|
||||
```bash |
||||
$ cd path/incubator-dolphinscheduler |
||||
$ sh ./dockerfile/hooks/build |
||||
``` |
||||
|
||||
In Windows, Example: |
||||
|
||||
```bat |
||||
c:\incubator-dolphinscheduler>.\dockerfile\hooks\build.bat |
||||
``` |
||||
|
||||
Please read `./dockerfile/hooks/build` `./dockerfile/hooks/build.bat` script files if you don't understand |
||||
|
||||
## Environment Variables |
||||
|
||||
The Dolphin Scheduler image uses several environment variables which are easy to miss. While none of the variables are required, they may significantly aid you in using the image. |
||||
|
||||
**`POSTGRESQL_HOST`** |
||||
|
||||
This environment variable sets the host for PostgreSQL. The default value is `127.0.0.1`. |
||||
|
||||
**Note**: You must specify it when starting a standalone dolphinscheduler server, like `master-server`, `worker-server`, `api-server`, `alert-server`. |
||||
|
||||
**`POSTGRESQL_PORT`** |
||||
|
||||
This environment variable sets the port for PostgreSQL. The default value is `5432`. |
||||
|
||||
**Note**: You must specify it when starting a standalone dolphinscheduler server, like `master-server`, `worker-server`, `api-server`, `alert-server`. |
||||
|
||||
**`POSTGRESQL_USERNAME`** |
||||
|
||||
This environment variable sets the username for PostgreSQL. The default value is `root`. |
||||
|
||||
**`POSTGRESQL_PASSWORD`** |
||||
|
||||
This environment variable sets the password for PostgreSQL. The default value is `root`. |
||||
|
||||
**`DOLPHINSCHEDULER_ENV_PATH`** |
||||
|
||||
This environment variable sets the runtime environment for task. The default value is `/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh`. |
||||
|
||||
**`TASK_QUEUE`** |
||||
|
||||
This environment variable sets the task queue for `master-server` and `worker-server`. The default value is `zookeeper`. |
||||
|
||||
**`ZOOKEEPER_QUORUM`** |
||||
|
||||
This environment variable sets zookeeper quorum for `master-server` and `worker-server`. The default value is `127.0.0.1:2181`. |
||||
|
||||
**Note**: You must specify it when starting a standalone dolphinscheduler server, like `master-server`, `worker-server`. |
||||
|
||||
**`MASTER_EXEC_THREADS`** |
||||
|
||||
This environment variable sets exec thread num for `master-server`. The default value is `100`. |
||||
|
||||
**`MASTER_EXEC_TASK_NUM`** |
||||
|
||||
This environment variable sets exec task num for `master-server`. The default value is `20`. |
||||
|
||||
**`MASTER_HEARTBEAT_INTERVAL`** |
||||
|
||||
This environment variable sets heartbeat interval for `master-server`. The default value is `10`. |
||||
|
||||
**`MASTER_TASK_COMMIT_RETRYTIMES`** |
||||
|
||||
This environment variable sets task commit retry times for `master-server`. The default value is `5`. |
||||
|
||||
**`MASTER_TASK_COMMIT_INTERVAL`** |
||||
|
||||
This environment variable sets task commit interval for `master-server`. The default value is `1000`. |
||||
|
||||
**`MASTER_MAX_CPULOAD_AVG`** |
||||
|
||||
This environment variable sets max cpu load avg for `master-server`. The default value is `100`. |
||||
|
||||
**`MASTER_RESERVED_MEMORY`** |
||||
|
||||
This environment variable sets reserved memory for `master-server`. The default value is `0.1`. |
||||
|
||||
**`WORKER_EXEC_THREADS`** |
||||
|
||||
This environment variable sets exec thread num for `worker-server`. The default value is `100`. |
||||
|
||||
**`WORKER_HEARTBEAT_INTERVAL`** |
||||
|
||||
This environment variable sets heartbeat interval for `worker-server`. The default value is `10`. |
||||
|
||||
**`WORKER_FETCH_TASK_NUM`** |
||||
|
||||
This environment variable sets fetch task num for `worker-server`. The default value is `3`. |
||||
|
||||
**`WORKER_MAX_CPULOAD_AVG`** |
||||
|
||||
This environment variable sets max cpu load avg for `worker-server`. The default value is `100`. |
||||
|
||||
**`WORKER_RESERVED_MEMORY`** |
||||
|
||||
This environment variable sets reserved memory for `worker-server`. The default value is `0.1`. |
||||
|
||||
**`XLS_FILE_PATH`** |
||||
|
||||
This environment variable sets xls file path for `alert-server`. The default value is `/tmp/xls`. |
||||
|
||||
**`MAIL_SERVER_HOST`** |
||||
|
||||
This environment variable sets mail server host for `alert-server`. The default value is empty. |
||||
|
||||
**`MAIL_SERVER_PORT`** |
||||
|
||||
This environment variable sets mail server port for `alert-server`. The default value is empty. |
||||
|
||||
**`MAIL_SENDER`** |
||||
|
||||
This environment variable sets mail sender for `alert-server`. The default value is empty. |
||||
|
||||
**`MAIL_USER`** |
||||
|
||||
This environment variable sets mail user for `alert-server`. The default value is empty. |
||||
|
||||
**`MAIL_PASSWD`** |
||||
|
||||
This environment variable sets mail password for `alert-server`. The default value is empty. |
||||
|
||||
**`MAIL_SMTP_STARTTLS_ENABLE`** |
||||
|
||||
This environment variable sets SMTP tls for `alert-server`. The default value is `true`. |
||||
|
||||
**`MAIL_SMTP_SSL_ENABLE`** |
||||
|
||||
This environment variable sets SMTP ssl for `alert-server`. The default value is `false`. |
||||
|
||||
**`MAIL_SMTP_SSL_TRUST`** |
||||
|
||||
This environment variable sets the SMTP SSL trust for `alert-server`. The default value is empty. |
||||
|
||||
**`ENTERPRISE_WECHAT_ENABLE`** |
||||
|
||||
This environment variable sets enterprise wechat enable for `alert-server`. The default value is `false`. |
||||
|
||||
**`ENTERPRISE_WECHAT_CORP_ID`** |
||||
|
||||
This environment variable sets enterprise wechat corp id for `alert-server`. The default value is empty. |
||||
|
||||
**`ENTERPRISE_WECHAT_SECRET`** |
||||
|
||||
This environment variable sets enterprise wechat secret for `alert-server`. The default value is empty. |
||||
|
||||
**`ENTERPRISE_WECHAT_AGENT_ID`** |
||||
|
||||
This environment variable sets enterprise wechat agent id for `alert-server`. The default value is empty. |
||||
|
||||
**`ENTERPRISE_WECHAT_USERS`** |
||||
|
||||
This environment variable sets enterprise wechat users for `alert-server`. The default value is empty. |
||||
|
||||
**`FRONTEND_API_SERVER_HOST`** |
||||
|
||||
This environment variable sets api server host for `frontend`. The default value is `127.0.0.1`. |
||||
|
||||
**Note**: You must specify it when starting a standalone dolphinscheduler server, like `api-server`. |
||||
|
||||
**`FRONTEND_API_SERVER_PORT`** |
||||
|
||||
This environment variable sets api server port for `frontend`. The default value is `12345`. |
||||
|
||||
**Note**: You must specify it when starting a standalone dolphinscheduler server, like `api-server`. |
||||
|
||||
## Initialization scripts |
||||
|
||||
If you would like to do additional initialization in an image derived from this one, add one or more environment variable under `/root/start-init-conf.sh`, and modify template files in `/opt/dolphinscheduler/conf/*.tpl`. |
||||
|
||||
For example, to add an environment variable `API_SERVER_PORT` in `/root/start-init-conf.sh`: |
||||
|
||||
``` |
||||
export API_SERVER_PORT=5555 |
||||
``` |
||||
|
||||
and to modify `/opt/dolphinscheduler/conf/application-api.properties.tpl` template file, add server port: |
||||
``` |
||||
cd .. |
||||
docker build -t dolphinscheduler --build-arg version=1.1.0 --build-arg tar_version=1.1.0-SNAPSHOT -f dockerfile/Dockerfile . |
||||
docker run -p 12345:12345 -p 8888:8888 --rm --name dolphinscheduler -d dolphinscheduler |
||||
server.port=${API_SERVER_PORT} |
||||
``` |
||||
* Visit the url: http://127.0.0.1:8888 |
||||
* UserName:admin Password:dolphinscheduler123 |
||||
|
||||
## Note |
||||
* MacOS: The memory of docker needs to be set to 4G, default 2G. Steps: Preferences -> Advanced -> adjust resources -> Apply & Restart |
||||
`/root/start-init-conf.sh` will dynamically generate config file: |
||||
|
||||
```sh |
||||
echo "generate app config" |
||||
ls ${DOLPHINSCHEDULER_HOME}/conf/ | grep ".tpl" | while read line; do |
||||
eval "cat << EOF |
||||
$(cat ${DOLPHINSCHEDULER_HOME}/conf/${line}) |
||||
EOF |
||||
" > ${DOLPHINSCHEDULER_HOME}/conf/${line%.*} |
||||
done |
||||
|
||||
echo "generate nginx config" |
||||
sed -i "s/FRONTEND_API_SERVER_HOST/${FRONTEND_API_SERVER_HOST}/g" /etc/nginx/conf.d/dolphinscheduler.conf |
||||
sed -i "s/FRONTEND_API_SERVER_PORT/${FRONTEND_API_SERVER_PORT}/g" /etc/nginx/conf.d/dolphinscheduler.conf |
||||
``` |
||||
|
@ -0,0 +1,306 @@
|
||||
## Dolphin Scheduler是什么? |
||||
|
||||
一个分布式易扩展的可视化DAG工作流任务调度系统。致力于解决数据处理流程中错综复杂的依赖关系,使调度系统在数据处理流程中`开箱即用`。 |
||||
|
||||
Github URL: https://github.com/apache/incubator-dolphinscheduler |
||||
|
||||
Official Website: https://dolphinscheduler.apache.org |
||||
|
||||
![Dolphin Scheduler](https://dolphinscheduler.apache.org/img/hlogo_colorful.svg) |
||||
|
||||
[![EN doc](https://img.shields.io/badge/document-English-blue.svg)](README.md) |
||||
[![CN doc](https://img.shields.io/badge/文档-中文版-blue.svg)](README_zh_CN.md) |
||||
|
||||
## 如何使用docker镜像 |
||||
|
||||
#### 你可以运行一个dolphinscheduler实例 |
||||
``` |
||||
$ docker run -dit --name dolphinscheduler \ |
||||
-e POSTGRESQL_USERNAME=test -e POSTGRESQL_PASSWORD=test \ |
||||
-p 8888:8888 \ |
||||
dolphinscheduler all |
||||
``` |
||||
|
||||
在`startup.sh`脚本中,默认的创建`Postgres`的用户、密码和数据库,默认值分别为:`root`、`root`、`dolphinscheduler`。 |
||||
|
||||
同时,默认的`Zookeeper`也会在`startup.sh`脚本中被创建。 |
||||
|
||||
#### 或者通过环境变量 **`POSTGRESQL_HOST`** **`POSTGRESQL_PORT`** **`ZOOKEEPER_QUORUM`** 使用已存在的服务 |
||||
|
||||
你可以指定一个已经存在的 **`Postgres`** 服务. 如下: |
||||
|
||||
``` |
||||
$ docker run -dit --name dolphinscheduler \ |
||||
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" \ |
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \ |
||||
-p 8888:8888 \ |
||||
dolphinscheduler all |
||||
``` |
||||
|
||||
你也可以指定一个已经存在的 **Zookeeper** 服务. 如下: |
||||
|
||||
``` |
||||
$ docker run -dit --name dolphinscheduler \ |
||||
-e ZOOKEEPER_QUORUM="192.168.x.x:2181" \ |
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \ |
||||
-p 8888:8888 \ |
||||
dolphinscheduler all |
||||
``` |
||||
|
||||
#### 或者运行dolphinscheduler中的部分服务 |
||||
|
||||
你能够运行dolphinscheduler中的部分服务。 |
||||
|
||||
* 启动一个 **master server**, 如下: |
||||
|
||||
``` |
||||
$ docker run -dit --name dolphinscheduler \ |
||||
-e ZOOKEEPER_QUORUM="192.168.x.x:2181" \ |
||||
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" \ |
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \ |
||||
dolphinscheduler master-server |
||||
``` |
||||
|
||||
* 启动一个 **worker server**, 如下: |
||||
|
||||
``` |
||||
$ docker run -dit --name dolphinscheduler \ |
||||
-e ZOOKEEPER_QUORUM="192.168.x.x:2181" \ |
||||
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" \ |
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \ |
||||
dolphinscheduler worker-server |
||||
``` |
||||
|
||||
* 启动一个 **api server**, 如下: |
||||
|
||||
``` |
||||
$ docker run -dit --name dolphinscheduler \ |
||||
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" \ |
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \ |
||||
-p 12345:12345 \ |
||||
dolphinscheduler api-server |
||||
``` |
||||
|
||||
* 启动一个 **alert server**, 如下: |
||||
|
||||
``` |
||||
$ docker run -dit --name dolphinscheduler \ |
||||
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" \ |
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \ |
||||
dolphinscheduler alert-server |
||||
``` |
||||
|
||||
* 启动一个 **frontend**, 如下: |
||||
|
||||
``` |
||||
$ docker run -dit --name dolphinscheduler \ |
||||
-e FRONTEND_API_SERVER_HOST="192.168.x.x" -e FRONTEND_API_SERVER_PORT="12345" \ |
||||
-p 8888:8888 \ |
||||
dolphinscheduler frontend |
||||
``` |
||||
|
||||
**注意**: 当你运行dolphinscheduler中的部分服务时,你必须指定这些环境变量 `POSTGRESQL_HOST` `POSTGRESQL_PORT` `ZOOKEEPER_QUORUM`。 |
||||
|
||||
## 如何构建一个docker镜像 |
||||
|
||||
你能够在类Unix系统和Windows系统中构建一个docker镜像。 |
||||
|
||||
类Unix系统, 如下: |
||||
|
||||
```bash |
||||
$ cd path/incubator-dolphinscheduler |
||||
$ sh ./dockerfile/hooks/build |
||||
``` |
||||
|
||||
Windows系统, 如下: |
||||
|
||||
```bat |
||||
c:\incubator-dolphinscheduler>.\dockerfile\hooks\build.bat |
||||
``` |
||||
|
||||
如果你不理解这些脚本 `./dockerfile/hooks/build` `./dockerfile/hooks/build.bat`,请阅读里面的内容。 |
||||
|
||||
## 环境变量 |
||||
|
||||
Dolphin Scheduler映像使用了几个容易遗漏的环境变量。虽然这些变量不是必须的,但是可以帮助你更容易配置镜像并根据你的需求定义相应的服务配置。 |
||||
|
||||
**`POSTGRESQL_HOST`** |
||||
|
||||
配置`PostgreSQL`的`HOST`, 默认值 `127.0.0.1`。 |
||||
|
||||
**注意**: 当运行`dolphinscheduler`中`master-server`、`worker-server`、`api-server`、`alert-server`这些服务时,必须指定这个环境变量,以便于你更好的搭建分布式服务。 |
||||
|
||||
**`POSTGRESQL_PORT`** |
||||
|
||||
配置`PostgreSQL`的`PORT`, 默认值 `5432`。 |
||||
|
||||
**注意**: 当运行`dolphinscheduler`中`master-server`、`worker-server`、`api-server`、`alert-server`这些服务时,必须指定这个环境变量,以便于你更好的搭建分布式服务。 |
||||
|
||||
**`POSTGRESQL_USERNAME`** |
||||
|
||||
配置`PostgreSQL`的`USERNAME`, 默认值 `root`。 |
||||
|
||||
**`POSTGRESQL_PASSWORD`** |
||||
|
||||
配置`PostgreSQL`的`PASSWORD`, 默认值 `root`。 |
||||
|
||||
**`DOLPHINSCHEDULER_ENV_PATH`** |
||||
|
||||
任务执行时的环境变量配置文件, 默认值 `/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh`。 |
||||
|
||||
**`TASK_QUEUE`** |
||||
|
||||
配置`master-server`和`worker-server`的`Zookeeper`任务队列名, 默认值 `zookeeper`。 |
||||
|
||||
**`ZOOKEEPER_QUORUM`** |
||||
|
||||
配置`master-server`和`worker-server`的`Zookeeper`地址, 默认值 `127.0.0.1:2181`。 |
||||
|
||||
**注意**: 当运行`dolphinscheduler`中`master-server`、`worker-server`这些服务时,必须指定这个环境变量,以便于你更好的搭建分布式服务。 |
||||
|
||||
**`MASTER_EXEC_THREADS`** |
||||
|
||||
配置`master-server`中的执行线程数量,默认值 `100`。 |
||||
|
||||
**`MASTER_EXEC_TASK_NUM`** |
||||
|
||||
配置`master-server`中的执行任务数量,默认值 `20`。 |
||||
|
||||
**`MASTER_HEARTBEAT_INTERVAL`** |
||||
|
||||
配置`master-server`中的心跳交互时间,默认值 `10`。 |
||||
|
||||
**`MASTER_TASK_COMMIT_RETRYTIMES`** |
||||
|
||||
配置`master-server`中的任务提交重试次数,默认值 `5`。 |
||||
|
||||
**`MASTER_TASK_COMMIT_INTERVAL`** |
||||
|
||||
配置`master-server`中的任务提交交互时间,默认值 `1000`。 |
||||
|
||||
**`MASTER_MAX_CPULOAD_AVG`** |
||||
|
||||
配置`master-server`中的CPU中的`load average`值,默认值 `100`。 |
||||
|
||||
**`MASTER_RESERVED_MEMORY`** |
||||
|
||||
配置`master-server`的保留内存,默认值 `0.1`。 |
||||
|
||||
**`WORKER_EXEC_THREADS`** |
||||
|
||||
配置`worker-server`中的执行线程数量,默认值 `100`。 |
||||
|
||||
**`WORKER_HEARTBEAT_INTERVAL`** |
||||
|
||||
配置`worker-server`中的心跳交互时间,默认值 `10`。 |
||||
|
||||
**`WORKER_FETCH_TASK_NUM`** |
||||
|
||||
配置`worker-server`中的获取任务的数量,默认值 `3`。 |
||||
|
||||
**`WORKER_MAX_CPULOAD_AVG`** |
||||
|
||||
配置`worker-server`中的CPU中的最大`load average`值,默认值 `100`。 |
||||
|
||||
**`WORKER_RESERVED_MEMORY`** |
||||
|
||||
配置`worker-server`的保留内存,默认值 `0.1`。 |
||||
|
||||
**`XLS_FILE_PATH`** |
||||
|
||||
配置`alert-server`的`XLS`文件的存储路径,默认值 `/tmp/xls`。 |
||||
|
||||
**`MAIL_SERVER_HOST`** |
||||
|
||||
配置`alert-server`的邮件服务地址,默认值 `空`。 |
||||
|
||||
**`MAIL_SERVER_PORT`** |
||||
|
||||
配置`alert-server`的邮件服务端口,默认值 `空`。 |
||||
|
||||
**`MAIL_SENDER`** |
||||
|
||||
配置`alert-server`的邮件发送人,默认值 `空`。 |
||||
|
||||
**`MAIL_USER`** |
||||
|
||||
配置`alert-server`的邮件服务用户名,默认值 `空`。 |
||||
|
||||
**`MAIL_PASSWD`** |
||||
|
||||
配置`alert-server`的邮件服务用户密码,默认值 `空`。 |
||||
|
||||
**`MAIL_SMTP_STARTTLS_ENABLE`** |
||||
|
||||
配置`alert-server`的邮件服务是否启用TLS,默认值 `true`。 |
||||
|
||||
**`MAIL_SMTP_SSL_ENABLE`** |
||||
|
||||
配置`alert-server`的邮件服务是否启用SSL,默认值 `false`。 |
||||
|
||||
**`MAIL_SMTP_SSL_TRUST`** |
||||
|
||||
配置`alert-server`的邮件服务SSL的信任地址,默认值 `空`。 |
||||
|
||||
**`ENTERPRISE_WECHAT_ENABLE`** |
||||
|
||||
配置`alert-server`的邮件服务是否启用企业微信,默认值 `false`。 |
||||
|
||||
**`ENTERPRISE_WECHAT_CORP_ID`** |
||||
|
||||
配置`alert-server`的邮件服务企业微信`ID`,默认值 `空`。 |
||||
|
||||
**`ENTERPRISE_WECHAT_SECRET`** |
||||
|
||||
配置`alert-server`的邮件服务企业微信`SECRET`,默认值 `空`。 |
||||
|
||||
**`ENTERPRISE_WECHAT_AGENT_ID`** |
||||
|
||||
配置`alert-server`的邮件服务企业微信`AGENT_ID`,默认值 `空`。 |
||||
|
||||
**`ENTERPRISE_WECHAT_USERS`** |
||||
|
||||
配置`alert-server`的邮件服务企业微信`USERS`,默认值 `空`。 |
||||
|
||||
**`FRONTEND_API_SERVER_HOST`** |
||||
|
||||
配置`frontend`的连接`api-server`的地址,默认值 `127.0.0.1`。 |
||||
|
||||
**Note**: 当单独运行`api-server`时,你应该指定`api-server`这个值。 |
||||
|
||||
**`FRONTEND_API_SERVER_PORT`** |
||||
|
||||
配置`frontend`的连接`api-server`的端口,默认值 `12345`。 |
||||
|
||||
**Note**: 当单独运行`api-server`时,你应该指定`api-server`这个值。 |
||||
|
||||
## 初始化脚本 |
||||
|
||||
如果你想在编译的时候或者运行的时候附加一些其它的操作及新增一些环境变量,你可以在`/root/start-init-conf.sh`文件中进行修改,同时如果涉及到配置文件的修改,请在`/opt/dolphinscheduler/conf/*.tpl`中修改相应的配置文件 |
||||
|
||||
例如,在`/root/start-init-conf.sh`添加一个环境变量`API_SERVER_PORT`: |
||||
|
||||
``` |
||||
export API_SERVER_PORT=5555 |
||||
``` |
||||
|
||||
当添加以上环境变量后,你应该在相应的模板文件`/opt/dolphinscheduler/conf/application-api.properties.tpl`中添加这个环境变量配置: |
||||
``` |
||||
server.port=${API_SERVER_PORT} |
||||
``` |
||||
|
||||
`/root/start-init-conf.sh`将根据模板文件动态的生成配置文件: |
||||
|
||||
```sh |
||||
echo "generate app config" |
||||
ls ${DOLPHINSCHEDULER_HOME}/conf/ | grep ".tpl" | while read line; do |
||||
eval "cat << EOF |
||||
$(cat ${DOLPHINSCHEDULER_HOME}/conf/${line}) |
||||
EOF |
||||
" > ${DOLPHINSCHEDULER_HOME}/conf/${line%.*} |
||||
done |
||||
|
||||
echo "generate nginx config" |
||||
sed -i "s/FRONTEND_API_SERVER_HOST/${FRONTEND_API_SERVER_HOST}/g" /etc/nginx/conf.d/dolphinscheduler.conf |
||||
sed -i "s/FRONTEND_API_SERVER_PORT/${FRONTEND_API_SERVER_PORT}/g" /etc/nginx/conf.d/dolphinscheduler.conf |
||||
``` |
@ -0,0 +1,50 @@
|
||||
# |
||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
||||
# contributor license agreements. See the NOTICE file distributed with |
||||
# this work for additional information regarding copyright ownership. |
||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
# (the "License"); you may not use this file except in compliance with |
||||
# the License. You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
# |
||||
# alert type is EMAIL/SMS
alert.type=EMAIL

# alert msg template, default is html template
#alert.template=html

# mail server configuration (values substituted from environment variables at container start)
mail.protocol=SMTP
mail.server.host=${MAIL_SERVER_HOST}
mail.server.port=${MAIL_SERVER_PORT}
mail.sender=${MAIL_SENDER}
mail.user=${MAIL_USER}
mail.passwd=${MAIL_PASSWD}
# TLS (STARTTLS)
mail.smtp.starttls.enable=${MAIL_SMTP_STARTTLS_ENABLE}
# SSL
mail.smtp.ssl.enable=${MAIL_SMTP_SSL_ENABLE}
mail.smtp.ssl.trust=${MAIL_SMTP_SSL_TRUST}

# xls file path; needs to be created if it does not exist
xls.file.path=${XLS_FILE_PATH}

# Enterprise WeChat configuration
enterprise.wechat.enable=${ENTERPRISE_WECHAT_ENABLE}
enterprise.wechat.corp.id=${ENTERPRISE_WECHAT_CORP_ID}
enterprise.wechat.secret=${ENTERPRISE_WECHAT_SECRET}
enterprise.wechat.agent.id=${ENTERPRISE_WECHAT_AGENT_ID}
enterprise.wechat.users=${ENTERPRISE_WECHAT_USERS}
enterprise.wechat.token.url=https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret
enterprise.wechat.push.url=https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token
enterprise.wechat.team.send.msg={\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}
enterprise.wechat.user.send.msg={\"touser\":\"$toUser\",\"agentid\":\"$agentId\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"$msg\"}}
@ -1,49 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8" ?> |
||||
<!-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
|
||||
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html --> |
||||
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" --> |
||||
<property name="log.base" value="logs" /> |
||||
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender"> |
||||
<encoder> |
||||
<pattern> |
||||
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n |
||||
</pattern> |
||||
<charset>UTF-8</charset> |
||||
</encoder> |
||||
</appender> |
||||
|
||||
<appender name="ALERTLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender"> |
||||
<file>${log.base}/dolphinscheduler-alert.log</file> |
||||
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy"> |
||||
<fileNamePattern>${log.base}/dolphinscheduler-alert.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern> |
||||
<maxHistory>20</maxHistory> |
||||
<maxFileSize>64MB</maxFileSize> |
||||
</rollingPolicy> |
||||
<encoder> |
||||
<pattern> |
||||
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n |
||||
</pattern> |
||||
<charset>UTF-8</charset> |
||||
</encoder> |
||||
</appender> |
||||
|
||||
<root level="INFO"> |
||||
<appender-ref ref="ALERTLOGFILE"/> |
||||
</root> |
||||
</configuration> |
@ -1,60 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8" ?> |
||||
<!-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
|
||||
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html --> |
||||
<configuration scan="true" scanPeriod="120 seconds"> |
||||
<logger name="org.apache.zookeeper" level="WARN"/> |
||||
<logger name="org.apache.hbase" level="WARN"/> |
||||
<logger name="org.apache.hadoop" level="WARN"/> |
||||
|
||||
<property name="log.base" value="logs" /> |
||||
|
||||
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender"> |
||||
<encoder> |
||||
<pattern> |
||||
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n |
||||
</pattern> |
||||
<charset>UTF-8</charset> |
||||
</encoder> |
||||
</appender> |
||||
|
||||
<appender name="APISERVERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender"> |
||||
<!-- Log level filter --> |
||||
<filter class="ch.qos.logback.classic.filter.ThresholdFilter"> |
||||
<level>INFO</level> |
||||
</filter> |
||||
<file>${log.base}/dolphinscheduler-api-server.log</file> |
||||
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy"> |
||||
<fileNamePattern>${log.base}/dolphinscheduler-api-server.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern> |
||||
<maxHistory>168</maxHistory> |
||||
<maxFileSize>64MB</maxFileSize> |
||||
</rollingPolicy> |
||||
|
||||
<encoder> |
||||
<pattern> |
||||
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n |
||||
</pattern> |
||||
<charset>UTF-8</charset> |
||||
</encoder> |
||||
|
||||
</appender> |
||||
|
||||
<root level="INFO"> |
||||
<appender-ref ref="APISERVERLOGFILE" /> |
||||
</root> |
||||
</configuration> |
@ -1,35 +0,0 @@
|
||||
# |
||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
||||
# contributor license agreements. See the NOTICE file distributed with |
||||
# this work for additional information regarding copyright ownership. |
||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
# (the "License"); you may not use this file except in compliance with |
||||
# the License. You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
# |
||||
|
||||
# ha or single namenode,If namenode ha needs to copy core-site.xml and hdfs-site.xml |
||||
# to the conf directory,support s3,for example : s3a://dolphinscheduler |
||||
fs.defaultFS=hdfs://mycluster:8020 |
||||
|
||||
# s3 need,s3 endpoint |
||||
fs.s3a.endpoint=http://192.168.199.91:9010 |
||||
|
||||
# s3 need,s3 access key |
||||
fs.s3a.access.key=A3DXS30FO22544RE |
||||
|
||||
# s3 need,s3 secret key |
||||
fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK |
||||
|
||||
#resourcemanager ha note this need ips , this empty if single |
||||
yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx |
||||
|
||||
# If it is a single resourcemanager, you only need to configure one host name. If it is resourcemanager HA, the default configuration is fine |
||||
yarn.application.status.address=http://ark1:8088/ws/v1/cluster/apps/%s |
@ -1,20 +0,0 @@
|
||||
# |
||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
||||
# contributor license agreements. See the NOTICE file distributed with |
||||
# this work for additional information regarding copyright ownership. |
||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
# (the "License"); you may not use this file except in compliance with |
||||
# the License. You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
# |
||||
|
||||
export PYTHON_HOME=/usr/bin/python |
||||
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64 |
||||
export PATH=$PYTHON_HOME:$JAVA_HOME/bin:$PATH |
@ -1,252 +0,0 @@
|
||||
# |
||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
||||
# contributor license agreements. See the NOTICE file distributed with |
||||
# this work for additional information regarding copyright ownership. |
||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
# (the "License"); you may not use this file except in compliance with |
||||
# the License. You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
# |
||||
|
||||
QUERY_SCHEDULE_LIST_NOTES=query schedule list |
||||
EXECUTE_PROCESS_TAG=execute process related operation |
||||
PROCESS_INSTANCE_EXECUTOR_TAG=process instance executor related operation |
||||
RUN_PROCESS_INSTANCE_NOTES=run process instance |
||||
START_NODE_LIST=start node list(node name) |
||||
TASK_DEPEND_TYPE=task depend type |
||||
COMMAND_TYPE=command type |
||||
RUN_MODE=run mode |
||||
TIMEOUT=timeout |
||||
EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=execute action to process instance |
||||
EXECUTE_TYPE=execute type |
||||
START_CHECK_PROCESS_DEFINITION_NOTES=start check process definition |
||||
GET_RECEIVER_CC_NOTES=query receiver cc |
||||
DESC=description |
||||
GROUP_NAME=group name |
||||
GROUP_TYPE=group type |
||||
QUERY_ALERT_GROUP_LIST_NOTES=query alert group list |
||||
UPDATE_ALERT_GROUP_NOTES=update alert group |
||||
DELETE_ALERT_GROUP_BY_ID_NOTES=delete alert group by id |
||||
VERIFY_ALERT_GROUP_NAME_NOTES=verify alert group name, check alert group exist or not |
||||
GRANT_ALERT_GROUP_NOTES=grant alert group |
||||
USER_IDS=user id list |
||||
ALERT_GROUP_TAG=alert group related operation |
||||
CREATE_ALERT_GROUP_NOTES=create alert group |
||||
WORKER_GROUP_TAG=worker group related operation |
||||
SAVE_WORKER_GROUP_NOTES=create worker group |
||||
WORKER_GROUP_NAME=worker group name |
||||
WORKER_IP_LIST=worker ip list, eg. 192.168.1.1,192.168.1.2 |
||||
QUERY_WORKER_GROUP_PAGING_NOTES=query worker group paging |
||||
QUERY_WORKER_GROUP_LIST_NOTES=query worker group list |
||||
DELETE_WORKER_GROUP_BY_ID_NOTES=delete worker group by id |
||||
DATA_ANALYSIS_TAG=analysis related operation of task state |
||||
COUNT_TASK_STATE_NOTES=count task state |
||||
COUNT_PROCESS_INSTANCE_NOTES=count process instance state |
||||
COUNT_PROCESS_DEFINITION_BY_USER_NOTES=count process definition by user |
||||
COUNT_COMMAND_STATE_NOTES=count command state |
||||
COUNT_QUEUE_STATE_NOTES=count the running status of the task in the queue\ |
||||
|
||||
ACCESS_TOKEN_TAG=access token related operation |
||||
MONITOR_TAG=monitor related operation |
||||
MASTER_LIST_NOTES=master server list |
||||
WORKER_LIST_NOTES=worker server list |
||||
QUERY_DATABASE_STATE_NOTES=query database state |
||||
QUERY_ZOOKEEPER_STATE_NOTES=QUERY ZOOKEEPER STATE |
||||
TASK_STATE=task instance state |
||||
SOURCE_TABLE=SOURCE TABLE |
||||
DEST_TABLE=dest table |
||||
TASK_DATE=task date |
||||
QUERY_HISTORY_TASK_RECORD_LIST_PAGING_NOTES=query history task record list paging |
||||
DATA_SOURCE_TAG=data source related operation |
||||
CREATE_DATA_SOURCE_NOTES=create data source |
||||
DATA_SOURCE_NAME=data source name |
||||
DATA_SOURCE_NOTE=data source desc |
||||
DB_TYPE=database type |
||||
DATA_SOURCE_HOST=DATA SOURCE HOST |
||||
DATA_SOURCE_PORT=data source port |
||||
DATABASE_NAME=database name |
||||
QUEUE_TAG=queue related operation |
||||
QUERY_QUEUE_LIST_NOTES=query queue list |
||||
QUERY_QUEUE_LIST_PAGING_NOTES=query queue list paging |
||||
CREATE_QUEUE_NOTES=create queue |
||||
YARN_QUEUE_NAME=yarn(hadoop) queue name |
||||
QUEUE_ID=queue id |
||||
TENANT_DESC=tenant desc |
||||
QUERY_TENANT_LIST_PAGING_NOTES=query tenant list paging |
||||
QUERY_TENANT_LIST_NOTES=query tenant list |
||||
UPDATE_TENANT_NOTES=update tenant |
||||
DELETE_TENANT_NOTES=delete tenant |
||||
RESOURCES_TAG=resource center related operation |
||||
CREATE_RESOURCE_NOTES=create resource |
||||
RESOURCE_TYPE=resource file type |
||||
RESOURCE_NAME=resource name |
||||
RESOURCE_DESC=resource file desc |
||||
RESOURCE_FILE=resource file |
||||
RESOURCE_ID=resource id |
||||
QUERY_RESOURCE_LIST_NOTES=query resource list |
||||
DELETE_RESOURCE_BY_ID_NOTES=delete resource by id |
||||
VIEW_RESOURCE_BY_ID_NOTES=view resource by id |
||||
ONLINE_CREATE_RESOURCE_NOTES=online create resource |
||||
SUFFIX=resource file suffix |
||||
CONTENT=resource file content |
||||
UPDATE_RESOURCE_NOTES=edit resource file online |
||||
DOWNLOAD_RESOURCE_NOTES=download resource file |
||||
CREATE_UDF_FUNCTION_NOTES=create udf function |
||||
UDF_TYPE=UDF type |
||||
FUNC_NAME=function name |
||||
CLASS_NAME=package and class name |
||||
ARG_TYPES=arguments |
||||
UDF_DESC=udf desc |
||||
VIEW_UDF_FUNCTION_NOTES=view udf function |
||||
UPDATE_UDF_FUNCTION_NOTES=update udf function |
||||
QUERY_UDF_FUNCTION_LIST_PAGING_NOTES=query udf function list paging |
||||
VERIFY_UDF_FUNCTION_NAME_NOTES=verify udf function name |
||||
DELETE_UDF_FUNCTION_NOTES=delete udf function |
||||
AUTHORIZED_FILE_NOTES=authorized file |
||||
UNAUTHORIZED_FILE_NOTES=unauthorized file |
||||
AUTHORIZED_UDF_FUNC_NOTES=authorized udf func |
||||
UNAUTHORIZED_UDF_FUNC_NOTES=unauthorized udf func |
||||
VERIFY_QUEUE_NOTES=verify queue |
||||
TENANT_TAG=tenant related operation |
||||
CREATE_TENANT_NOTES=create tenant |
||||
TENANT_CODE=tenant code |
||||
TENANT_NAME=tenant name |
||||
QUEUE_NAME=queue name |
||||
PASSWORD=password |
||||
DATA_SOURCE_OTHER=jdbc connection params, format:{"key1":"value1",...} |
||||
PROJECT_TAG=project related operation |
||||
CREATE_PROJECT_NOTES=create project |
||||
PROJECT_DESC=project description |
||||
UPDATE_PROJECT_NOTES=update project |
||||
PROJECT_ID=project id |
||||
QUERY_PROJECT_BY_ID_NOTES=query project info by project id |
||||
QUERY_PROJECT_LIST_PAGING_NOTES=QUERY PROJECT LIST PAGING |
||||
DELETE_PROJECT_BY_ID_NOTES=delete project by id |
||||
QUERY_UNAUTHORIZED_PROJECT_NOTES=query unauthorized project |
||||
QUERY_ALL_PROJECT_LIST_NOTES=query all project list |
||||
QUERY_AUTHORIZED_PROJECT_NOTES=query authorized project |
||||
TASK_RECORD_TAG=task record related operation |
||||
QUERY_TASK_RECORD_LIST_PAGING_NOTES=query task record list paging |
||||
CREATE_TOKEN_NOTES=create token ,note: please login first |
||||
QUERY_ACCESS_TOKEN_LIST_NOTES=query access token list paging |
||||
SCHEDULE=schedule |
||||
WARNING_TYPE=warning type(sending strategy) |
||||
WARNING_GROUP_ID=warning group id |
||||
FAILURE_STRATEGY=failure strategy |
||||
RECEIVERS=receivers |
||||
RECEIVERS_CC=receivers cc |
||||
WORKER_GROUP_ID=worker server group id |
||||
PROCESS_INSTANCE_PRIORITY=process instance priority |
||||
UPDATE_SCHEDULE_NOTES=update schedule |
||||
SCHEDULE_ID=schedule id |
||||
ONLINE_SCHEDULE_NOTES=online schedule |
||||
OFFLINE_SCHEDULE_NOTES=offline schedule |
||||
QUERY_SCHEDULE_NOTES=query schedule |
||||
QUERY_SCHEDULE_LIST_PAGING_NOTES=query schedule list paging |
||||
LOGIN_TAG=User login related operations |
||||
USER_NAME=user name |
||||
PROJECT_NAME=project name |
||||
CREATE_PROCESS_DEFINITION_NOTES=create process definition |
||||
PROCESS_DEFINITION_NAME=process definition name |
||||
PROCESS_DEFINITION_JSON=process definition detail info (json format) |
||||
PROCESS_DEFINITION_LOCATIONS=process definition node locations info (json format) |
||||
PROCESS_INSTANCE_LOCATIONS=process instance node locations info (json format) |
||||
PROCESS_DEFINITION_CONNECTS=process definition node connects info (json format) |
||||
PROCESS_INSTANCE_CONNECTS=process instance node connects info (json format) |
||||
PROCESS_DEFINITION_DESC=process definition desc |
||||
PROCESS_DEFINITION_TAG=process definition related opertation |
||||
SIGNOUT_NOTES=logout |
||||
USER_PASSWORD=user password |
||||
UPDATE_PROCESS_INSTANCE_NOTES=update process instance |
||||
QUERY_PROCESS_INSTANCE_LIST_NOTES=query process instance list |
||||
VERIFY_PROCCESS_DEFINITION_NAME_NOTES=verify proccess definition name |
||||
LOGIN_NOTES=user login |
||||
UPDATE_PROCCESS_DEFINITION_NOTES=update proccess definition |
||||
PROCESS_DEFINITION_ID=process definition id |
||||
PROCESS_DEFINITION_IDS=process definition ids |
||||
RELEASE_PROCCESS_DEFINITION_NOTES=release proccess definition |
||||
QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=query proccess definition by id |
||||
QUERY_PROCCESS_DEFINITION_LIST_NOTES=query proccess definition list |
||||
QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=query proccess definition list paging |
||||
QUERY_ALL_DEFINITION_LIST_NOTES=query all definition list |
||||
PAGE_NO=page no |
||||
PROCESS_INSTANCE_ID=process instance id |
||||
PROCESS_INSTANCE_JSON=process instance info(json format) |
||||
SCHEDULE_TIME=schedule time |
||||
SYNC_DEFINE=update the information of the process instance to the process definition\ |
||||
|
||||
RECOVERY_PROCESS_INSTANCE_FLAG=whether to recovery process instance |
||||
SEARCH_VAL=search val |
||||
USER_ID=user id |
||||
PAGE_SIZE=page size |
||||
LIMIT=limit |
||||
VIEW_TREE_NOTES=view tree |
||||
GET_NODE_LIST_BY_DEFINITION_ID_NOTES=get task node list by process definition id |
||||
PROCESS_DEFINITION_ID_LIST=process definition id list |
||||
QUERY_PROCCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=query proccess definition all by project id |
||||
DELETE_PROCESS_DEFINITION_BY_ID_NOTES=delete process definition by process definition id |
||||
BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=batch delete process definition by process definition ids |
||||
QUERY_PROCESS_INSTANCE_BY_ID_NOTES=query process instance by process instance id |
||||
DELETE_PROCESS_INSTANCE_BY_ID_NOTES=delete process instance by process instance id |
||||
TASK_ID=task instance id |
||||
SKIP_LINE_NUM=skip line num |
||||
QUERY_TASK_INSTANCE_LOG_NOTES=query task instance log |
||||
DOWNLOAD_TASK_INSTANCE_LOG_NOTES=download task instance log |
||||
USERS_TAG=users related operation |
||||
SCHEDULER_TAG=scheduler related operation |
||||
CREATE_SCHEDULE_NOTES=create schedule |
||||
CREATE_USER_NOTES=create user |
||||
TENANT_ID=tenant id |
||||
QUEUE=queue |
||||
EMAIL=email |
||||
PHONE=phone |
||||
QUERY_USER_LIST_NOTES=query user list |
||||
UPDATE_USER_NOTES=update user |
||||
DELETE_USER_BY_ID_NOTES=delete user by id |
||||
GRANT_PROJECT_NOTES=GRANT PROJECT |
||||
PROJECT_IDS=project ids(string format, multiple projects separated by ",") |
||||
GRANT_RESOURCE_NOTES=grant resource file |
||||
RESOURCE_IDS=resource ids(string format, multiple resources separated by ",") |
||||
GET_USER_INFO_NOTES=get user info |
||||
LIST_USER_NOTES=list user |
||||
VERIFY_USER_NAME_NOTES=verify user name |
||||
UNAUTHORIZED_USER_NOTES=cancel authorization |
||||
ALERT_GROUP_ID=alert group id |
||||
AUTHORIZED_USER_NOTES=authorized user |
||||
GRANT_UDF_FUNC_NOTES=grant udf function |
||||
UDF_IDS=udf ids(string format, multiple udf functions separated by ",") |
||||
GRANT_DATASOURCE_NOTES=grant datasource |
||||
DATASOURCE_IDS=datasource ids(string format, multiple datasources separated by ",") |
||||
QUERY_SUBPROCESS_INSTANCE_BY_TASK_ID_NOTES=query subprocess instance by task instance id |
||||
QUERY_PARENT_PROCESS_INSTANCE_BY_SUB_PROCESS_INSTANCE_ID_NOTES=query parent process instance info by sub process instance id |
||||
QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES=query process instance global variables and local variables |
||||
VIEW_GANTT_NOTES=view gantt |
||||
SUB_PROCESS_INSTANCE_ID=sub process instance id |
||||
TASK_NAME=task instance name |
||||
TASK_INSTANCE_TAG=task instance related operation |
||||
LOGGER_TAG=log related operation |
||||
PROCESS_INSTANCE_TAG=process instance related operation |
||||
EXECUTION_STATUS=runing status for workflow and task nodes |
||||
HOST=ip address of running task |
||||
START_DATE=start date |
||||
END_DATE=end date |
||||
QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES=query task list by process instance id |
||||
UPDATE_DATA_SOURCE_NOTES=update data source |
||||
DATA_SOURCE_ID=DATA SOURCE ID |
||||
QUERY_DATA_SOURCE_NOTES=query data source by id |
||||
QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES=query data source list by database type |
||||
QUERY_DATA_SOURCE_LIST_PAGING_NOTES=query data source list paging |
||||
CONNECT_DATA_SOURCE_NOTES=CONNECT DATA SOURCE |
||||
CONNECT_DATA_SOURCE_TEST_NOTES=connect data source test |
||||
DELETE_DATA_SOURCE_NOTES=delete data source |
||||
VERIFY_DATA_SOURCE_NOTES=verify data source |
||||
UNAUTHORIZED_DATA_SOURCE_NOTES=unauthorized data source |
||||
AUTHORIZED_DATA_SOURCE_NOTES=authorized data source |
||||
DELETE_SCHEDULER_BY_ID_NOTES=delete scheduler by id |
@ -1,252 +0,0 @@
|
||||
# |
||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
||||
# contributor license agreements. See the NOTICE file distributed with |
||||
# this work for additional information regarding copyright ownership. |
||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
# (the "License"); you may not use this file except in compliance with |
||||
# the License. You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
# |
||||
|
||||
QUERY_SCHEDULE_LIST_NOTES=query schedule list |
||||
EXECUTE_PROCESS_TAG=execute process related operation |
||||
PROCESS_INSTANCE_EXECUTOR_TAG=process instance executor related operation |
||||
RUN_PROCESS_INSTANCE_NOTES=run process instance |
||||
START_NODE_LIST=start node list(node name) |
||||
TASK_DEPEND_TYPE=task depend type |
||||
COMMAND_TYPE=command type |
||||
RUN_MODE=run mode |
||||
TIMEOUT=timeout |
||||
EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=execute action to process instance |
||||
EXECUTE_TYPE=execute type |
||||
START_CHECK_PROCESS_DEFINITION_NOTES=start check process definition |
||||
GET_RECEIVER_CC_NOTES=query receiver cc |
||||
DESC=description |
||||
GROUP_NAME=group name |
||||
GROUP_TYPE=group type |
||||
QUERY_ALERT_GROUP_LIST_NOTES=query alert group list |
||||
UPDATE_ALERT_GROUP_NOTES=update alert group |
||||
DELETE_ALERT_GROUP_BY_ID_NOTES=delete alert group by id |
||||
VERIFY_ALERT_GROUP_NAME_NOTES=verify alert group name, check alert group exist or not |
||||
GRANT_ALERT_GROUP_NOTES=grant alert group |
||||
USER_IDS=user id list |
||||
ALERT_GROUP_TAG=alert group related operation |
||||
CREATE_ALERT_GROUP_NOTES=create alert group |
||||
WORKER_GROUP_TAG=worker group related operation |
||||
SAVE_WORKER_GROUP_NOTES=create worker group |
||||
WORKER_GROUP_NAME=worker group name |
||||
WORKER_IP_LIST=worker ip list, eg. 192.168.1.1,192.168.1.2 |
||||
QUERY_WORKER_GROUP_PAGING_NOTES=query worker group paging |
||||
QUERY_WORKER_GROUP_LIST_NOTES=query worker group list |
||||
DELETE_WORKER_GROUP_BY_ID_NOTES=delete worker group by id |
||||
DATA_ANALYSIS_TAG=analysis related operation of task state |
||||
COUNT_TASK_STATE_NOTES=count task state |
||||
COUNT_PROCESS_INSTANCE_NOTES=count process instance state |
||||
COUNT_PROCESS_DEFINITION_BY_USER_NOTES=count process definition by user |
||||
COUNT_COMMAND_STATE_NOTES=count command state |
||||
COUNT_QUEUE_STATE_NOTES=count the running status of the task in the queue\ |
||||
|
||||
ACCESS_TOKEN_TAG=access token related operation |
||||
MONITOR_TAG=monitor related operation |
||||
MASTER_LIST_NOTES=master server list |
||||
WORKER_LIST_NOTES=worker server list |
||||
QUERY_DATABASE_STATE_NOTES=query database state |
||||
QUERY_ZOOKEEPER_STATE_NOTES=QUERY ZOOKEEPER STATE |
||||
TASK_STATE=task instance state |
||||
SOURCE_TABLE=SOURCE TABLE |
||||
DEST_TABLE=dest table |
||||
TASK_DATE=task date |
||||
QUERY_HISTORY_TASK_RECORD_LIST_PAGING_NOTES=query history task record list paging |
||||
DATA_SOURCE_TAG=data source related operation |
||||
CREATE_DATA_SOURCE_NOTES=create data source |
||||
DATA_SOURCE_NAME=data source name |
||||
DATA_SOURCE_NOTE=data source desc |
||||
DB_TYPE=database type |
||||
DATA_SOURCE_HOST=DATA SOURCE HOST |
||||
DATA_SOURCE_PORT=data source port |
||||
DATABASE_NAME=database name |
||||
QUEUE_TAG=queue related operation |
||||
QUERY_QUEUE_LIST_NOTES=query queue list |
||||
QUERY_QUEUE_LIST_PAGING_NOTES=query queue list paging |
||||
CREATE_QUEUE_NOTES=create queue |
||||
YARN_QUEUE_NAME=yarn(hadoop) queue name |
||||
QUEUE_ID=queue id |
||||
TENANT_DESC=tenant desc |
||||
QUERY_TENANT_LIST_PAGING_NOTES=query tenant list paging |
||||
QUERY_TENANT_LIST_NOTES=query tenant list |
||||
UPDATE_TENANT_NOTES=update tenant |
||||
DELETE_TENANT_NOTES=delete tenant |
||||
RESOURCES_TAG=resource center related operation |
||||
CREATE_RESOURCE_NOTES=create resource |
||||
RESOURCE_TYPE=resource file type |
||||
RESOURCE_NAME=resource name |
||||
RESOURCE_DESC=resource file desc |
||||
RESOURCE_FILE=resource file |
||||
RESOURCE_ID=resource id |
||||
QUERY_RESOURCE_LIST_NOTES=query resource list |
||||
DELETE_RESOURCE_BY_ID_NOTES=delete resource by id |
||||
VIEW_RESOURCE_BY_ID_NOTES=view resource by id |
||||
ONLINE_CREATE_RESOURCE_NOTES=online create resource |
||||
SUFFIX=resource file suffix |
||||
CONTENT=resource file content |
||||
UPDATE_RESOURCE_NOTES=edit resource file online |
||||
DOWNLOAD_RESOURCE_NOTES=download resource file |
||||
CREATE_UDF_FUNCTION_NOTES=create udf function |
||||
UDF_TYPE=UDF type |
||||
FUNC_NAME=function name |
||||
CLASS_NAME=package and class name |
||||
ARG_TYPES=arguments |
||||
UDF_DESC=udf desc |
||||
VIEW_UDF_FUNCTION_NOTES=view udf function |
||||
UPDATE_UDF_FUNCTION_NOTES=update udf function |
||||
QUERY_UDF_FUNCTION_LIST_PAGING_NOTES=query udf function list paging |
||||
VERIFY_UDF_FUNCTION_NAME_NOTES=verify udf function name |
||||
DELETE_UDF_FUNCTION_NOTES=delete udf function |
||||
AUTHORIZED_FILE_NOTES=authorized file |
||||
UNAUTHORIZED_FILE_NOTES=unauthorized file |
||||
AUTHORIZED_UDF_FUNC_NOTES=authorized udf func |
||||
UNAUTHORIZED_UDF_FUNC_NOTES=unauthorized udf func |
||||
VERIFY_QUEUE_NOTES=verify queue |
||||
TENANT_TAG=tenant related operation |
||||
CREATE_TENANT_NOTES=create tenant |
||||
TENANT_CODE=tenant code |
||||
TENANT_NAME=tenant name |
||||
QUEUE_NAME=queue name |
||||
PASSWORD=password |
||||
DATA_SOURCE_OTHER=jdbc connection params, format:{"key1":"value1",...} |
||||
PROJECT_TAG=project related operation |
||||
CREATE_PROJECT_NOTES=create project |
||||
PROJECT_DESC=project description |
||||
UPDATE_PROJECT_NOTES=update project |
||||
PROJECT_ID=project id |
||||
QUERY_PROJECT_BY_ID_NOTES=query project info by project id |
||||
QUERY_PROJECT_LIST_PAGING_NOTES=QUERY PROJECT LIST PAGING |
||||
QUERY_ALL_PROJECT_LIST_NOTES=query all project list |
||||
DELETE_PROJECT_BY_ID_NOTES=delete project by id |
||||
QUERY_UNAUTHORIZED_PROJECT_NOTES=query unauthorized project |
||||
QUERY_AUTHORIZED_PROJECT_NOTES=query authorized project |
||||
TASK_RECORD_TAG=task record related operation |
||||
QUERY_TASK_RECORD_LIST_PAGING_NOTES=query task record list paging |
||||
CREATE_TOKEN_NOTES=create token ,note: please login first |
||||
QUERY_ACCESS_TOKEN_LIST_NOTES=query access token list paging |
||||
SCHEDULE=schedule |
||||
WARNING_TYPE=warning type(sending strategy) |
||||
WARNING_GROUP_ID=warning group id |
||||
FAILURE_STRATEGY=failure strategy |
||||
RECEIVERS=receivers |
||||
RECEIVERS_CC=receivers cc |
||||
WORKER_GROUP_ID=worker server group id |
||||
PROCESS_INSTANCE_PRIORITY=process instance priority |
||||
UPDATE_SCHEDULE_NOTES=update schedule |
||||
SCHEDULE_ID=schedule id |
||||
ONLINE_SCHEDULE_NOTES=online schedule |
||||
OFFLINE_SCHEDULE_NOTES=offline schedule |
||||
QUERY_SCHEDULE_NOTES=query schedule |
||||
QUERY_SCHEDULE_LIST_PAGING_NOTES=query schedule list paging |
||||
LOGIN_TAG=User login related operations |
||||
USER_NAME=user name |
||||
PROJECT_NAME=project name |
||||
CREATE_PROCESS_DEFINITION_NOTES=create process definition |
||||
PROCESS_DEFINITION_NAME=process definition name |
||||
PROCESS_DEFINITION_JSON=process definition detail info (json format) |
||||
PROCESS_DEFINITION_LOCATIONS=process definition node locations info (json format) |
||||
PROCESS_INSTANCE_LOCATIONS=process instance node locations info (json format) |
||||
PROCESS_DEFINITION_CONNECTS=process definition node connects info (json format) |
||||
PROCESS_INSTANCE_CONNECTS=process instance node connects info (json format) |
||||
PROCESS_DEFINITION_DESC=process definition desc |
||||
PROCESS_DEFINITION_TAG=process definition related opertation |
||||
SIGNOUT_NOTES=logout |
||||
USER_PASSWORD=user password |
||||
UPDATE_PROCESS_INSTANCE_NOTES=update process instance |
||||
QUERY_PROCESS_INSTANCE_LIST_NOTES=query process instance list |
||||
VERIFY_PROCCESS_DEFINITION_NAME_NOTES=verify proccess definition name |
||||
LOGIN_NOTES=user login |
||||
UPDATE_PROCCESS_DEFINITION_NOTES=update proccess definition |
||||
PROCESS_DEFINITION_ID=process definition id |
||||
PROCESS_DEFINITION_IDS=process definition ids |
||||
RELEASE_PROCCESS_DEFINITION_NOTES=release proccess definition |
||||
QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=query proccess definition by id |
||||
QUERY_PROCCESS_DEFINITION_LIST_NOTES=query proccess definition list |
||||
QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=query proccess definition list paging |
||||
QUERY_ALL_DEFINITION_LIST_NOTES=query all definition list |
||||
PAGE_NO=page no |
||||
PROCESS_INSTANCE_ID=process instance id |
||||
PROCESS_INSTANCE_JSON=process instance info(json format) |
||||
SCHEDULE_TIME=schedule time |
||||
SYNC_DEFINE=update the information of the process instance to the process definition\ |
||||
|
||||
RECOVERY_PROCESS_INSTANCE_FLAG=whether to recovery process instance |
||||
SEARCH_VAL=search val |
||||
USER_ID=user id |
||||
PAGE_SIZE=page size |
||||
LIMIT=limit |
||||
VIEW_TREE_NOTES=view tree |
||||
GET_NODE_LIST_BY_DEFINITION_ID_NOTES=get task node list by process definition id |
||||
PROCESS_DEFINITION_ID_LIST=process definition id list |
||||
QUERY_PROCCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=query proccess definition all by project id |
||||
DELETE_PROCESS_DEFINITION_BY_ID_NOTES=delete process definition by process definition id |
||||
BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=batch delete process definition by process definition ids |
||||
QUERY_PROCESS_INSTANCE_BY_ID_NOTES=query process instance by process instance id |
||||
DELETE_PROCESS_INSTANCE_BY_ID_NOTES=delete process instance by process instance id |
||||
TASK_ID=task instance id |
||||
SKIP_LINE_NUM=skip line num |
||||
QUERY_TASK_INSTANCE_LOG_NOTES=query task instance log |
||||
DOWNLOAD_TASK_INSTANCE_LOG_NOTES=download task instance log |
||||
USERS_TAG=users related operation |
||||
SCHEDULER_TAG=scheduler related operation |
||||
CREATE_SCHEDULE_NOTES=create schedule |
||||
CREATE_USER_NOTES=create user |
||||
TENANT_ID=tenant id |
||||
QUEUE=queue |
||||
EMAIL=email |
||||
PHONE=phone |
||||
QUERY_USER_LIST_NOTES=query user list |
||||
UPDATE_USER_NOTES=update user |
||||
DELETE_USER_BY_ID_NOTES=delete user by id |
||||
GRANT_PROJECT_NOTES=GRANT PROJECT |
||||
PROJECT_IDS=project ids(string format, multiple projects separated by ",") |
||||
GRANT_RESOURCE_NOTES=grant resource file |
||||
RESOURCE_IDS=resource ids(string format, multiple resources separated by ",") |
||||
GET_USER_INFO_NOTES=get user info |
||||
LIST_USER_NOTES=list user |
||||
VERIFY_USER_NAME_NOTES=verify user name |
||||
UNAUTHORIZED_USER_NOTES=cancel authorization |
||||
ALERT_GROUP_ID=alert group id |
||||
AUTHORIZED_USER_NOTES=authorized user |
||||
GRANT_UDF_FUNC_NOTES=grant udf function |
||||
UDF_IDS=udf ids(string format, multiple udf functions separated by ",") |
||||
GRANT_DATASOURCE_NOTES=grant datasource |
||||
DATASOURCE_IDS=datasource ids(string format, multiple datasources separated by ",") |
||||
QUERY_SUBPROCESS_INSTANCE_BY_TASK_ID_NOTES=query subprocess instance by task instance id |
||||
QUERY_PARENT_PROCESS_INSTANCE_BY_SUB_PROCESS_INSTANCE_ID_NOTES=query parent process instance info by sub process instance id |
||||
QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES=query process instance global variables and local variables |
||||
VIEW_GANTT_NOTES=view gantt |
||||
SUB_PROCESS_INSTANCE_ID=sub process instance id |
||||
TASK_NAME=task instance name |
||||
TASK_INSTANCE_TAG=task instance related operation |
||||
LOGGER_TAG=log related operation |
||||
PROCESS_INSTANCE_TAG=process instance related operation |
||||
EXECUTION_STATUS=runing status for workflow and task nodes |
||||
HOST=ip address of running task |
||||
START_DATE=start date |
||||
END_DATE=end date |
||||
QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES=query task list by process instance id |
||||
UPDATE_DATA_SOURCE_NOTES=update data source |
||||
DATA_SOURCE_ID=DATA SOURCE ID |
||||
QUERY_DATA_SOURCE_NOTES=query data source by id |
||||
QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES=query data source list by database type |
||||
QUERY_DATA_SOURCE_LIST_PAGING_NOTES=query data source list paging |
||||
CONNECT_DATA_SOURCE_NOTES=CONNECT DATA SOURCE |
||||
CONNECT_DATA_SOURCE_TEST_NOTES=connect data source test |
||||
DELETE_DATA_SOURCE_NOTES=delete data source |
||||
VERIFY_DATA_SOURCE_NOTES=verify data source |
||||
UNAUTHORIZED_DATA_SOURCE_NOTES=unauthorized data source |
||||
AUTHORIZED_DATA_SOURCE_NOTES=authorized data source |
||||
DELETE_SCHEDULER_BY_ID_NOTES=delete scheduler by id |
@ -1,250 +0,0 @@
|
||||
# |
||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
||||
# contributor license agreements. See the NOTICE file distributed with |
||||
# this work for additional information regarding copyright ownership. |
||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
# (the "License"); you may not use this file except in compliance with |
||||
# the License. You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
# |
||||
|
||||
QUERY_SCHEDULE_LIST_NOTES=查询定时列表 |
||||
PROCESS_INSTANCE_EXECUTOR_TAG=流程实例执行相关操作 |
||||
RUN_PROCESS_INSTANCE_NOTES=运行流程实例 |
||||
START_NODE_LIST=开始节点列表(节点name) |
||||
TASK_DEPEND_TYPE=任务依赖类型 |
||||
COMMAND_TYPE=指令类型 |
||||
RUN_MODE=运行模式 |
||||
TIMEOUT=超时时间 |
||||
EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=执行流程实例的各种操作(暂停、停止、重跑、恢复等) |
||||
EXECUTE_TYPE=执行类型 |
||||
START_CHECK_PROCESS_DEFINITION_NOTES=检查流程定义 |
||||
DESC=备注(描述) |
||||
GROUP_NAME=组名称 |
||||
GROUP_TYPE=组类型 |
||||
QUERY_ALERT_GROUP_LIST_NOTES=告警组列表\ |
||||
|
||||
UPDATE_ALERT_GROUP_NOTES=编辑(更新)告警组 |
||||
DELETE_ALERT_GROUP_BY_ID_NOTES=删除告警组通过ID |
||||
VERIFY_ALERT_GROUP_NAME_NOTES=检查告警组是否存在 |
||||
GRANT_ALERT_GROUP_NOTES=授权告警组 |
||||
USER_IDS=用户ID列表 |
||||
ALERT_GROUP_TAG=告警组相关操作 |
||||
WORKER_GROUP_TAG=Worker分组管理 |
||||
SAVE_WORKER_GROUP_NOTES=创建Worker分组\ |
||||
|
||||
WORKER_GROUP_NAME=Worker分组名称 |
||||
WORKER_IP_LIST=Worker ip列表,注意:多个IP地址以逗号分割\ |
||||
|
||||
QUERY_WORKER_GROUP_PAGING_NOTES=Worker分组管理 |
||||
QUERY_WORKER_GROUP_LIST_NOTES=查询worker group分组 |
||||
DELETE_WORKER_GROUP_BY_ID_NOTES=删除worker group通过ID |
||||
DATA_ANALYSIS_TAG=任务状态分析相关操作 |
||||
COUNT_TASK_STATE_NOTES=任务状态统计 |
||||
COUNT_PROCESS_INSTANCE_NOTES=统计流程实例状态 |
||||
COUNT_PROCESS_DEFINITION_BY_USER_NOTES=统计用户创建的流程定义 |
||||
COUNT_COMMAND_STATE_NOTES=统计命令状态 |
||||
COUNT_QUEUE_STATE_NOTES=统计队列里任务状态 |
||||
ACCESS_TOKEN_TAG=access token相关操作,需要先登录 |
||||
MONITOR_TAG=监控相关操作 |
||||
MASTER_LIST_NOTES=master服务列表 |
||||
WORKER_LIST_NOTES=worker服务列表 |
||||
QUERY_DATABASE_STATE_NOTES=查询数据库状态 |
||||
QUERY_ZOOKEEPER_STATE_NOTES=查询Zookeeper状态 |
||||
TASK_STATE=任务实例状态 |
||||
SOURCE_TABLE=源表 |
||||
DEST_TABLE=目标表 |
||||
TASK_DATE=任务时间 |
||||
QUERY_HISTORY_TASK_RECORD_LIST_PAGING_NOTES=分页查询历史任务记录列表 |
||||
DATA_SOURCE_TAG=数据源相关操作 |
||||
CREATE_DATA_SOURCE_NOTES=创建数据源 |
||||
DATA_SOURCE_NAME=数据源名称 |
||||
DATA_SOURCE_NOTE=数据源描述 |
||||
DB_TYPE=数据源类型 |
||||
DATA_SOURCE_HOST=IP主机名 |
||||
DATA_SOURCE_PORT=数据源端口 |
||||
DATABASE_NAME=数据库名 |
||||
QUEUE_TAG=队列相关操作 |
||||
QUERY_QUEUE_LIST_NOTES=查询队列列表 |
||||
QUERY_QUEUE_LIST_PAGING_NOTES=分页查询队列列表 |
||||
CREATE_QUEUE_NOTES=创建队列 |
||||
YARN_QUEUE_NAME=hadoop yarn队列名 |
||||
QUEUE_ID=队列ID |
||||
TENANT_DESC=租户描述 |
||||
QUERY_TENANT_LIST_PAGING_NOTES=分页查询租户列表 |
||||
QUERY_TENANT_LIST_NOTES=查询租户列表 |
||||
UPDATE_TENANT_NOTES=更新租户 |
||||
DELETE_TENANT_NOTES=删除租户 |
||||
RESOURCES_TAG=资源中心相关操作 |
||||
CREATE_RESOURCE_NOTES=创建资源 |
||||
RESOURCE_TYPE=资源文件类型 |
||||
RESOURCE_NAME=资源文件名称 |
||||
RESOURCE_DESC=资源文件描述 |
||||
RESOURCE_FILE=资源文件 |
||||
RESOURCE_ID=资源ID |
||||
QUERY_RESOURCE_LIST_NOTES=查询资源列表 |
||||
DELETE_RESOURCE_BY_ID_NOTES=删除资源通过ID |
||||
VIEW_RESOURCE_BY_ID_NOTES=浏览资源通通过ID |
||||
ONLINE_CREATE_RESOURCE_NOTES=在线创建资源 |
||||
SUFFIX=资源文件后缀 |
||||
CONTENT=资源文件内容 |
||||
UPDATE_RESOURCE_NOTES=在线更新资源文件 |
||||
DOWNLOAD_RESOURCE_NOTES=下载资源文件 |
||||
CREATE_UDF_FUNCTION_NOTES=创建UDF函数 |
||||
UDF_TYPE=UDF类型 |
||||
FUNC_NAME=函数名称 |
||||
CLASS_NAME=包名类名 |
||||
ARG_TYPES=参数 |
||||
UDF_DESC=udf描述,使用说明 |
||||
VIEW_UDF_FUNCTION_NOTES=查看udf函数 |
||||
UPDATE_UDF_FUNCTION_NOTES=更新udf函数 |
||||
QUERY_UDF_FUNCTION_LIST_PAGING_NOTES=分页查询udf函数列表 |
||||
VERIFY_UDF_FUNCTION_NAME_NOTES=验证udf函数名 |
||||
DELETE_UDF_FUNCTION_NOTES=删除UDF函数 |
||||
AUTHORIZED_FILE_NOTES=授权文件 |
||||
UNAUTHORIZED_FILE_NOTES=取消授权文件 |
||||
AUTHORIZED_UDF_FUNC_NOTES=授权udf函数 |
||||
UNAUTHORIZED_UDF_FUNC_NOTES=取消udf函数授权 |
||||
VERIFY_QUEUE_NOTES=验证队列 |
||||
TENANT_TAG=租户相关操作 |
||||
CREATE_TENANT_NOTES=创建租户 |
||||
TENANT_CODE=租户编码 |
||||
TENANT_NAME=租户名称 |
||||
QUEUE_NAME=队列名 |
||||
PASSWORD=密码 |
||||
DATA_SOURCE_OTHER=jdbc连接参数,格式为:{"key1":"value1",...} |
||||
PROJECT_TAG=项目相关操作 |
||||
CREATE_PROJECT_NOTES=创建项目 |
||||
PROJECT_DESC=项目描述 |
||||
UPDATE_PROJECT_NOTES=更新项目 |
||||
PROJECT_ID=项目ID |
||||
QUERY_PROJECT_BY_ID_NOTES=通过项目ID查询项目信息 |
||||
QUERY_PROJECT_LIST_PAGING_NOTES=分页查询项目列表 |
||||
QUERY_ALL_PROJECT_LIST_NOTES=查询所有项目 |
||||
DELETE_PROJECT_BY_ID_NOTES=删除项目通过ID |
||||
QUERY_UNAUTHORIZED_PROJECT_NOTES=查询未授权的项目 |
||||
QUERY_AUTHORIZED_PROJECT_NOTES=查询授权项目 |
||||
TASK_RECORD_TAG=任务记录相关操作 |
||||
QUERY_TASK_RECORD_LIST_PAGING_NOTES=分页查询任务记录列表 |
||||
CREATE_TOKEN_NOTES=创建token,注意需要先登录 |
||||
QUERY_ACCESS_TOKEN_LIST_NOTES=分页查询access token列表 |
||||
SCHEDULE=定时 |
||||
WARNING_TYPE=发送策略 |
||||
WARNING_GROUP_ID=发送组ID |
||||
FAILURE_STRATEGY=失败策略 |
||||
RECEIVERS=收件人 |
||||
RECEIVERS_CC=收件人(抄送) |
||||
WORKER_GROUP_ID=Worker Server分组ID |
||||
PROCESS_INSTANCE_PRIORITY=流程实例优先级 |
||||
UPDATE_SCHEDULE_NOTES=更新定时 |
||||
SCHEDULE_ID=定时ID |
||||
ONLINE_SCHEDULE_NOTES=定时上线 |
||||
OFFLINE_SCHEDULE_NOTES=定时下线 |
||||
QUERY_SCHEDULE_NOTES=查询定时 |
||||
QUERY_SCHEDULE_LIST_PAGING_NOTES=分页查询定时 |
||||
LOGIN_TAG=用户登录相关操作 |
||||
USER_NAME=用户名 |
||||
PROJECT_NAME=项目名称 |
||||
CREATE_PROCESS_DEFINITION_NOTES=创建流程定义 |
||||
PROCESS_DEFINITION_NAME=流程定义名称 |
||||
PROCESS_DEFINITION_JSON=流程定义详细信息(json格式) |
||||
PROCESS_DEFINITION_LOCATIONS=流程定义节点坐标位置信息(json格式) |
||||
PROCESS_INSTANCE_LOCATIONS=流程实例节点坐标位置信息(json格式) |
||||
PROCESS_DEFINITION_CONNECTS=流程定义节点图标连接信息(json格式) |
||||
PROCESS_INSTANCE_CONNECTS=流程实例节点图标连接信息(json格式) |
||||
PROCESS_DEFINITION_DESC=流程定义描述信息 |
||||
PROCESS_DEFINITION_TAG=流程定义相关操作 |
||||
SIGNOUT_NOTES=退出登录 |
||||
USER_PASSWORD=用户密码 |
||||
UPDATE_PROCESS_INSTANCE_NOTES=更新流程实例 |
||||
QUERY_PROCESS_INSTANCE_LIST_NOTES=查询流程实例列表 |
||||
VERIFY_PROCCESS_DEFINITION_NAME_NOTES=验证流程定义名字 |
||||
LOGIN_NOTES=用户登录 |
||||
UPDATE_PROCCESS_DEFINITION_NOTES=更新流程定义 |
||||
PROCESS_DEFINITION_ID=流程定义ID |
||||
RELEASE_PROCCESS_DEFINITION_NOTES=发布流程定义 |
||||
QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=查询流程定义通过流程定义ID |
||||
QUERY_PROCCESS_DEFINITION_LIST_NOTES=查询流程定义列表 |
||||
QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=分页查询流程定义列表 |
||||
QUERY_ALL_DEFINITION_LIST_NOTES=查询所有流程定义 |
||||
PAGE_NO=页码号 |
||||
PROCESS_INSTANCE_ID=流程实例ID |
||||
PROCESS_INSTANCE_IDS=流程实例ID集合 |
||||
PROCESS_INSTANCE_JSON=流程实例信息(json格式) |
||||
SCHEDULE_TIME=定时时间 |
||||
SYNC_DEFINE=更新流程实例的信息是否同步到流程定义 |
||||
RECOVERY_PROCESS_INSTANCE_FLAG=是否恢复流程实例 |
||||
SEARCH_VAL=搜索值 |
||||
USER_ID=用户ID |
||||
PAGE_SIZE=页大小 |
||||
LIMIT=显示多少条 |
||||
VIEW_TREE_NOTES=树状图 |
||||
GET_NODE_LIST_BY_DEFINITION_ID_NOTES=获得任务节点列表通过流程定义ID |
||||
PROCESS_DEFINITION_ID_LIST=流程定义id列表 |
||||
QUERY_PROCCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=查询流程定义通过项目ID |
||||
BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=批量删除流程定义通过流程定义ID集合 |
||||
DELETE_PROCESS_DEFINITION_BY_ID_NOTES=删除流程定义通过流程定义ID |
||||
QUERY_PROCESS_INSTANCE_BY_ID_NOTES=查询流程实例通过流程实例ID |
||||
DELETE_PROCESS_INSTANCE_BY_ID_NOTES=删除流程实例通过流程实例ID |
||||
TASK_ID=任务实例ID |
||||
SKIP_LINE_NUM=忽略行数 |
||||
QUERY_TASK_INSTANCE_LOG_NOTES=查询任务实例日志 |
||||
DOWNLOAD_TASK_INSTANCE_LOG_NOTES=下载任务实例日志 |
||||
USERS_TAG=用户相关操作 |
||||
SCHEDULER_TAG=定时相关操作 |
||||
CREATE_SCHEDULE_NOTES=创建定时 |
||||
CREATE_USER_NOTES=创建用户 |
||||
TENANT_ID=租户ID |
||||
QUEUE=使用的队列 |
||||
EMAIL=邮箱 |
||||
PHONE=手机号 |
||||
QUERY_USER_LIST_NOTES=查询用户列表 |
||||
UPDATE_USER_NOTES=更新用户 |
||||
DELETE_USER_BY_ID_NOTES=删除用户通过ID |
||||
GRANT_PROJECT_NOTES=授权项目 |
||||
PROJECT_IDS=项目IDS(字符串格式,多个项目以","分割) |
||||
GRANT_RESOURCE_NOTES=授权资源文件 |
||||
RESOURCE_IDS=资源ID列表(字符串格式,多个资源ID以","分割) |
||||
GET_USER_INFO_NOTES=获取用户信息 |
||||
LIST_USER_NOTES=用户列表 |
||||
VERIFY_USER_NAME_NOTES=验证用户名 |
||||
UNAUTHORIZED_USER_NOTES=取消授权 |
||||
ALERT_GROUP_ID=报警组ID |
||||
AUTHORIZED_USER_NOTES=授权用户 |
||||
GRANT_UDF_FUNC_NOTES=授权udf函数 |
||||
UDF_IDS=udf函数id列表(字符串格式,多个udf函数ID以","分割) |
||||
GRANT_DATASOURCE_NOTES=授权数据源 |
||||
DATASOURCE_IDS=数据源ID列表(字符串格式,多个数据源ID以","分割) |
||||
QUERY_SUBPROCESS_INSTANCE_BY_TASK_ID_NOTES=查询子流程实例通过任务实例ID |
||||
QUERY_PARENT_PROCESS_INSTANCE_BY_SUB_PROCESS_INSTANCE_ID_NOTES=查询父流程实例信息通过子流程实例ID |
||||
QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES=查询流程实例全局变量和局部变量 |
||||
VIEW_GANTT_NOTES=浏览Gantt图 |
||||
SUB_PROCESS_INSTANCE_ID=子流程是咧ID |
||||
TASK_NAME=任务实例名 |
||||
TASK_INSTANCE_TAG=任务实例相关操作 |
||||
LOGGER_TAG=日志相关操作 |
||||
PROCESS_INSTANCE_TAG=流程实例相关操作 |
||||
EXECUTION_STATUS=工作流和任务节点的运行状态 |
||||
HOST=运行任务的主机IP地址 |
||||
START_DATE=开始时间 |
||||
END_DATE=结束时间 |
||||
QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES=通过流程实例ID查询任务列表 |
||||
UPDATE_DATA_SOURCE_NOTES=更新数据源 |
||||
DATA_SOURCE_ID=数据源ID |
||||
QUERY_DATA_SOURCE_NOTES=查询数据源通过ID |
||||
QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES=查询数据源列表通过数据源类型 |
||||
QUERY_DATA_SOURCE_LIST_PAGING_NOTES=分页查询数据源列表 |
||||
CONNECT_DATA_SOURCE_NOTES=连接数据源 |
||||
CONNECT_DATA_SOURCE_TEST_NOTES=连接数据源测试 |
||||
DELETE_DATA_SOURCE_NOTES=删除数据源 |
||||
VERIFY_DATA_SOURCE_NOTES=验证数据源 |
||||
UNAUTHORIZED_DATA_SOURCE_NOTES=未授权的数据源 |
||||
AUTHORIZED_DATA_SOURCE_NOTES=授权的数据源 |
||||
DELETE_SCHEDULER_BY_ID_NOTES=根据定时id删除定时数据 |
@ -1,17 +0,0 @@
|
||||
<#-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
<!DOCTYPE HTML PUBLIC '-//W3C//DTD HTML 4.01 Transitional//EN' 'http://www.w3.org/TR/html4/loose.dtd'><html><head><title> dolphinscheduler</title><meta name='Keywords' content=''><meta name='Description' content=''><style type="text/css">table { margin-top:0px; padding-top:0px; border:1px solid; font-size: 14px; color: #333333; border-width: 1px; border-color: #666666; border-collapse: collapse; } table th { border-width: 1px; padding: 8px; border-style: solid; border-color: #666666; background-color: #dedede; } table td { border-width: 1px; padding: 8px; border-style: solid; border-color: #666666; background-color: #ffffff; }</style></head><body style="margin:0;padding:0"><table border="1px" cellpadding="5px" cellspacing="-10px"><thead><#if title??> ${title}</#if></thead><#if content??> ${content}</#if></table></body></html> |
@ -1,52 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8" ?> |
||||
<!-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
|
||||
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html --> |
||||
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" --> |
||||
<property name="log.base" value="logs" /> |
||||
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender"> |
||||
<encoder> |
||||
<pattern> |
||||
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n |
||||
</pattern> |
||||
<charset>UTF-8</charset> |
||||
</encoder> |
||||
</appender> |
||||
|
||||
<appender name="MASTERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender"> |
||||
<file>${log.base}/dolphinscheduler-master.log</file> |
||||
<filter class="org.apache.dolphinscheduler.server.master.log.MasterLogFilter"> |
||||
<level>INFO</level> |
||||
</filter> |
||||
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy"> |
||||
<fileNamePattern>${log.base}/dolphinscheduler-master.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern> |
||||
<maxHistory>168</maxHistory> |
||||
<maxFileSize>200MB</maxFileSize> |
||||
</rollingPolicy> |
||||
<encoder> |
||||
<pattern> |
||||
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n |
||||
</pattern> |
||||
<charset>UTF-8</charset> |
||||
</encoder> |
||||
</appender> |
||||
|
||||
<root level="INFO"> |
||||
<appender-ref ref="MASTERLOGFILE"/> |
||||
</root> |
||||
</configuration> |
@ -1,33 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8" ?> |
||||
<!-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.AccessTokenMapper"> |
||||
<select id="selectAccessTokenPage" resultType="org.apache.dolphinscheduler.dao.entity.AccessToken"> |
||||
select * from t_ds_access_token t |
||||
left join t_ds_user u on t.user_id = u.id |
||||
where 1 = 1 |
||||
<if test="userName != null and userName != ''"> |
||||
and u.user_name like concat ('%', #{userName}, '%') |
||||
</if> |
||||
<if test="userId != 0"> |
||||
and t.user_id = #{userId} |
||||
</if> |
||||
order by t.update_time desc |
||||
</select> |
||||
</mapper> |
@ -1,47 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8" ?> |
||||
<!-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.AlertGroupMapper"> |
||||
<select id="queryAlertGroupPage" resultType="org.apache.dolphinscheduler.dao.entity.AlertGroup"> |
||||
select * from t_ds_alertgroup |
||||
where 1 = 1 |
||||
<if test="groupName != null and groupName != ''"> |
||||
and group_name like concat('%', #{groupName}, '%') |
||||
</if> |
||||
order by update_time desc |
||||
</select> |
||||
<select id="queryByGroupName" resultType="org.apache.dolphinscheduler.dao.entity.AlertGroup"> |
||||
select * from t_ds_alertgroup |
||||
where group_name=#{groupName} |
||||
</select> |
||||
<select id="queryByUserId" resultType="org.apache.dolphinscheduler.dao.entity.AlertGroup"> |
||||
select * from t_ds_alertgroup t |
||||
left join t_ds_relation_user_alertgroup r on t.id=r.alertgroup_id |
||||
where r.user_id=#{userId} |
||||
</select> |
||||
<select id="queryByAlertType" resultType="org.apache.dolphinscheduler.dao.entity.AlertGroup"> |
||||
select * from t_ds_alertgroup |
||||
where group_type=#{alertType} |
||||
</select> |
||||
<select id="queryAllGroupList" resultType="org.apache.dolphinscheduler.dao.entity.AlertGroup"> |
||||
select * |
||||
from t_ds_alertgroup |
||||
order by update_time desc |
||||
</select> |
||||
</mapper> |
@ -1,26 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8" ?> |
||||
<!-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.AlertMapper"> |
||||
<select id="listAlertByStatus" resultType="org.apache.dolphinscheduler.dao.entity.Alert"> |
||||
select * |
||||
from t_ds_alert |
||||
where alert_status = #{alertStatus} |
||||
</select> |
||||
</mapper> |
@ -1,43 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8" ?> |
||||
<!-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.CommandMapper"> |
||||
<select id="getOneToRun" resultType="org.apache.dolphinscheduler.dao.entity.Command"> |
||||
select command.* from t_ds_command command |
||||
join t_ds_process_definition definition on command.process_definition_id = definition.id |
||||
where definition.release_state = 1 AND definition.flag = 1 |
||||
order by command.update_time asc |
||||
limit 1 |
||||
</select> |
||||
<select id="countCommandState" resultType="org.apache.dolphinscheduler.dao.entity.CommandCount"> |
||||
select cmd.command_type as command_type, count(1) as count |
||||
from t_ds_command cmd, t_ds_process_definition process |
||||
where cmd.process_definition_id = process.id |
||||
<if test="projectIdArray != null and projectIdArray.length != 0"> |
||||
and process.project_id in |
||||
<foreach collection="projectIdArray" index="index" item="i" open="(" close=")" separator=","> |
||||
#{i} |
||||
</foreach> |
||||
</if> |
||||
<if test="startTime != null and endTime != null"> |
||||
and cmd.start_time <![CDATA[ >= ]]> #{startTime} and cmd.update_time <![CDATA[ <= ]]> #{endTime} |
||||
</if> |
||||
group by cmd.command_type |
||||
</select> |
||||
</mapper> |
@ -1,79 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8" ?> |
||||
<!-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.DataSourceMapper"> |
||||
<select id="queryDataSourceByType" resultType="org.apache.dolphinscheduler.dao.entity.DataSource"> |
||||
select * |
||||
from t_ds_datasource |
||||
where type=#{type} |
||||
<if test="userId != 0"> |
||||
and id in |
||||
(select datasource_id |
||||
from t_ds_relation_datasource_user |
||||
where user_id=#{userId} |
||||
union select id as datasource_id |
||||
from t_ds_datasource |
||||
where user_id=#{userId} |
||||
) |
||||
</if> |
||||
|
||||
</select> |
||||
|
||||
<select id="selectPaging" resultType="org.apache.dolphinscheduler.dao.entity.DataSource"> |
||||
select * |
||||
from t_ds_datasource |
||||
where 1 =1 |
||||
<if test="userId != 0"> |
||||
and id in |
||||
(select datasource_id |
||||
from t_ds_relation_datasource_user |
||||
where user_id=#{userId} |
||||
union select id as datasource_id |
||||
from t_ds_datasource |
||||
where user_id=#{userId} |
||||
) |
||||
</if> |
||||
<if test="name != null and name != ''"> |
||||
and name like concat ('%', #{name}, '%') |
||||
</if> |
||||
order by update_time desc |
||||
</select> |
||||
<select id="queryDataSourceByName" resultType="org.apache.dolphinscheduler.dao.entity.DataSource"> |
||||
select * |
||||
from t_ds_datasource |
||||
where name=#{name} |
||||
</select> |
||||
<select id="queryAuthedDatasource" resultType="org.apache.dolphinscheduler.dao.entity.DataSource"> |
||||
select datasource.* |
||||
from t_ds_datasource datasource, t_ds_relation_datasource_user rel |
||||
where datasource.id = rel.datasource_id AND rel.user_id = #{userId} |
||||
</select> |
||||
<select id="queryDatasourceExceptUserId" resultType="org.apache.dolphinscheduler.dao.entity.DataSource"> |
||||
select * |
||||
from t_ds_datasource |
||||
where user_id <![CDATA[ <> ]]> #{userId} |
||||
</select> |
||||
<select id="listAllDataSourceByType" resultType="org.apache.dolphinscheduler.dao.entity.DataSource"> |
||||
select * |
||||
from t_ds_datasource |
||||
where type = #{type} |
||||
</select> |
||||
|
||||
|
||||
</mapper> |
@ -1,30 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8" ?> |
||||
<!-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.DataSourceUserMapper"> |
||||
<delete id="deleteByUserId"> |
||||
delete from t_ds_relation_datasource_user |
||||
where user_id = #{userId} |
||||
|
||||
</delete> |
||||
<delete id="deleteByDatasourceId"> |
||||
delete from t_ds_relation_datasource_user |
||||
where datasource_id = #{datasourceId} |
||||
</delete> |
||||
</mapper> |
@ -1,36 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8" ?> |
||||
<!-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ErrorCommandMapper"> |
||||
<select id="countCommandState" resultType="org.apache.dolphinscheduler.dao.entity.CommandCount"> |
||||
select cmd.command_type as command_type, count(1) as count |
||||
from t_ds_error_command cmd, t_ds_process_definition process |
||||
where cmd.process_definition_id = process.id |
||||
<if test="projectIdArray != null and projectIdArray.length != 0"> |
||||
and process.project_id in |
||||
<foreach collection="projectIdArray" index="index" item="i" open="(" close=")" separator=","> |
||||
#{i} |
||||
</foreach> |
||||
</if> |
||||
<if test="startTime != null and endTime != null"> |
||||
and cmd.startTime <![CDATA[ >= ]]> #{startTime} and cmd.update_time <![CDATA[ <= ]]> #{endTime} |
||||
</if> |
||||
group by cmd.command_type |
||||
</select> |
||||
</mapper> |
@ -1,96 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8" ?> |
||||
<!-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper"> |
||||
<select id="queryByDefineName" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition"> |
||||
select pd.*,u.user_name,p.name as project_name,t.tenant_code,t.tenant_name,q.queue,q.queue_name |
||||
from t_ds_process_definition pd |
||||
JOIN t_ds_user u ON pd.user_id = u.id |
||||
JOIN t_ds_project p ON pd.project_id = p.id |
||||
JOIN t_ds_tenant t ON t.id = u.tenant_id |
||||
JOIN t_ds_queue q ON t.queue_id = q.id |
||||
WHERE p.id = #{projectId} |
||||
and pd.name = #{processDefinitionName} |
||||
</select> |
||||
<select id="queryDefineListPaging" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition"> |
||||
SELECT td.*,sc.schedule_release_state,tu.user_name |
||||
FROM t_ds_process_definition td |
||||
left join (select process_definition_id,release_state as schedule_release_state from t_ds_schedules group by process_definition_id,release_state) sc on sc.process_definition_id = td.id |
||||
left join t_ds_user tu on td.user_id = tu.id |
||||
where td.project_id = #{projectId} |
||||
<if test=" isAdmin == false "> |
||||
and tu.user_type=1 |
||||
</if> |
||||
<if test=" searchVal != null and searchVal != ''"> |
||||
and td.name like concat('%', #{searchVal}, '%') |
||||
</if> |
||||
<if test=" userId != 0"> |
||||
and td.user_id = #{userId} |
||||
</if> |
||||
order by sc.schedule_release_state desc,td.update_time desc |
||||
</select> |
||||
|
||||
<select id="queryAllDefinitionList" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition"> |
||||
select * |
||||
from t_ds_process_definition |
||||
where project_id = #{projectId} |
||||
order by create_time desc |
||||
</select> |
||||
<select id="queryDefinitionListByTenant" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition"> |
||||
select * |
||||
from t_ds_process_definition |
||||
where tenant_id = #{tenantId} |
||||
</select> |
||||
<select id="queryDefinitionListByIdList" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition"> |
||||
select * |
||||
from t_ds_process_definition |
||||
where id in |
||||
<foreach collection="ids" index="index" item="i" open="(" separator="," close=")"> |
||||
#{i} |
||||
</foreach> |
||||
</select> |
||||
<select id="countDefinitionGroupByUser" resultType="org.apache.dolphinscheduler.dao.entity.DefinitionGroupByUser"> |
||||
SELECT td.user_id as user_id, tu.user_name as user_name, count(0) as count |
||||
FROM t_ds_process_definition td |
||||
JOIN t_ds_user tu on tu.id=td.user_id |
||||
where 1 = 1 |
||||
<if test=" isAdmin == false "> |
||||
and tu.user_type=1 |
||||
</if> |
||||
<if test="projectIds != null and projectIds.length != 0"> |
||||
and td.project_id in |
||||
<foreach collection="projectIds" index="index" item="i" open="(" separator="," close=")"> |
||||
#{i} |
||||
</foreach> |
||||
</if> |
||||
group by td.user_id,tu.user_name |
||||
</select> |
||||
<select id="queryByDefineId" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition"> |
||||
SELECT |
||||
pd.*, u.user_name, |
||||
p.name AS project_name |
||||
FROM |
||||
t_ds_process_definition pd, |
||||
t_ds_user u, |
||||
t_ds_project p |
||||
WHERE |
||||
pd.user_id = u.id AND pd.project_id = p.id |
||||
AND pd.id = #{processDefineId} |
||||
</select> |
||||
</mapper> |
@ -1,43 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8" ?> |
||||
<!-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapMapper"> |
||||
<delete id="deleteByParentProcessId"> |
||||
delete |
||||
from t_ds_relation_process_instance |
||||
where parent_process_instance_id=#{parentProcessId} |
||||
|
||||
</delete> |
||||
<select id="queryByParentId" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstanceMap"> |
||||
select * |
||||
from t_ds_relation_process_instance |
||||
where parent_process_instance_id = #{parentProcessId} |
||||
and parent_task_instance_id = #{parentTaskId} |
||||
</select> |
||||
<select id="queryBySubProcessId" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstanceMap"> |
||||
select * |
||||
from t_ds_relation_process_instance |
||||
where process_instance_id = #{subProcessId} |
||||
</select> |
||||
<select id="querySubIdListByParentId" resultType="java.lang.Integer"> |
||||
select process_instance_id |
||||
from t_ds_relation_process_instance |
||||
where parent_process_instance_id = #{parentInstanceId} |
||||
</select> |
||||
</mapper> |
@ -1,182 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8" ?> |
||||
<!-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper"> |
||||
<select id="queryDetailById" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance"> |
||||
select inst.* |
||||
from t_ds_process_instance inst |
||||
where inst.id = #{processId} |
||||
</select> |
||||
<select id="queryByHostAndStatus" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance"> |
||||
select * |
||||
from t_ds_process_instance |
||||
where 1=1 |
||||
<if test="host != null and host != ''"> |
||||
and host=#{host} |
||||
</if> |
||||
and state in |
||||
<foreach collection="states" item="i" open="(" close=")" separator=","> |
||||
#{i} |
||||
</foreach> |
||||
order by id asc |
||||
</select> |
||||
|
||||
<select id="queryByTenantIdAndStatus" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance"> |
||||
select * |
||||
from t_ds_process_instance |
||||
where 1=1 |
||||
<if test="tenantId != -1"> |
||||
and tenant_id =#{tenantId} |
||||
</if> |
||||
and state in |
||||
<foreach collection="states" item="i" open="(" close=")" separator=","> |
||||
#{i} |
||||
</foreach> |
||||
order by id asc |
||||
</select> |
||||
|
||||
<select id="queryByWorkerGroupIdAndStatus" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance"> |
||||
select * |
||||
from t_ds_process_instance |
||||
where 1=1 |
||||
<if test="workerGroupId != -1"> |
||||
and worker_group_id =#{workerGroupId} |
||||
</if> |
||||
and state in |
||||
<foreach collection="states" item="i" open="(" close=")" separator=","> |
||||
#{i} |
||||
</foreach> |
||||
order by id asc |
||||
</select> |
||||
|
||||
<select id="queryProcessInstanceListPaging" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance"> |
||||
select instance.* |
||||
from t_ds_process_instance instance |
||||
join t_ds_process_definition define ON instance.process_definition_id = define.id |
||||
where 1=1 |
||||
and instance.is_sub_process=0 |
||||
and define.project_id = #{projectId} |
||||
<if test="processDefinitionId != 0"> |
||||
and instance.process_definition_id = #{processDefinitionId} |
||||
</if> |
||||
<if test="searchVal != null and searchVal != ''"> |
||||
and instance.name like concat('%', #{searchVal}, '%') |
||||
</if> |
||||
<if test="startTime != null "> |
||||
and instance.start_time > #{startTime} and instance.start_time <![CDATA[ <=]]> #{endTime} |
||||
</if> |
||||
<if test="states != null and states != ''"> |
||||
and instance.state in |
||||
<foreach collection="states" index="index" item="i" open="(" separator="," close=")"> |
||||
#{i} |
||||
</foreach> |
||||
</if> |
||||
<if test="host != null and host != ''"> |
||||
and instance.host like concat('%', #{host}, '%') |
||||
</if> |
||||
order by instance.start_time desc |
||||
</select> |
||||
<update id="setFailoverByHostAndStateArray"> |
||||
update t_ds_process_instance |
||||
set host=null |
||||
where host =#{host} and state in |
||||
<foreach collection="states" index="index" item="i" open="(" close=")" separator=","> |
||||
#{i} |
||||
</foreach> |
||||
</update> |
||||
<update id="updateProcessInstanceByState"> |
||||
update t_ds_process_instance |
||||
set state = #{destState} |
||||
where state = #{originState} |
||||
</update> |
||||
|
||||
<update id="updateProcessInstanceByTenantId"> |
||||
update t_ds_process_instance |
||||
set tenant_id = #{destTenantId} |
||||
where tenant_id = #{originTenantId} |
||||
</update> |
||||
|
||||
<update id="updateProcessInstanceByWorkerGroupId"> |
||||
update t_ds_process_instance |
||||
set worker_group_id = #{destWorkerGroupId} |
||||
where worker_group_id = #{originWorkerGroupId} |
||||
</update> |
||||
|
||||
<select id="countInstanceStateByUser" resultType="org.apache.dolphinscheduler.dao.entity.ExecuteStatusCount"> |
||||
select t.state, count(0) as count |
||||
from t_ds_process_instance t |
||||
join t_ds_process_definition d on d.id=t.process_definition_id |
||||
join t_ds_project p on p.id=d.project_id |
||||
where 1 = 1 |
||||
and t.is_sub_process = 0 |
||||
<if test="startTime != null and endTime != null"> |
||||
and t.start_time >= #{startTime} and t.start_time <![CDATA[ <= ]]> #{endTime} |
||||
</if> |
||||
<if test="projectIds != null and projectIds.length != 0"> |
||||
and p.id in |
||||
<foreach collection="projectIds" index="index" item="i" open="(" close=")" separator=","> |
||||
#{i} |
||||
</foreach> |
||||
</if> |
||||
group by t.state |
||||
</select> |
||||
<select id="queryByProcessDefineId" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance"> |
||||
select * |
||||
from t_ds_process_instance |
||||
where process_definition_id=#{processDefinitionId} |
||||
order by start_time desc limit #{size} |
||||
</select> |
||||
<select id="queryLastSchedulerProcess" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance"> |
||||
select * |
||||
from t_ds_process_instance |
||||
where process_definition_id=#{processDefinitionId} |
||||
<if test="startTime!=null and endTime != null "> |
||||
and schedule_time between #{startTime} and #{endTime} |
||||
</if> |
||||
order by end_time desc limit 1 |
||||
</select> |
||||
<select id="queryLastRunningProcess" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance"> |
||||
select * |
||||
from t_ds_process_instance |
||||
where 1=1 |
||||
<if test="states !=null and states.length != 0"> |
||||
and state in |
||||
<foreach collection="states" item="i" index="index" open="(" separator="," close=")"> |
||||
#{i} |
||||
</foreach> |
||||
</if> |
||||
<if test="startTime!=null and endTime != null "> |
||||
and process_definition_id=#{processDefinitionId} |
||||
and (schedule_time between #{startTime} and #{endTime} or start_time between #{startTime} and #{endTime}) |
||||
</if> |
||||
order by start_time desc limit 1 |
||||
</select> |
||||
<select id="queryLastManualProcess" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance"> |
||||
select * |
||||
from t_ds_process_instance |
||||
where process_definition_id=#{processDefinitionId} |
||||
and schedule_time is null |
||||
<if test="startTime!=null and endTime != null "> |
||||
and start_time between #{startTime} and #{endTime} |
||||
</if> |
||||
order by end_time desc limit 1 |
||||
</select> |
||||
|
||||
|
||||
</mapper> |
@ -1,68 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8" ?> |
||||
<!-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ProjectMapper"> |
||||
<select id="queryDetailById" resultType="org.apache.dolphinscheduler.dao.entity.Project"> |
||||
select p.*,u.user_name as user_name |
||||
from t_ds_project p |
||||
join t_ds_user u on p.user_id = u.id |
||||
where p.id = #{projectId} |
||||
</select> |
||||
<select id="queryByName" resultType="org.apache.dolphinscheduler.dao.entity.Project"> |
||||
select p.*,u.user_name as user_name |
||||
from t_ds_project p |
||||
join t_ds_user u on p.user_id = u.id |
||||
where p.name = #{projectName} |
||||
limit 1 |
||||
</select> |
||||
<select id="queryProjectListPaging" resultType="org.apache.dolphinscheduler.dao.entity.Project"> |
||||
select p.*,u.user_name as user_name, |
||||
(SELECT COUNT(*) FROM t_ds_process_definition AS def WHERE def.project_id = p.id) AS def_count, |
||||
(SELECT COUNT(*) FROM t_ds_process_definition def, t_ds_process_instance inst WHERE def.id = inst.process_definition_id AND def.project_id = p.id AND inst.state=1 ) as inst_running_count |
||||
from t_ds_project p |
||||
join t_ds_user u on u.id=p.user_id |
||||
where 1=1 |
||||
<if test="userId != 0"> |
||||
and p.id in |
||||
(select project_id from t_ds_relation_project_user where user_id=#{userId} |
||||
union select id as project_id from t_ds_project where user_id=#{userId} |
||||
) |
||||
</if> |
||||
<if test="searchName!=null and searchName != ''"> |
||||
and p.name like concat('%', #{searchName}, '%') |
||||
</if> |
||||
order by p.create_time desc |
||||
</select> |
||||
<select id="queryAuthedProjectListByUserId" resultType="org.apache.dolphinscheduler.dao.entity.Project"> |
||||
select p.* |
||||
from t_ds_project p,t_ds_relation_project_user rel |
||||
where p.id = rel.project_id and rel.user_id= #{userId} |
||||
</select> |
||||
<select id="queryProjectExceptUserId" resultType="org.apache.dolphinscheduler.dao.entity.Project"> |
||||
select * |
||||
from t_ds_project |
||||
where user_id <![CDATA[ <> ]]> #{userId} |
||||
</select> |
||||
<select id="queryProjectCreatedByUser" resultType="org.apache.dolphinscheduler.dao.entity.Project"> |
||||
select * |
||||
from t_ds_project |
||||
where user_id = #{userId} |
||||
</select> |
||||
|
||||
</mapper> |
@ -1,36 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8" ?> |
||||
<!-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ProjectUserMapper"> |
||||
<delete id="deleteProjectRelation"> |
||||
delete from t_ds_relation_project_user |
||||
where 1=1 |
||||
and user_id = #{userId} |
||||
<if test="projectId != 0 "> |
||||
and project_id = #{projectId} |
||||
</if> |
||||
</delete> |
||||
<select id="queryProjectRelation" resultType="org.apache.dolphinscheduler.dao.entity.ProjectUser"> |
||||
select * |
||||
from t_ds_relation_project_user |
||||
where project_id = #{projectId} |
||||
and user_id = #{userId} |
||||
limit 1 |
||||
</select> |
||||
</mapper> |
@ -1,42 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8" ?> |
||||
<!-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.QueueMapper"> |
||||
<select id="queryQueuePaging" resultType="org.apache.dolphinscheduler.dao.entity.Queue"> |
||||
select * |
||||
from t_ds_queue |
||||
where 1= 1 |
||||
<if test="searchVal != null and searchVal != ''"> |
||||
and queue_name like concat('%', #{searchVal}, '%') |
||||
</if> |
||||
order by update_time desc |
||||
</select> |
||||
<select id="queryAllQueueList" resultType="org.apache.dolphinscheduler.dao.entity.Queue"> |
||||
select * |
||||
from t_ds_queue |
||||
where 1=1 |
||||
<if test="queue != null and queue != ''"> |
||||
and queue = #{queue} |
||||
</if> |
||||
<if test="queueName != null and queueName != ''"> |
||||
and queue_name =#{queueName} |
||||
</if> |
||||
</select> |
||||
|
||||
</mapper> |
@ -1,74 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8" ?> |
||||
<!-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ResourceMapper"> |
||||
<select id="queryResourceList" resultType="org.apache.dolphinscheduler.dao.entity.Resource"> |
||||
select * |
||||
from t_ds_resources |
||||
where 1= 1 |
||||
<if test="alias != null and alias != ''"> |
||||
and alias = #{alias} |
||||
</if> |
||||
<if test="type != -1"> |
||||
and type = #{type} |
||||
</if> |
||||
<if test="userId != 0"> |
||||
and user_id = #{userId} |
||||
</if> |
||||
</select> |
||||
<select id="queryResourceListAuthored" resultType="org.apache.dolphinscheduler.dao.entity.Resource"> |
||||
select * |
||||
from t_ds_resources |
||||
where 1 = 1 |
||||
<if test="type != -1"> |
||||
and type=#{type} |
||||
</if> |
||||
and id in (select resources_id from t_ds_relation_resources_user where user_id=#{userId} |
||||
union select id as resources_id from t_ds_resources where user_id=#{userId}) |
||||
</select> |
||||
<select id="queryResourcePaging" resultType="org.apache.dolphinscheduler.dao.entity.Resource"> |
||||
select * |
||||
from t_ds_resources |
||||
where type=#{type} |
||||
<if test="userId != 0"> |
||||
and id in (select resources_id from t_ds_relation_resources_user where user_id=#{userId} |
||||
union select id as resources_id from t_ds_resources where user_id=#{userId}) |
||||
</if> |
||||
<if test="searchVal != null and searchVal != ''"> |
||||
and alias like concat('%', #{searchVal}, '%') |
||||
</if> |
||||
order by update_time desc |
||||
</select> |
||||
<select id="queryAuthorizedResourceList" resultType="org.apache.dolphinscheduler.dao.entity.Resource"> |
||||
select r.* |
||||
from t_ds_resources r,t_ds_relation_resources_user rel |
||||
where r.id = rel.resources_id AND rel.user_id = #{userId} |
||||
</select> |
||||
<select id="queryResourceExceptUserId" resultType="org.apache.dolphinscheduler.dao.entity.Resource"> |
||||
select * |
||||
from t_ds_resources |
||||
where user_id <![CDATA[ <> ]]> #{userId} |
||||
</select> |
||||
<select id="queryTenantCodeByResourceName" resultType="java.lang.String"> |
||||
select tenant_code |
||||
from t_ds_tenant t, t_ds_user u, t_ds_resources res |
||||
where t.id = u.tenant_id and u.id = res.user_id and res.type=0 |
||||
and res.alias= #{resName} |
||||
</select> |
||||
</mapper> |
@ -1,32 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8" ?> |
||||
<!-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ResourceUserMapper"> |
||||
<delete id="deleteResourceUser"> |
||||
delete |
||||
from t_ds_relation_resources_user |
||||
where 1 = 1 |
||||
<if test="userId != 0"> |
||||
and user_id = #{userId} |
||||
</if> |
||||
<if test="resourceId != 0"> |
||||
and resources_id = #{resourceId} |
||||
</if> |
||||
</delete> |
||||
</mapper> |
@ -1,58 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8" ?> |
||||
<!-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ScheduleMapper"> |
||||
<select id="queryByProcessDefineIdPaging" resultType="org.apache.dolphinscheduler.dao.entity.Schedule"> |
||||
select p_f.name as process_definition_name, p.name as project_name,u.user_name,s.* |
||||
from t_ds_schedules s |
||||
join t_ds_process_definition p_f on s.process_definition_id = p_f.id |
||||
join t_ds_project as p on p_f.project_id = p.id |
||||
join t_ds_user as u on s.user_id = u.id |
||||
where 1=1 |
||||
<if test="processDefinitionId!= 0"> |
||||
and s.process_definition_id = #{processDefinitionId} |
||||
</if> |
||||
order by s.update_time desc |
||||
</select> |
||||
<select id="querySchedulerListByProjectName" resultType="org.apache.dolphinscheduler.dao.entity.Schedule"> |
||||
select p_f.name as process_definition_name, p_f.description as definition_description, p.name as project_name,u.user_name,s.* |
||||
from t_ds_schedules s |
||||
join t_ds_process_definition p_f on s.process_definition_id = p_f.id |
||||
join t_ds_project as p on p_f.project_id = p.id |
||||
join t_ds_user as u on s.user_id = u.id |
||||
where p.name = #{projectName} |
||||
</select> |
||||
<select id="selectAllByProcessDefineArray" resultType="org.apache.dolphinscheduler.dao.entity.Schedule"> |
||||
select * |
||||
from t_ds_schedules |
||||
where 1= 1 |
||||
<if test="processDefineIds != null and processDefineIds.length != 0 "> |
||||
and process_definition_id in |
||||
<foreach collection="processDefineIds" index="index" item="i" open="(" separator="," close=")"> |
||||
#{i} |
||||
</foreach> |
||||
</if> |
||||
and release_state = 1 |
||||
</select> |
||||
<select id="queryByProcessDefinitionId" resultType="org.apache.dolphinscheduler.dao.entity.Schedule"> |
||||
select * |
||||
from t_ds_schedules |
||||
where process_definition_id =#{processDefinitionId} |
||||
</select> |
||||
</mapper> |
@ -1,32 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8" ?> |
||||
<!-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.SessionMapper"> |
||||
<select id="queryByUserId" resultType="org.apache.dolphinscheduler.dao.entity.Session"> |
||||
select * |
||||
from t_ds_session |
||||
where user_id = #{userId} |
||||
</select> |
||||
|
||||
<select id="queryByUserIdAndIp" resultType="org.apache.dolphinscheduler.dao.entity.Session"> |
||||
select * |
||||
from t_ds_session |
||||
where user_id = #{userId} AND ip = #{ip} |
||||
</select> |
||||
</mapper> |
@ -1,129 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8" ?> |
||||
<!-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper"> |
||||
<update id="setFailoverByHostAndStateArray"> |
||||
update t_ds_task_instance |
||||
set state = #{destStatus} |
||||
where host = #{host} |
||||
and state in |
||||
<foreach collection="states" index="index" item="i" open="(" separator="," close=")"> |
||||
#{i} |
||||
</foreach> |
||||
</update> |
||||
<select id="queryTaskByProcessIdAndState" resultType="java.lang.Integer"> |
||||
select id |
||||
from t_ds_task_instance |
||||
WHERE process_instance_id = #{processInstanceId} |
||||
and state = #{state} |
||||
and flag = 1 |
||||
</select> |
||||
<select id="findValidTaskListByProcessId" resultType="org.apache.dolphinscheduler.dao.entity.TaskInstance"> |
||||
select * |
||||
from t_ds_task_instance |
||||
WHERE process_instance_id = #{processInstanceId} |
||||
and flag = #{flag} |
||||
order by start_time desc |
||||
</select> |
||||
<select id="queryByHostAndStatus" resultType="org.apache.dolphinscheduler.dao.entity.TaskInstance"> |
||||
select * |
||||
from t_ds_task_instance |
||||
where 1 = 1 |
||||
<if test="host != null and host != ''"> |
||||
and host = #{host} |
||||
</if> |
||||
<if test="states != null and states.length != 0"> |
||||
and state in |
||||
<foreach collection="states" index="index" item="i" open="(" separator="," close=")"> |
||||
#{i} |
||||
</foreach> |
||||
</if> |
||||
</select> |
||||
<select id="countTaskInstanceStateByUser" resultType="org.apache.dolphinscheduler.dao.entity.ExecuteStatusCount"> |
||||
select state, count(0) as count |
||||
from t_ds_task_instance t |
||||
left join t_ds_process_definition d on d.id=t.process_definition_id |
||||
left join t_ds_project p on p.id=d.project_id |
||||
where 1=1 |
||||
<if test="projectIds != null and projectIds.length != 0"> |
||||
and d.project_id in |
||||
<foreach collection="projectIds" index="index" item="i" open="(" separator="," close=")"> |
||||
#{i} |
||||
</foreach> |
||||
</if> |
||||
<if test="startTime != null and endTime != null"> |
||||
and t.start_time > #{startTime} and t.start_time <![CDATA[ <= ]]> #{endTime} |
||||
</if> |
||||
group by t.state |
||||
</select> |
||||
<select id="queryByInstanceIdAndName" resultType="org.apache.dolphinscheduler.dao.entity.TaskInstance"> |
||||
select * |
||||
from t_ds_task_instance |
||||
where process_instance_id = #{processInstanceId} |
||||
and name = #{name} |
||||
and flag = 1 |
||||
limit 1 |
||||
</select> |
||||
<select id="countTask" resultType="java.lang.Integer"> |
||||
select count(1) as count |
||||
from t_ds_task_instance task,t_ds_process_definition process |
||||
where task.process_definition_id=process.id |
||||
<if test="projectIds != null and projectIds.length != 0"> |
||||
and process.project_id in |
||||
<foreach collection="projectIds" index="index" item="i" open="(" separator="," close=")"> |
||||
#{i} |
||||
</foreach> |
||||
</if> |
||||
<if test="taskIds != null and taskIds.length != 0"> |
||||
and task.id in |
||||
<foreach collection="taskIds" index="index" item="i" open="(" separator="," close=")"> |
||||
#{i} |
||||
</foreach> |
||||
</if> |
||||
</select> |
||||
<select id="queryTaskInstanceListPaging" resultType="org.apache.dolphinscheduler.dao.entity.TaskInstance"> |
||||
select instance.*,process.name as process_instance_name |
||||
from t_ds_task_instance instance |
||||
join t_ds_process_definition define ON instance.process_definition_id = define.id |
||||
join t_ds_process_instance process on process.id=instance.process_instance_id |
||||
where define.project_id = #{projectId} |
||||
<if test="startTime != null"> |
||||
and instance.start_time > #{startTime} and instance.start_time <![CDATA[ <=]]> #{endTime} |
||||
</if> |
||||
<if test="processInstanceId != 0"> |
||||
and instance.process_instance_id = #{processInstanceId} |
||||
</if> |
||||
<if test="searchVal != null and searchVal != ''"> |
||||
and instance.name like concat('%', #{searchVal}, '%') |
||||
</if> |
||||
<if test="taskName != null and taskName != ''"> |
||||
and instance.name=#{taskName} |
||||
</if> |
||||
<if test="states != null and states.length != 0"> |
||||
and instance.state in |
||||
<foreach collection="states" index="index" item="i" open="(" separator="," close=")"> |
||||
#{i} |
||||
</foreach> |
||||
</if> |
||||
<if test="host != null and host != ''"> |
||||
and instance.host like concat('%', #{host}, '%') |
||||
</if> |
||||
order by instance.start_time desc |
||||
</select> |
||||
</mapper> |
@ -1,41 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8" ?> |
||||
<!-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.TenantMapper"> |
||||
<select id="queryById" resultType="org.apache.dolphinscheduler.dao.entity.Tenant"> |
||||
SELECT t.*,q.queue_name,q.queue |
||||
FROM t_ds_tenant t,t_ds_queue q |
||||
WHERE t.queue_id = q.id |
||||
and t.id = #{tenantId} |
||||
</select> |
||||
<select id="queryByTenantCode" resultType="org.apache.dolphinscheduler.dao.entity.Tenant"> |
||||
select * |
||||
from t_ds_tenant |
||||
where tenant_code = #{tenantCode} |
||||
</select> |
||||
<select id="queryTenantPaging" resultType="org.apache.dolphinscheduler.dao.entity.Tenant"> |
||||
SELECT t.*,q.queue_name |
||||
FROM t_ds_tenant t,t_ds_queue q |
||||
WHERE t.queue_id = q.id |
||||
<if test="searchVal != null and searchVal != ''"> |
||||
and t.tenant_name like concat('%', #{searchVal}, '%') |
||||
</if> |
||||
order by t.update_time desc |
||||
</select> |
||||
</mapper> |
@ -1,29 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8" ?> |
||||
<!-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.UDFUserMapper"> |
||||
<delete id="deleteByUserId"> |
||||
delete from t_ds_relation_udfs_user |
||||
where user_id = #{userId} |
||||
</delete> |
||||
<delete id="deleteByUdfFuncId"> |
||||
delete from t_ds_relation_udfs_user |
||||
where udf_id = #{udfFuncId} |
||||
</delete> |
||||
</mapper> |
@ -1,71 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8" ?> |
||||
<!-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.UdfFuncMapper"> |
||||
<select id="queryUdfByIdStr" resultType="org.apache.dolphinscheduler.dao.entity.UdfFunc"> |
||||
select * |
||||
from t_ds_udfs |
||||
where 1 = 1 |
||||
<if test="ids != null and ids != ''"> |
||||
and id in |
||||
<foreach collection="ids" item="i" open="(" close=")" separator=","> |
||||
#{i} |
||||
</foreach> |
||||
</if> |
||||
<if test="funcNames != null and funcNames != ''"> |
||||
and func_name = #{funcNames} |
||||
</if> |
||||
order by id asc |
||||
</select> |
||||
<select id="queryUdfFuncPaging" resultType="org.apache.dolphinscheduler.dao.entity.UdfFunc"> |
||||
select * |
||||
from t_ds_udfs |
||||
where 1=1 |
||||
<if test="searchVal!= null and searchVal != ''"> |
||||
and func_name like concat('%', #{searchVal}, '%') |
||||
</if> |
||||
<if test="userId != 0"> |
||||
and id in ( |
||||
select udf_id from t_ds_relation_udfs_user where user_id=#{userId} |
||||
union select id as udf_id from t_ds_udfs where user_id=#{userId}) |
||||
</if> |
||||
order by create_time desc |
||||
</select> |
||||
<select id="getUdfFuncByType" resultType="org.apache.dolphinscheduler.dao.entity.UdfFunc"> |
||||
select * |
||||
from t_ds_udfs |
||||
where type=#{type} |
||||
<if test="userId != 0"> |
||||
and id in ( |
||||
select udf_id from t_ds_relation_udfs_user where user_id=#{userId} |
||||
union select id as udf_id from t_ds_udfs where user_id=#{userId}) |
||||
</if> |
||||
</select> |
||||
<select id="queryUdfFuncExceptUserId" resultType="org.apache.dolphinscheduler.dao.entity.UdfFunc"> |
||||
select * |
||||
from t_ds_udfs |
||||
where user_id <![CDATA[ <> ]]> #{userId} |
||||
</select> |
||||
<select id="queryAuthedUdfFunc" resultType="org.apache.dolphinscheduler.dao.entity.UdfFunc"> |
||||
SELECT u.* |
||||
from t_ds_udfs u,t_ds_relation_udfs_user rel |
||||
WHERE u.id = rel.udf_id |
||||
AND rel.user_id = #{userId} |
||||
</select> |
||||
</mapper> |
@ -1,31 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8" ?> |
||||
<!-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.UserAlertGroupMapper"> |
||||
<delete id="deleteByAlertgroupId"> |
||||
delete from t_ds_relation_user_alertgroup |
||||
where alertgroup_id = #{alertgroupId} |
||||
</delete> |
||||
<select id="listUserByAlertgroupId" resultType="org.apache.dolphinscheduler.dao.entity.User"> |
||||
SELECT u.* |
||||
FROM t_ds_relation_user_alertgroup g_u |
||||
JOIN t_ds_user u on g_u.user_id = u.id |
||||
WHERE g_u.alertgroup_id = #{alertgroupId} |
||||
</select> |
||||
</mapper> |
@ -1,72 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8" ?> |
||||
<!-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.UserMapper"> |
||||
<select id="queryAllGeneralUser" resultType="org.apache.dolphinscheduler.dao.entity.User"> |
||||
select * from t_ds_user |
||||
where user_type=1; |
||||
</select> |
||||
<select id="queryByUserNameAccurately" resultType="org.apache.dolphinscheduler.dao.entity.User"> |
||||
select * from t_ds_user |
||||
where user_name=#{userName} |
||||
</select> |
||||
<select id="queryUserByNamePassword" resultType="org.apache.dolphinscheduler.dao.entity.User"> |
||||
select * from t_ds_user |
||||
where user_name=#{userName} and user_password = #{password} |
||||
</select> |
||||
<select id="queryUserPaging" resultType="org.apache.dolphinscheduler.dao.entity.User"> |
||||
select u.id,u.user_name,u.user_password,u.user_type,u.email,u.phone,u.tenant_id,u.create_time, |
||||
u.update_time,t.tenant_name, |
||||
case when u.queue <![CDATA[ <> ]]> '' then u.queue else q.queue_name end as queue, q.queue_name |
||||
from t_ds_user u |
||||
left join t_ds_tenant t on u.tenant_id=t.id |
||||
left join t_ds_queue q on t.queue_id = q.id |
||||
where 1=1 |
||||
<if test="userName!=null and userName != ''" > |
||||
and u.user_name like concat ('%', #{userName}, '%') |
||||
</if> |
||||
order by u.update_time desc |
||||
</select> |
||||
<select id="queryDetailsById" resultType="org.apache.dolphinscheduler.dao.entity.User"> |
||||
select u.*, t.tenant_name, |
||||
case when u.queue <![CDATA[ <> ]]> '' then u.queue else q.queue_name end as queue_name |
||||
from t_ds_user u,t_ds_tenant t,t_ds_queue q |
||||
WHERE u.tenant_id = t.id and t.queue_id = q.id and u.id = #{userId} |
||||
</select> |
||||
<select id="queryUserListByAlertGroupId" resultType="org.apache.dolphinscheduler.dao.entity.User"> |
||||
select u.* |
||||
from t_ds_user u, t_ds_relation_user_alertgroup rel |
||||
where u.id = rel.user_id AND rel.alertgroup_id = #{alertgroupId} |
||||
</select> |
||||
<select id="queryUserListByTenant" resultType="org.apache.dolphinscheduler.dao.entity.User"> |
||||
select * |
||||
from t_ds_user |
||||
where tenant_id = #{tenantId} |
||||
</select> |
||||
<select id="queryTenantCodeByUserId" resultType="org.apache.dolphinscheduler.dao.entity.User"> |
||||
SELECT u.*,t.tenant_code |
||||
FROM t_ds_user u, t_ds_tenant t |
||||
WHERE u.tenant_id = t.id AND u.id = #{userId} |
||||
</select> |
||||
<select id="queryUserByToken" resultType="org.apache.dolphinscheduler.dao.entity.User"> |
||||
select u.* |
||||
from t_ds_user u ,t_ds_access_token t |
||||
where u.id = t.user_id and token=#{token} and t.expire_time > NOW() |
||||
</select> |
||||
</mapper> |
@ -1,40 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8" ?> |
||||
<!-- |
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more |
||||
~ contributor license agreements. See the NOTICE file distributed with |
||||
~ this work for additional information regarding copyright ownership. |
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
~ (the "License"); you may not use this file except in compliance with |
||||
~ the License. You may obtain a copy of the License at |
||||
~ |
||||
~ http://www.apache.org/licenses/LICENSE-2.0 |
||||
~ |
||||
~ Unless required by applicable law or agreed to in writing, software |
||||
~ distributed under the License is distributed on an "AS IS" BASIS, |
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
~ See the License for the specific language governing permissions and |
||||
~ limitations under the License. |
||||
--> |
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > |
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.WorkerGroupMapper"> |
||||
<select id="queryAllWorkerGroup" resultType="org.apache.dolphinscheduler.dao.entity.WorkerGroup"> |
||||
select * |
||||
from t_ds_worker_group |
||||
order by update_time desc |
||||
</select> |
||||
<select id="queryWorkerGroupByName" resultType="org.apache.dolphinscheduler.dao.entity.WorkerGroup"> |
||||
select * |
||||
from t_ds_worker_group |
||||
where name = #{name} |
||||
</select> |
||||
<select id="queryListPaging" resultType="org.apache.dolphinscheduler.dao.entity.WorkerGroup"> |
||||
select * |
||||
from t_ds_worker_group |
||||
where 1 = 1 |
||||
<if test="searchVal != null and searchVal != ''"> |
||||
and name like concat('%', #{searchVal}, '%') |
||||
</if> |
||||
order by update_time desc |
||||
</select> |
||||
</mapper> |
@ -1,42 +0,0 @@
|
||||
# |
||||
# Licensed to the Apache Software Foundation (ASF) under one or more |
||||
# contributor license agreements. See the NOTICE file distributed with |
||||
# this work for additional information regarding copyright ownership. |
||||
# The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
# (the "License"); you may not use this file except in compliance with |
||||
# the License. You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
# |
||||
|
||||
#zookeeper cluster |
||||
zookeeper.quorum=127.0.0.1:2181 |
||||
|
||||
#dolphinscheduler root directory |
||||
zookeeper.dolphinscheduler.root=/dolphinscheduler |
||||
|
||||
#zookeeper server dirctory |
||||
zookeeper.dolphinscheduler.dead.servers=/dolphinscheduler/dead-servers |
||||
zookeeper.dolphinscheduler.masters=/dolphinscheduler/masters |
||||
zookeeper.dolphinscheduler.workers=/dolphinscheduler/workers |
||||
|
||||
#zookeeper lock dirctory |
||||
zookeeper.dolphinscheduler.lock.masters=/dolphinscheduler/lock/masters |
||||
zookeeper.dolphinscheduler.lock.workers=/dolphinscheduler/lock/workers |
||||
|
||||
#dolphinscheduler failover directory |
||||
zookeeper.dolphinscheduler.lock.failover.masters=/dolphinscheduler/lock/failover/masters |
||||
zookeeper.dolphinscheduler.lock.failover.workers=/dolphinscheduler/lock/failover/workers |
||||
zookeeper.dolphinscheduler.lock.failover.startup.masters=/dolphinscheduler/lock/failover/startup-masters |
||||
|
||||
#dolphinscheduler failover directory |
||||
zookeeper.session.timeout=300 |
||||
zookeeper.connection.timeout=300 |
||||
zookeeper.retry.sleep=1000 |
||||
zookeeper.retry.maxtime=5 |
@ -1,263 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?> |
||||
|
||||
<!-- |
||||
Licensed to the Apache Software Foundation (ASF) under one |
||||
or more contributor license agreements. See the NOTICE file |
||||
distributed with this work for additional information |
||||
regarding copyright ownership. The ASF licenses this file |
||||
to you under the Apache License, Version 2.0 (the |
||||
"License"); you may not use this file except in compliance |
||||
with the License. You may obtain a copy of the License at |
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0 |
||||
|
||||
Unless required by applicable law or agreed to in writing, |
||||
software distributed under the License is distributed on an |
||||
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
||||
KIND, either express or implied. See the License for the |
||||
specific language governing permissions and limitations |
||||
under the License. |
||||
--> |
||||
|
||||
<!-- |
||||
| This is the configuration file for Maven. It can be specified at two levels: |
||||
| |
||||
| 1. User Level. This settings.xml file provides configuration for a single user, |
||||
| and is normally provided in ${user.home}/.m2/settings.xml. |
||||
| |
||||
| NOTE: This location can be overridden with the CLI option: |
||||
| |
||||
| -s /path/to/user/settings.xml |
||||
| |
||||
| 2. Global Level. This settings.xml file provides configuration for all Maven |
||||
| users on a machine (assuming they're all using the same Maven |
||||
| installation). It's normally provided in |
||||
| ${maven.home}/conf/settings.xml. |
||||
| |
||||
| NOTE: This location can be overridden with the CLI option: |
||||
| |
||||
| -gs /path/to/global/settings.xml |
||||
| |
||||
| The sections in this sample file are intended to give you a running start at |
||||
| getting the most out of your Maven installation. Where appropriate, the default |
||||
| values (values used when the setting is not specified) are provided. |
||||
| |
||||
|--> |
||||
<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0" |
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" |
||||
xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd"> |
||||
<!-- localRepository |
||||
| The path to the local repository maven will use to store artifacts. |
||||
| |
||||
| Default: ${user.home}/.m2/repository |
||||
<localRepository>/path/to/local/repo</localRepository> |
||||
--> |
||||
|
||||
<!-- interactiveMode |
||||
| This will determine whether maven prompts you when it needs input. If set to false, |
||||
| maven will use a sensible default value, perhaps based on some other setting, for |
||||
| the parameter in question. |
||||
| |
||||
| Default: true |
||||
<interactiveMode>true</interactiveMode> |
||||
--> |
||||
|
||||
<!-- offline |
||||
| Determines whether maven should attempt to connect to the network when executing a build. |
||||
| This will have an effect on artifact downloads, artifact deployment, and others. |
||||
| |
||||
| Default: false |
||||
<offline>false</offline> |
||||
--> |
||||
|
||||
<!-- pluginGroups |
||||
| This is a list of additional group identifiers that will be searched when resolving plugins by their prefix, i.e. |
||||
| when invoking a command line like "mvn prefix:goal". Maven will automatically add the group identifiers |
||||
| "org.apache.maven.plugins" and "org.codehaus.mojo" if these are not already contained in the list. |
||||
|--> |
||||
<pluginGroups> |
||||
<!-- pluginGroup |
||||
| Specifies a further group identifier to use for plugin lookup. |
||||
<pluginGroup>com.your.plugins</pluginGroup> |
||||
--> |
||||
</pluginGroups> |
||||
|
||||
<!-- proxies |
||||
| This is a list of proxies which can be used on this machine to connect to the network. |
||||
| Unless otherwise specified (by system property or command-line switch), the first proxy |
||||
| specification in this list marked as active will be used. |
||||
|--> |
||||
<proxies> |
||||
<!-- proxy |
||||
| Specification for one proxy, to be used in connecting to the network. |
||||
| |
||||
<proxy> |
||||
<id>optional</id> |
||||
<active>true</active> |
||||
<protocol>http</protocol> |
||||
<username>proxyuser</username> |
||||
<password>proxypass</password> |
||||
<host>proxy.host.net</host> |
||||
<port>80</port> |
||||
<nonProxyHosts>local.net|some.host.com</nonProxyHosts> |
||||
</proxy> |
||||
--> |
||||
</proxies> |
||||
|
||||
<!-- servers |
||||
| This is a list of authentication profiles, keyed by the server-id used within the system. |
||||
| Authentication profiles can be used whenever maven must make a connection to a remote server. |
||||
|--> |
||||
<servers> |
||||
<!-- server |
||||
| Specifies the authentication information to use when connecting to a particular server, identified by |
||||
| a unique name within the system (referred to by the 'id' attribute below). |
||||
| |
||||
| NOTE: You should either specify username/password OR privateKey/passphrase, since these pairings are |
||||
| used together. |
||||
| |
||||
<server> |
||||
<id>deploymentRepo</id> |
||||
<username>repouser</username> |
||||
<password>repopwd</password> |
||||
</server> |
||||
--> |
||||
|
||||
<!-- Another sample, using keys to authenticate. |
||||
<server> |
||||
<id>siteServer</id> |
||||
<privateKey>/path/to/private/key</privateKey> |
||||
<passphrase>optional; leave empty if not used.</passphrase> |
||||
</server> |
||||
--> |
||||
</servers> |
||||
|
||||
<!-- mirrors |
||||
| This is a list of mirrors to be used in downloading artifacts from remote repositories. |
||||
| |
||||
| It works like this: a POM may declare a repository to use in resolving certain artifacts. |
||||
| However, this repository may have problems with heavy traffic at times, so people have mirrored |
||||
| it to several places. |
||||
| |
||||
| That repository definition will have a unique id, so we can create a mirror reference for that |
||||
| repository, to be used as an alternate download site. The mirror site will be the preferred |
||||
| server for that repository. |
||||
|--> |
||||
<mirrors> |
||||
<!-- mirror |
||||
| Specifies a repository mirror site to use instead of a given repository. The repository that |
||||
| this mirror serves has an ID that matches the mirrorOf element of this mirror. IDs are used |
||||
| for inheritance and direct lookup purposes, and must be unique across the set of mirrors. |
||||
| |
||||
<mirror> |
||||
<id>mirrorId</id> |
||||
<mirrorOf>repositoryId</mirrorOf> |
||||
<name>Human Readable Name for this Mirror.</name> |
||||
<url>http://my.repository.com/repo/path</url> |
||||
</mirror> |
||||
--> |
||||
<mirror> |
||||
<id>nexus-aliyun</id> |
||||
<mirrorOf>central</mirrorOf> |
||||
<name>Nexus aliyun</name> |
||||
<url>http://maven.aliyun.com/nexus/content/groups/public</url> |
||||
</mirror> |
||||
</mirrors> |
||||
|
||||
<!-- profiles |
||||
| This is a list of profiles which can be activated in a variety of ways, and which can modify |
||||
| the build process. Profiles provided in the settings.xml are intended to provide local machine- |
||||
| specific paths and repository locations which allow the build to work in the local environment. |
||||
| |
||||
| For example, if you have an integration testing plugin - like cactus - that needs to know where |
||||
| your Tomcat instance is installed, you can provide a variable here such that the variable is |
||||
| dereferenced during the build process to configure the cactus plugin. |
||||
| |
||||
| As noted above, profiles can be activated in a variety of ways. One way - the activeProfiles |
||||
| section of this document (settings.xml) - will be discussed later. Another way essentially |
||||
| relies on the detection of a system property, either matching a particular value for the property, |
||||
| or merely testing its existence. Profiles can also be activated by JDK version prefix, where a |
||||
| value of '1.4' might activate a profile when the build is executed on a JDK version of '1.4.2_07'. |
||||
| Finally, the list of active profiles can be specified directly from the command line. |
||||
| |
||||
| NOTE: For profiles defined in the settings.xml, you are restricted to specifying only artifact |
||||
| repositories, plugin repositories, and free-form properties to be used as configuration |
||||
| variables for plugins in the POM. |
||||
| |
||||
|--> |
||||
<profiles> |
||||
<!-- profile |
||||
| Specifies a set of introductions to the build process, to be activated using one or more of the |
||||
| mechanisms described above. For inheritance purposes, and to activate profiles via <activatedProfiles/> |
||||
| or the command line, profiles have to have an ID that is unique. |
||||
| |
||||
| An encouraged best practice for profile identification is to use a consistent naming convention |
||||
| for profiles, such as 'env-dev', 'env-test', 'env-production', 'user-jdcasey', 'user-brett', etc. |
||||
| This will make it more intuitive to understand what the set of introduced profiles is attempting |
||||
| to accomplish, particularly when you only have a list of profile id's for debug. |
||||
| |
||||
| This profile example uses the JDK version to trigger activation, and provides a JDK-specific repo. |
||||
<profile> |
||||
<id>jdk-1.4</id> |
||||
|
||||
<activation> |
||||
<jdk>1.4</jdk> |
||||
</activation> |
||||
|
||||
<repositories> |
||||
<repository> |
||||
<id>jdk14</id> |
||||
<name>Repository for JDK 1.4 builds</name> |
||||
<url>http://www.myhost.com/maven/jdk14</url> |
||||
<layout>default</layout> |
||||
<snapshotPolicy>always</snapshotPolicy> |
||||
</repository> |
||||
</repositories> |
||||
</profile> |
||||
--> |
||||
|
||||
<!-- |
||||
| Here is another profile, activated by the system property 'target-env' with a value of 'dev', |
||||
| which provides a specific path to the Tomcat instance. To use this, your plugin configuration |
||||
| might hypothetically look like: |
||||
| |
||||
| ... |
||||
| <plugin> |
||||
| <groupId>org.myco.myplugins</groupId> |
||||
| <artifactId>myplugin</artifactId> |
||||
| |
||||
| <configuration> |
||||
| <tomcatLocation>${tomcatPath}</tomcatLocation> |
||||
| </configuration> |
||||
| </plugin> |
||||
| ... |
||||
| |
||||
| NOTE: If you just wanted to inject this configuration whenever someone set 'target-env' to |
||||
| anything, you could just leave off the <value/> inside the activation-property. |
||||
| |
||||
<profile> |
||||
<id>env-dev</id> |
||||
|
||||
<activation> |
||||
<property> |
||||
<name>target-env</name> |
||||
<value>dev</value> |
||||
</property> |
||||
</activation> |
||||
|
||||
<properties> |
||||
<tomcatPath>/path/to/tomcat/instance</tomcatPath> |
||||
</properties> |
||||
</profile> |
||||
--> |
||||
</profiles> |
||||
|
||||
<!-- activeProfiles |
||||
| List of profiles that are active for all builds. |
||||
| |
||||
<activeProfiles> |
||||
<activeProfile>alwaysActiveProfile</activeProfile> |
||||
<activeProfile>anotherAlwaysActiveProfile</activeProfile> |
||||
</activeProfiles> |
||||
--> |
||||
</settings> |
@ -0,0 +1,56 @@
|
||||
:: Licensed to the Apache Software Foundation (ASF) under one or more |
||||
:: contributor license agreements. See the NOTICE file distributed with |
||||
:: this work for additional information regarding copyright ownership. |
||||
:: The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
:: (the "License"); you may not use this file except in compliance with |
||||
:: the License. You may obtain a copy of the License at |
||||
:: |
||||
:: http://www.apache.org/licenses/LICENSE-2.0 |
||||
:: |
||||
:: Unless required by applicable law or agreed to in writing, software |
||||
:: distributed under the License is distributed on an "AS IS" BASIS, |
||||
:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
:: See the License for the specific language governing permissions and |
||||
:: limitations under the License. |
||||
:: |
||||
@echo off |
||||
|
||||
echo "------ dolphinscheduler start - build -------" |
||||
set |
||||
|
||||
if not defined VERSION ( |
||||
echo "set environment variable [VERSION]" |
||||
for /f %%l in (%cd%\sql\soft_version) do (set VERSION=%%l) |
||||
) |
||||
|
||||
if not defined DOCKER_REPO ( |
||||
echo "set environment variable [DOCKER_REPO]" |
||||
set DOCKER_REPO='dolphinscheduler' |
||||
) |
||||
|
||||
echo "Version: %VERSION%" |
||||
echo "Repo: %DOCKER_REPO%" |
||||
|
||||
echo "Current Directory is %cd%" |
||||
|
||||
:: maven package(Project Directory) |
||||
echo "call mvn clean compile package -Prelease" |
||||
call mvn clean compile package -Prelease -DskipTests=true |
||||
if "%errorlevel%"=="1" goto :mvnFailed |
||||
|
||||
:: move dolphinscheduler-bin.tar.gz file to dockerfile directory |
||||
echo "move %cd%\dolphinscheduler-dist\target\apache-dolphinscheduler-incubating-%VERSION%-SNAPSHOT-dolphinscheduler-bin.tar.gz %cd%\dockerfile\" |
||||
move %cd%\dolphinscheduler-dist\target\apache-dolphinscheduler-incubating-%VERSION%-SNAPSHOT-dolphinscheduler-bin.tar.gz %cd%\dockerfile\ |
||||
|
||||
:: docker build |
||||
echo "docker build --build-arg VERSION=%VERSION% -t %DOCKER_REPO%:%VERSION% %cd%\dockerfile\" |
||||
docker build --build-arg VERSION=%VERSION% -t %DOCKER_REPO%:%VERSION% %cd%\dockerfile\ |
||||
if "%errorlevel%"=="1" goto :dockerBuildFailed |
||||
|
||||
echo "------ dolphinscheduler end - build -------" |
||||
|
||||
:mvnFailed |
||||
echo "MAVEN PACKAGE FAILED!" |
||||
|
||||
:dockerBuildFailed |
||||
echo "DOCKER BUILD FAILED!" |
@ -0,0 +1,23 @@
|
||||
:: Licensed to the Apache Software Foundation (ASF) under one or more |
||||
:: contributor license agreements. See the NOTICE file distributed with |
||||
:: this work for additional information regarding copyright ownership. |
||||
:: The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
:: (the "License"); you may not use this file except in compliance with |
||||
:: the License. You may obtain a copy of the License at |
||||
:: |
||||
:: http://www.apache.org/licenses/LICENSE-2.0 |
||||
:: |
||||
:: Unless required by applicable law or agreed to in writing, software |
||||
:: distributed under the License is distributed on an "AS IS" BASIS, |
||||
:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
:: See the License for the specific language governing permissions and |
||||
:: limitations under the License. |
||||
:: |
||||
@echo off |
||||
|
||||
echo "------ push start -------" |
||||
set |
||||
|
||||
docker push %DOCKER_REPO%:%VERSION% |
||||
|
||||
echo "------ push end -------" |
@ -0,0 +1,100 @@
|
||||
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Container entrypoint helper: export a default value for every configuration
# knob, render the conf/*.tpl configuration templates against those values,
# then patch the bundled nginx frontend config.
# Requires DOLPHINSCHEDULER_HOME to be set by the image — TODO confirm.

# Abort on the first failing command.
set -e

echo "init env variables"

# Define parameters default value.
#============================================================================
# Database Source
#============================================================================
export POSTGRESQL_HOST=${POSTGRESQL_HOST:-"127.0.0.1"}
export POSTGRESQL_PORT=${POSTGRESQL_PORT:-"5432"}
export POSTGRESQL_USERNAME=${POSTGRESQL_USERNAME:-"root"}
export POSTGRESQL_PASSWORD=${POSTGRESQL_PASSWORD:-"root"}

#============================================================================
# System
#============================================================================
export DOLPHINSCHEDULER_ENV_PATH=${DOLPHINSCHEDULER_ENV_PATH:-"/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh"}

#============================================================================
# Zookeeper
#============================================================================
export TASK_QUEUE=${TASK_QUEUE:-"zookeeper"}
export ZOOKEEPER_QUORUM=${ZOOKEEPER_QUORUM:-"127.0.0.1:2181"}

#============================================================================
# Master Server
#============================================================================
export MASTER_EXEC_THREADS=${MASTER_EXEC_THREADS:-"100"}
export MASTER_EXEC_TASK_NUM=${MASTER_EXEC_TASK_NUM:-"20"}
export MASTER_HEARTBEAT_INTERVAL=${MASTER_HEARTBEAT_INTERVAL:-"10"}
export MASTER_TASK_COMMIT_RETRYTIMES=${MASTER_TASK_COMMIT_RETRYTIMES:-"5"}
export MASTER_TASK_COMMIT_INTERVAL=${MASTER_TASK_COMMIT_INTERVAL:-"1000"}
export MASTER_MAX_CPULOAD_AVG=${MASTER_MAX_CPULOAD_AVG:-"100"}
export MASTER_RESERVED_MEMORY=${MASTER_RESERVED_MEMORY:-"0.1"}

#============================================================================
# Worker Server
#============================================================================
export WORKER_EXEC_THREADS=${WORKER_EXEC_THREADS:-"100"}
export WORKER_HEARTBEAT_INTERVAL=${WORKER_HEARTBEAT_INTERVAL:-"10"}
export WORKER_FETCH_TASK_NUM=${WORKER_FETCH_TASK_NUM:-"3"}
export WORKER_MAX_CPULOAD_AVG=${WORKER_MAX_CPULOAD_AVG:-"100"}
export WORKER_RESERVED_MEMORY=${WORKER_RESERVED_MEMORY:-"0.1"}

#============================================================================
# Alert Server
#============================================================================
# XLS FILE
export XLS_FILE_PATH=${XLS_FILE_PATH:-"/tmp/xls"}
# mail
export MAIL_SERVER_HOST=${MAIL_SERVER_HOST:-""}
export MAIL_SERVER_PORT=${MAIL_SERVER_PORT:-""}
export MAIL_SENDER=${MAIL_SENDER:-""}
export MAIL_USER=${MAIL_USER:-""}
export MAIL_PASSWD=${MAIL_PASSWD:-""}
export MAIL_SMTP_STARTTLS_ENABLE=${MAIL_SMTP_STARTTLS_ENABLE:-"true"}
export MAIL_SMTP_SSL_ENABLE=${MAIL_SMTP_SSL_ENABLE:-"false"}
export MAIL_SMTP_SSL_TRUST=${MAIL_SMTP_SSL_TRUST:-""}
# wechat
export ENTERPRISE_WECHAT_ENABLE=${ENTERPRISE_WECHAT_ENABLE:-"false"}
export ENTERPRISE_WECHAT_CORP_ID=${ENTERPRISE_WECHAT_CORP_ID:-""}
export ENTERPRISE_WECHAT_SECRET=${ENTERPRISE_WECHAT_SECRET:-""}
export ENTERPRISE_WECHAT_AGENT_ID=${ENTERPRISE_WECHAT_AGENT_ID:-""}
export ENTERPRISE_WECHAT_USERS=${ENTERPRISE_WECHAT_USERS:-""}

#============================================================================
# Frontend
#============================================================================
export FRONTEND_API_SERVER_HOST=${FRONTEND_API_SERVER_HOST:-"127.0.0.1"}
export FRONTEND_API_SERVER_PORT=${FRONTEND_API_SERVER_PORT:-"12345"}

echo "generate app config"
# Render every conf/*.tpl file to its non-.tpl counterpart, expanding ${VAR}
# references in the template against the environment exported above.
# FIX: the previous `ls ${DIR}/conf/ | grep ".tpl" | while read line` pipeline
# parsed `ls` output (breaks on whitespace in names) and used an unescaped-dot
# regex that matched any name merely CONTAINING "tpl"; a quoted glob selects
# exactly the *.tpl files.
for tpl_path in "${DOLPHINSCHEDULER_HOME}"/conf/*.tpl; do
    # With no matching template, the glob stays literal — skip that case.
    [ -e "${tpl_path}" ] || continue
    # Re-evaluate the file body as a here-document so variable references are
    # substituted. Assumes templates never contain a bare "EOF" line or
    # backticks/$( ) meant to stay literal — same constraint as before.
    eval "cat << EOF
$(cat "${tpl_path}")
EOF
" > "${tpl_path%.tpl}"
done

echo "generate nginx config"
# Point the bundled nginx at the API server; the placeholder tokens are plain
# text in the shipped conf, so a literal sed substitution suffices.
sed -i "s/FRONTEND_API_SERVER_HOST/${FRONTEND_API_SERVER_HOST}/g" /etc/nginx/conf.d/dolphinscheduler.conf
sed -i "s/FRONTEND_API_SERVER_PORT/${FRONTEND_API_SERVER_PORT}/g" /etc/nginx/conf.d/dolphinscheduler.conf
@ -0,0 +1,44 @@
|
||||
/* |
||||
* Licensed to the Apache Software Foundation (ASF) under one or more |
||||
* contributor license agreements. See the NOTICE file distributed with |
||||
* this work for additional information regarding copyright ownership. |
||||
* The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
* (the "License"); you may not use this file except in compliance with |
||||
* the License. You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
package org.apache.dolphinscheduler.alert.template; |
||||
|
||||
import org.apache.dolphinscheduler.common.enums.ShowType; |
||||
|
||||
/** |
||||
* alert message template |
||||
*/ |
||||
public interface AlertTemplate { |
||||
|
||||
/** |
||||
* get a message from a specified alert template |
||||
* @param content alert message content |
||||
* @param showType show type |
||||
* @param showAll whether to show all |
||||
* @return a message from a specified alert template |
||||
*/ |
||||
String getMessageFromTemplate(String content, ShowType showType,boolean showAll); |
||||
|
||||
/** |
||||
* default showAll is true |
||||
* @param content alert message content |
||||
* @param showType show type |
||||
* @return a message from a specified alert template |
||||
*/ |
||||
default String getMessageFromTemplate(String content,ShowType showType){ |
||||
return getMessageFromTemplate(content,showType,true); |
||||
} |
||||
} |
@ -0,0 +1,54 @@
|
||||
/* |
||||
* Licensed to the Apache Software Foundation (ASF) under one or more |
||||
* contributor license agreements. See the NOTICE file distributed with |
||||
* this work for additional information regarding copyright ownership. |
||||
* The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
* (the "License"); you may not use this file except in compliance with |
||||
* the License. You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
package org.apache.dolphinscheduler.alert.template; |
||||
|
||||
import org.apache.dolphinscheduler.alert.template.impl.DefaultHTMLTemplate; |
||||
import org.apache.dolphinscheduler.alert.utils.Constants; |
||||
import org.apache.dolphinscheduler.alert.utils.PropertyUtils; |
||||
import org.apache.dolphinscheduler.common.utils.StringUtils; |
||||
import org.slf4j.Logger; |
||||
import org.slf4j.LoggerFactory; |
||||
|
||||
/** |
||||
* the alert template factory |
||||
*/ |
||||
public class AlertTemplateFactory { |
||||
|
||||
private static final Logger logger = LoggerFactory.getLogger(AlertTemplateFactory.class); |
||||
|
||||
private static final String alertTemplate = PropertyUtils.getString(Constants.ALERT_TEMPLATE); |
||||
|
||||
private AlertTemplateFactory(){} |
||||
|
||||
/** |
||||
* get a template from alert.properties conf file |
||||
* @return a template, default is DefaultHTMLTemplate |
||||
*/ |
||||
public static AlertTemplate getMessageTemplate() { |
||||
|
||||
if(StringUtils.isEmpty(alertTemplate)){ |
||||
return new DefaultHTMLTemplate(); |
||||
} |
||||
|
||||
switch (alertTemplate){ |
||||
case "html": |
||||
return new DefaultHTMLTemplate(); |
||||
default: |
||||
throw new IllegalArgumentException(String.format("not support alert template: %s",alertTemplate)); |
||||
} |
||||
} |
||||
} |
@ -0,0 +1,161 @@
|
||||
/* |
||||
* Licensed to the Apache Software Foundation (ASF) under one or more |
||||
* contributor license agreements. See the NOTICE file distributed with |
||||
* this work for additional information regarding copyright ownership. |
||||
* The ASF licenses this file to You under the Apache License, Version 2.0 |
||||
* (the "License"); you may not use this file except in compliance with |
||||
* the License. You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
package org.apache.dolphinscheduler.alert.template.impl; |
||||
|
||||
import org.apache.dolphinscheduler.alert.template.AlertTemplate; |
||||
import org.apache.dolphinscheduler.alert.utils.Constants; |
||||
import org.apache.dolphinscheduler.alert.utils.JSONUtils; |
||||
import org.apache.dolphinscheduler.alert.utils.MailUtils; |
||||
import org.apache.dolphinscheduler.common.enums.ShowType; |
||||
import org.apache.dolphinscheduler.common.utils.StringUtils; |
||||
import org.slf4j.Logger; |
||||
import org.slf4j.LoggerFactory; |
||||
|
||||
import java.util.*; |
||||
|
||||
import static org.apache.dolphinscheduler.common.utils.Preconditions.*; |
||||
|
||||
/** |
||||
* the default html alert message template |
||||
*/ |
||||
public class DefaultHTMLTemplate implements AlertTemplate { |
||||
|
||||
public static final Logger logger = LoggerFactory.getLogger(DefaultHTMLTemplate.class); |
||||
|
||||
@Override |
||||
public String getMessageFromTemplate(String content, ShowType showType,boolean showAll) { |
||||
|
||||
switch (showType){ |
||||
case TABLE: |
||||
return getTableTypeMessage(content,showAll); |
||||
case TEXT: |
||||
return getTextTypeMessage(content,showAll); |
||||
default: |
||||
throw new IllegalArgumentException(String.format("not support showType: %s in DefaultHTMLTemplate",showType)); |
||||
} |
||||
} |
||||
|
||||
/** |
||||
* get alert message which type is TABLE |
||||
* @param content message content |
||||
* @param showAll weather to show all |
||||
* @return alert message |
||||
*/ |
||||
private String getTableTypeMessage(String content,boolean showAll){ |
||||
|
||||
if (StringUtils.isNotEmpty(content)){ |
||||
List<LinkedHashMap> mapItemsList = JSONUtils.toList(content, LinkedHashMap.class); |
||||
|
||||
if(!showAll && mapItemsList.size() > Constants.NUMBER_1000){ |
||||
mapItemsList = mapItemsList.subList(0,Constants.NUMBER_1000); |
||||
} |
||||
|
||||
StringBuilder contents = new StringBuilder(200); |
||||
|
||||
boolean flag = true; |
||||
|
||||
String title = ""; |
||||
for (LinkedHashMap mapItems : mapItemsList){ |
||||
|
||||
Set<Map.Entry<String, Object>> entries = mapItems.entrySet(); |
||||
|
||||
Iterator<Map.Entry<String, Object>> iterator = entries.iterator(); |
||||
|
||||
StringBuilder t = new StringBuilder(Constants.TR); |
||||
StringBuilder cs = new StringBuilder(Constants.TR); |
||||
while (iterator.hasNext()){ |
||||
|
||||
Map.Entry<String, Object> entry = iterator.next(); |
||||
t.append(Constants.TH).append(entry.getKey()).append(Constants.TH_END); |
||||
cs.append(Constants.TD).append(String.valueOf(entry.getValue())).append(Constants.TD_END); |
||||
|
||||
} |
||||
t.append(Constants.TR_END); |
||||
cs.append(Constants.TR_END); |
||||
if (flag){ |
||||
title = t.toString(); |
||||
} |
||||
flag = false; |
||||
contents.append(cs); |
||||
} |
||||
|
||||
return getMessageFromHtmlTemplate(title,contents.toString()); |
||||
} |
||||
|
||||
return content; |
||||
} |
||||
|
||||
/** |
||||
* get alert message which type is TEXT |
||||
* @param content message content |
||||
* @param showAll weather to show all |
||||
* @return alert message |
||||
*/ |
||||
private String getTextTypeMessage(String content,boolean showAll){ |
||||
|
||||
if (StringUtils.isNotEmpty(content)){ |
||||
List<String> list; |
||||
try { |
||||
list = JSONUtils.toList(content,String.class); |
||||
}catch (Exception e){ |
||||
logger.error("json format exception",e); |
||||
return null; |
||||
} |
||||
|
||||
StringBuilder contents = new StringBuilder(100); |
||||
for (String str : list){ |
||||
contents.append(Constants.TR); |
||||
contents.append(Constants.TD).append(str).append(Constants.TD_END); |
||||
contents.append(Constants.TR_END); |
||||
} |
||||
|
||||
return getMessageFromHtmlTemplate(null,contents.toString()); |
||||
|
||||
} |
||||
|
||||
return content; |
||||
} |
||||
|
||||
/** |
||||
* get alert message from a html template |
||||
* @param title message title |
||||
* @param content message content |
||||
* @return alert message which use html template |
||||
*/ |
||||
private String getMessageFromHtmlTemplate(String title,String content){ |
||||
|
||||
checkNotNull(content); |
||||
String htmlTableThead = StringUtils.isEmpty(title) ? "" : String.format("<thead>%s</thead>\n",title); |
||||
|
||||
return "<html>\n" + |
||||
" <head>\n" + |
||||
" <title>dolphinscheduler</title>\n" + |
||||
" <meta name='Keywords' content=''>\n" + |
||||
" <meta name='Description' content=''>\n" + |
||||
" <style type=\"text/css\">\n" + |
||||
" table {margin-top:0px;padding-top:0px;border:1px solid;font-size: 14px;color: #333333;border-width: 1px;border-color: #666666;border-collapse: collapse;}\n" + |
||||
" table th {border-width: 1px;padding: 8px;border-style: solid;border-color: #666666;background-color: #dedede;text-align: right;}\n" + |
||||
" table td {border-width: 1px;padding: 8px;border-style: solid;border-color: #666666;background-color: #ffffff;text-align: right;}\n" + |
||||
" </style>\n" + |
||||
" </head>\n" + |
||||
" <body style=\"margin:0;padding:0\">\n" + |
||||
" <table border=\"1px\" cellpadding=\"5px\" cellspacing=\"-10px\">\n" + htmlTableThead + content + |
||||
" </table>\n" + |
||||
" </body>\n" + |
||||
"</html>"; |
||||
} |
||||
} |
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue