
Merge pull request #9 from apache/dev

update code
BoYiZhang committed 5 years ago via GitHub
commit 72408b26b9
51 changed files (number of changed lines in parentheses):

  1. .github/workflows/ci_e2e.yml (13)
  2. .github/workflows/ci_ut.yml (10)
  3. docker/build/Dockerfile (36)
  4. docker/build/README.md (6)
  5. docker/build/README_zh_CN.md (6)
  6. docker/build/checkpoint.sh (0)
  7. docker/build/conf/dolphinscheduler/alert.properties.tpl (0)
  8. docker/build/conf/dolphinscheduler/application-api.properties.tpl (0)
  9. docker/build/conf/dolphinscheduler/common.properties.tpl (0)
  10. docker/build/conf/dolphinscheduler/datasource.properties.tpl (0)
  11. docker/build/conf/dolphinscheduler/env/dolphinscheduler_env.sh (0)
  12. docker/build/conf/dolphinscheduler/logback/logback-alert.xml (0)
  13. docker/build/conf/dolphinscheduler/logback/logback-api.xml (0)
  14. docker/build/conf/dolphinscheduler/logback/logback-master.xml (0)
  15. docker/build/conf/dolphinscheduler/logback/logback-worker.xml (0)
  16. docker/build/conf/dolphinscheduler/master.properties.tpl (0)
  17. docker/build/conf/dolphinscheduler/quartz.properties.tpl (0)
  18. docker/build/conf/dolphinscheduler/worker.properties.tpl (0)
  19. docker/build/conf/dolphinscheduler/zookeeper.properties.tpl (0)
  20. docker/build/conf/nginx/dolphinscheduler.conf (0)
  21. docker/build/conf/zookeeper/zoo.cfg (0)
  22. docker/build/hooks/build (14)
  23. docker/build/hooks/build.bat (12)
  24. docker/build/hooks/push (0)
  25. docker/build/hooks/push.bat (0)
  26. docker/build/startup-init-conf.sh (0)
  27. docker/build/startup.sh (53)
  28. docker/docker-compose.yml (40)
  29. docker/docker-swarm/check (2)
  30. docker/docker-swarm/docker-compose.yml (47)
  31. docker/docker-swarm/docker-stack.yml (27)
  32. docker/kubernetes/dolphinscheduler/values.yaml (6)
  33. docker/postgres/docker-entrypoint-initdb/init.sql (763)
  34. dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/plugin/EmailAlertPlugin.java (2)
  35. dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/runner/AlertSender.java (2)
  36. dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/Constants.java (2)
  37. dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/plugin/EmailAlertPluginTest.java (2)
  38. dolphinscheduler-common/pom.xml (14)
  39. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/plugin/FilePluginManager.java (4)
  40. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/RetryerUtils.java (138)
  41. dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/RetryerUtilsTest.java (325)
  42. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/dispatch/executor/NettyExecutorManager.java (54)
  43. dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskExecuteProcessor.java (27)
  44. dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/ZookeeperOperator.java (7)
  45. pom.xml (8)
  46. script/scp-hosts.sh (11)
  47. script/start-all.sh (3)
  48. script/stop-all.sh (3)
  49. sql/dolphinscheduler-postgre.sql (3)
  50. sql/dolphinscheduler_mysql.sql (1)
  51. tools/dependencies/known-dependencies.txt (1)

13
.github/workflows/ci_e2e.yml

@@ -44,15 +44,14 @@ jobs:
${{ runner.os }}-maven-
- name: Build Image
run: |
export VERSION=`cat $(pwd)/pom.xml| grep "SNAPSHOT</version>" | awk -F "-SNAPSHOT" '{print $1}' | awk -F ">" '{print $2}'`
sh ./dockerfile/hooks/build
sh ./docker/build/hooks/build
- name: Docker Run
run: |
VERSION=`cat $(pwd)/pom.xml| grep "SNAPSHOT</version>" | awk -F "-SNAPSHOT" '{print $1}' | awk -F ">" '{print $2}'`
mkdir -p /tmp/logs
docker run -dit -e POSTGRESQL_USERNAME=test -e POSTGRESQL_PASSWORD=test -v /tmp/logs:/opt/dolphinscheduler/logs -p 8888:8888 dolphinscheduler:$VERSION all
export VERSION=$(cat $(pwd)/pom.xml | grep '<version>' -m 1 | awk '{print $1}' | sed 's/<version>//' | sed 's/<\/version>//')
sed -i "s/apache\/dolphinscheduler:latest/apache\/dolphinscheduler:${VERSION}/g" $(pwd)/docker/docker-swarm/docker-compose.yml
docker-compose -f $(pwd)/docker/docker-swarm/docker-compose.yml up -d
- name: Check Server Status
run: sh ./dockerfile/hooks/check
run: sh $(pwd)/docker/docker-swarm/check
- name: Prepare e2e env
run: |
sudo apt-get install -y libxss1 libappindicator1 libindicator7 xvfb unzip libgbm1
@@ -70,6 +69,6 @@ jobs:
uses: actions/upload-artifact@v1
with:
name: dslogs
path: /tmp/logs
path: /var/lib/docker/volumes/docker-swarm_dolphinscheduler-logs/_data
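
For reference, a minimal standalone sketch of what the new version-resolution one-liner does, assuming (as the workflow itself appears to assume) that the project version is the first <version> element in the root pom.xml:

```bash
#!/bin/sh
# Grab the first <version> element of the root pom.xml and strip the tags.
VERSION=$(cat pom.xml | grep '<version>' -m 1 | awk '{print $1}' | sed 's/<version>//' | sed 's/<\/version>//')
echo "Resolved version: ${VERSION}"
# Point the swarm compose file at the freshly built image instead of :latest.
sed -i "s/apache\/dolphinscheduler:latest/apache\/dolphinscheduler:${VERSION}/g" docker/docker-swarm/docker-compose.yml
```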

10
.github/workflows/ci_ut.yml

@@ -21,7 +21,6 @@ on:
branches:
- dev
env:
DOCKER_DIR: ./docker
LOG_DIR: /tmp/dolphinscheduler
name: Unit Test
@@ -47,7 +46,11 @@ jobs:
restore-keys: |
${{ runner.os }}-maven-
- name: Bootstrap database
run: cd ${DOCKER_DIR} && docker-compose up -d
run: |
sed -i "s/: root/: test/g" $(pwd)/docker/docker-swarm/docker-compose.yml
docker-compose -f $(pwd)/docker/docker-swarm/docker-compose.yml create --force-recreate dolphinscheduler-zookeeper dolphinscheduler-postgresql
sudo cp $(pwd)/sql/dolphinscheduler-postgre.sql $(docker volume inspect docker-swarm_dolphinscheduler-postgresql-initdb | grep "Mountpoint" | awk -F "\"" '{print $4}')
docker-compose -f $(pwd)/docker/docker-swarm/docker-compose.yml up -d dolphinscheduler-zookeeper dolphinscheduler-postgresql
- name: Set up JDK 1.8
uses: actions/setup-java@v1
with:
@@ -82,6 +85,5 @@ jobs:
- name: Collect logs
run: |
mkdir -p ${LOG_DIR}
cd ${DOCKER_DIR}
docker-compose logs db > ${LOG_DIR}/db.txt
docker-compose -f $(pwd)/docker/docker-swarm/docker-compose.yml logs dolphinscheduler-postgresql > ${LOG_DIR}/db.txt
continue-on-error: true
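
The subtle part of the new bootstrap step is seeding the schema before PostgreSQL's first start: the SQL file has to land in the host directory backing the /docker-entrypoint-initdb.d volume. A hedged sketch of the same idea, using docker volume inspect's template output instead of the grep/awk parsing above (the volume name assumes the compose project is named docker-swarm, as in the workflow):

```bash
#!/bin/bash
set -e
# Create the service so its named volumes exist, but do not start it yet.
docker-compose -f docker/docker-swarm/docker-compose.yml create --force-recreate dolphinscheduler-postgresql
# Resolve the host mountpoint of the initdb volume.
INITDB_DIR=$(docker volume inspect -f '{{ .Mountpoint }}' docker-swarm_dolphinscheduler-postgresql-initdb)
# Anything in /docker-entrypoint-initdb.d is executed on the database's first start.
sudo cp sql/dolphinscheduler-postgre.sql "${INITDB_DIR}/"
docker-compose -f docker/docker-swarm/docker-compose.yml up -d dolphinscheduler-postgresql
```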

36
dockerfile/Dockerfile → docker/build/Dockerfile

@@ -37,32 +37,19 @@ RUN apk add openjdk8
ENV JAVA_HOME /usr/lib/jvm/java-1.8-openjdk
ENV PATH $JAVA_HOME/bin:$PATH
#3. install zk
RUN cd /opt && \
wget https://downloads.apache.org/zookeeper/zookeeper-3.5.7/apache-zookeeper-3.5.7-bin.tar.gz && \
tar -zxvf apache-zookeeper-3.5.7-bin.tar.gz && \
mv apache-zookeeper-3.5.7-bin zookeeper && \
mkdir -p /tmp/zookeeper && \
rm -rf ./zookeeper-*tar.gz && \
rm -rf /opt/zookeeper/conf/zoo_sample.cfg
ADD ./conf/zookeeper/zoo.cfg /opt/zookeeper/conf
ENV ZK_HOME /opt/zookeeper
ENV PATH $ZK_HOME/bin:$PATH
#3. add dolphinscheduler
ADD ./apache-dolphinscheduler-incubating-${VERSION}-dolphinscheduler-bin.tar.gz /opt/
RUN mv /opt/apache-dolphinscheduler-incubating-${VERSION}-dolphinscheduler-bin/ /opt/dolphinscheduler/
ENV DOLPHINSCHEDULER_HOME /opt/dolphinscheduler
#4. install pg
RUN apk add postgresql postgresql-contrib
#5. add dolphinscheduler
ADD ./apache-dolphinscheduler-incubating-${VERSION}-SNAPSHOT-dolphinscheduler-bin.tar.gz /opt/
RUN mv /opt/apache-dolphinscheduler-incubating-${VERSION}-SNAPSHOT-dolphinscheduler-bin/ /opt/dolphinscheduler/
ENV DOLPHINSCHEDULER_HOME /opt/dolphinscheduler
#6. modify nginx
#5. modify nginx
RUN echo "daemon off;" >> /etc/nginx/nginx.conf && \
rm -rf /etc/nginx/conf.d/*
ADD ./conf/nginx/dolphinscheduler.conf /etc/nginx/conf.d
#7. add configuration and modify permissions and set soft links
#6. add configuration and modify permissions and set soft links
ADD ./checkpoint.sh /root/checkpoint.sh
ADD ./startup-init-conf.sh /root/startup-init-conf.sh
ADD ./startup.sh /root/startup.sh
@@ -75,22 +62,21 @@ RUN chmod +x /root/checkpoint.sh && \
chmod +x /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh && \
chmod +x /opt/dolphinscheduler/script/*.sh && \
chmod +x /opt/dolphinscheduler/bin/*.sh && \
chmod +x /opt/zookeeper/bin/*.sh && \
dos2unix /root/checkpoint.sh && \
dos2unix /root/startup-init-conf.sh && \
dos2unix /root/startup.sh && \
dos2unix /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh && \
dos2unix /opt/dolphinscheduler/script/*.sh && \
dos2unix /opt/dolphinscheduler/bin/*.sh && \
dos2unix /opt/zookeeper/bin/*.sh && \
rm -rf /bin/sh && \
ln -s /bin/bash /bin/sh && \
mkdir -p /tmp/xls
#8. remove apk index cache
RUN rm -rf /var/cache/apk/*
#7. remove apk index cache and disable coredup for sudo
RUN rm -rf /var/cache/apk/* && \
echo "Set disable_coredump false" >> /etc/sudo.conf
#9. expose port
#8. expose port
EXPOSE 2181 2888 3888 5432 5678 1234 12345 50051 8888
ENTRYPOINT ["/sbin/tini", "--", "/root/startup.sh"]
ENTRYPOINT ["/sbin/tini", "--", "/root/startup.sh"]

6
dockerfile/README.md → docker/build/README.md

@@ -109,16 +109,16 @@ In Unix-Like, Example:
```bash
$ cd path/incubator-dolphinscheduler
$ sh ./dockerfile/hooks/build
$ sh ./docker/build/hooks/build
```
In Windows, Example:
```bat
c:\incubator-dolphinscheduler>.\dockerfile\hooks\build.bat
c:\incubator-dolphinscheduler>.\docker\build\hooks\build.bat
```
Please read `./dockerfile/hooks/build` `./dockerfile/hooks/build.bat` script files if you don't understand
Please read `./docker/build/hooks/build` `./docker/build/hooks/build.bat` script files if you don't understand
## Environment Variables

6
dockerfile/README_zh_CN.md → docker/build/README_zh_CN.md

@@ -109,16 +109,16 @@ dolphinscheduler frontend
```bash
$ cd path/incubator-dolphinscheduler
$ sh ./dockerfile/hooks/build
$ sh ./docker/build/hooks/build
```
On Windows, for example:
```bat
c:\incubator-dolphinscheduler>.\dockerfile\hooks\build.bat
c:\incubator-dolphinscheduler>.\docker\build\hooks\build.bat
```
If you don't understand the scripts `./dockerfile/hooks/build` and `./dockerfile/hooks/build.bat`, please read their contents.
If you don't understand the scripts `./docker/build/hooks/build` and `./docker/build/hooks/build.bat`, please read their contents.
## Environment Variables

0
dockerfile/checkpoint.sh → docker/build/checkpoint.sh

0
dockerfile/conf/dolphinscheduler/alert.properties.tpl → docker/build/conf/dolphinscheduler/alert.properties.tpl

0
dockerfile/conf/dolphinscheduler/application-api.properties.tpl → docker/build/conf/dolphinscheduler/application-api.properties.tpl

0
dockerfile/conf/dolphinscheduler/common.properties.tpl → docker/build/conf/dolphinscheduler/common.properties.tpl

0
dockerfile/conf/dolphinscheduler/datasource.properties.tpl → docker/build/conf/dolphinscheduler/datasource.properties.tpl

0
dockerfile/conf/dolphinscheduler/env/dolphinscheduler_env.sh → docker/build/conf/dolphinscheduler/env/dolphinscheduler_env.sh

0
dockerfile/conf/dolphinscheduler/logback/logback-alert.xml → docker/build/conf/dolphinscheduler/logback/logback-alert.xml

0
dockerfile/conf/dolphinscheduler/logback/logback-api.xml → docker/build/conf/dolphinscheduler/logback/logback-api.xml

0
dockerfile/conf/dolphinscheduler/logback/logback-master.xml → docker/build/conf/dolphinscheduler/logback/logback-master.xml

0
dockerfile/conf/dolphinscheduler/logback/logback-worker.xml → docker/build/conf/dolphinscheduler/logback/logback-worker.xml

0
dockerfile/conf/dolphinscheduler/master.properties.tpl → docker/build/conf/dolphinscheduler/master.properties.tpl

0
dockerfile/conf/dolphinscheduler/quartz.properties.tpl → docker/build/conf/dolphinscheduler/quartz.properties.tpl

0
dockerfile/conf/dolphinscheduler/worker.properties.tpl → docker/build/conf/dolphinscheduler/worker.properties.tpl

0
dockerfile/conf/dolphinscheduler/zookeeper.properties.tpl → docker/build/conf/dolphinscheduler/zookeeper.properties.tpl

0
dockerfile/conf/nginx/dolphinscheduler.conf → docker/build/conf/nginx/dolphinscheduler.conf

0
dockerfile/conf/zookeeper/zoo.cfg → docker/build/conf/zookeeper/zoo.cfg

14
dockerfile/hooks/build → docker/build/hooks/build

@@ -24,13 +24,13 @@ printenv
if [ -z "${VERSION}" ]
then
echo "set default environment variable [VERSION]"
VERSION=$(cat $(pwd)/sql/soft_version)
export VERSION=$(cat $(pwd)/pom.xml | grep '<version>' -m 1 | awk '{print $1}' | sed 's/<version>//' | sed 's/<\/version>//')
fi
if [ "${DOCKER_REPO}x" = "x" ]
then
echo "set default environment variable [DOCKER_REPO]"
DOCKER_REPO='dolphinscheduler'
export DOCKER_REPO='apache/dolphinscheduler'
fi
echo "Version: $VERSION"
@@ -42,12 +42,12 @@ echo -e "Current Directory is $(pwd)\n"
echo -e "mvn -B clean compile package -Prelease -Dmaven.test.skip=true"
mvn -B clean compile package -Prelease -Dmaven.test.skip=true
# mv dolphinscheduler-bin.tar.gz file to dockerfile directory
echo -e "mv $(pwd)/dolphinscheduler-dist/target/apache-dolphinscheduler-incubating-${VERSION}-SNAPSHOT-dolphinscheduler-bin.tar.gz $(pwd)/dockerfile/\n"
mv $(pwd)/dolphinscheduler-dist/target/apache-dolphinscheduler-incubating-${VERSION}-SNAPSHOT-dolphinscheduler-bin.tar.gz $(pwd)/dockerfile/
# mv dolphinscheduler-bin.tar.gz file to docker/build directory
echo -e "mv $(pwd)/dolphinscheduler-dist/target/apache-dolphinscheduler-incubating-${VERSION}-dolphinscheduler-bin.tar.gz $(pwd)/docker/build/\n"
mv $(pwd)/dolphinscheduler-dist/target/apache-dolphinscheduler-incubating-${VERSION}-dolphinscheduler-bin.tar.gz $(pwd)/docker/build/
# docker build
echo -e "docker build --build-arg VERSION=${VERSION} -t $DOCKER_REPO:${VERSION} $(pwd)/dockerfile/\n"
docker build --build-arg VERSION=${VERSION} -t $DOCKER_REPO:${VERSION} $(pwd)/dockerfile/
echo -e "docker build --build-arg VERSION=${VERSION} -t $DOCKER_REPO:${VERSION} $(pwd)/docker/build/\n"
sudo docker build --build-arg VERSION=${VERSION} -t $DOCKER_REPO:${VERSION} $(pwd)/docker/build/
echo "------ dolphinscheduler end - build -------"

12
dockerfile/hooks/build.bat → docker/build/hooks/build.bat

@@ -38,13 +38,13 @@ echo "call mvn clean compile package -Prelease"
call mvn clean compile package -Prelease -DskipTests=true
if "%errorlevel%"=="1" goto :mvnFailed
:: move dolphinscheduler-bin.tar.gz file to dockerfile directory
echo "move %cd%\dolphinscheduler-dist\target\apache-dolphinscheduler-incubating-%VERSION%-SNAPSHOT-dolphinscheduler-bin.tar.gz %cd%\dockerfile\"
move %cd%\dolphinscheduler-dist\target\apache-dolphinscheduler-incubating-%VERSION%-SNAPSHOT-dolphinscheduler-bin.tar.gz %cd%\dockerfile\
:: move dolphinscheduler-bin.tar.gz file to docker/build directory
echo "move %cd%\dolphinscheduler-dist\target\apache-dolphinscheduler-incubating-%VERSION%-SNAPSHOT-dolphinscheduler-bin.tar.gz %cd%\docker\build\"
move %cd%\dolphinscheduler-dist\target\apache-dolphinscheduler-incubating-%VERSION%-SNAPSHOT-dolphinscheduler-bin.tar.gz %cd%\docker\build\
:: docker build
echo "docker build --build-arg VERSION=%VERSION% -t %DOCKER_REPO%:%VERSION% %cd%\dockerfile\"
docker build --build-arg VERSION=%VERSION% -t %DOCKER_REPO%:%VERSION% %cd%\dockerfile\
echo "docker build --build-arg VERSION=%VERSION% -t %DOCKER_REPO%:%VERSION% %cd%\docker\build\"
docker build --build-arg VERSION=%VERSION% -t %DOCKER_REPO%:%VERSION% %cd%\docker\build\
if "%errorlevel%"=="1" goto :dockerBuildFailed
echo "------ dolphinscheduler end - build -------"
@@ -53,4 +53,4 @@ echo "------ dolphinscheduler end - build -------"
echo "MAVEN PACKAGE FAILED!"
:dockerBuildFailed
echo "DOCKER BUILD FAILED!"
echo "DOCKER BUILD FAILED!"

0
dockerfile/hooks/push → docker/build/hooks/push

0
dockerfile/hooks/push.bat → docker/build/hooks/push.bat

0
dockerfile/startup-init-conf.sh → docker/build/startup-init-conf.sh

53
dockerfile/startup.sh → docker/build/startup.sh

@@ -24,31 +24,6 @@ DOLPHINSCHEDULER_LOGS=${DOLPHINSCHEDULER_HOME}/logs
# start postgresql
initPostgreSQL() {
echo "checking postgresql"
if [[ "${POSTGRESQL_HOST}" = "127.0.0.1" || "${POSTGRESQL_HOST}" = "localhost" ]]; then
export PGPORT=${POSTGRESQL_PORT}
echo "start postgresql service"
rc-service postgresql restart
# role if not exists, create
flag=$(sudo -u postgres psql -tAc "SELECT 1 FROM pg_roles WHERE rolname='${POSTGRESQL_USERNAME}'")
if [ -z "${flag}" ]; then
echo "create user"
sudo -u postgres psql -tAc "create user ${POSTGRESQL_USERNAME} with password '${POSTGRESQL_PASSWORD}'"
fi
# database if not exists, create
flag=$(sudo -u postgres psql -tAc "select 1 from pg_database where datname='dolphinscheduler'")
if [ -z "${flag}" ]; then
echo "init db"
sudo -u postgres psql -tAc "create database dolphinscheduler owner ${POSTGRESQL_USERNAME}"
fi
# grant
sudo -u postgres psql -tAc "grant all privileges on database dolphinscheduler to ${POSTGRESQL_USERNAME}"
fi
echo "test postgresql service"
while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
counter=$((counter+1))
@@ -73,24 +48,18 @@ initPostgreSQL() {
# start zk
initZK() {
echo -e "checking zookeeper"
if [[ "${ZOOKEEPER_QUORUM}" = "127.0.0.1:2181" || "${ZOOKEEPER_QUORUM}" = "localhost:2181" ]]; then
echo "start local zookeeper"
/opt/zookeeper/bin/zkServer.sh restart
else
echo "connect remote zookeeper"
echo "${ZOOKEEPER_QUORUM}" | awk -F ',' 'BEGIN{ i=1 }{ while( i <= NF ){ print $i; i++ } }' | while read line; do
while ! nc -z ${line%:*} ${line#*:}; do
counter=$((counter+1))
if [ $counter == 30 ]; then
echo "Error: Couldn't connect to zookeeper."
exit 1
fi
echo "Trying to connect to zookeeper at ${line}. Attempt $counter."
sleep 5
done
echo "connect remote zookeeper"
echo "${ZOOKEEPER_QUORUM}" | awk -F ',' 'BEGIN{ i=1 }{ while( i <= NF ){ print $i; i++ } }' | while read line; do
while ! nc -z ${line%:*} ${line#*:}; do
counter=$((counter+1))
if [ $counter == 30 ]; then
echo "Error: Couldn't connect to zookeeper."
exit 1
fi
echo "Trying to connect to zookeeper at ${line}. Attempt $counter."
sleep 5
done
fi
done
}
# start nginx
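
The rewritten initZK no longer starts an embedded ZooKeeper; it only waits for the configured quorum to become reachable. A compact sketch of the same reachability probe, splitting the comma-separated quorum with tr instead of the awk loop above (the quorum value is a placeholder):

```bash
#!/bin/sh
ZOOKEEPER_QUORUM=${ZOOKEEPER_QUORUM:-"zk1:2181,zk2:2181"}   # placeholder quorum
for node in $(echo "${ZOOKEEPER_QUORUM}" | tr ',' ' '); do
    # nc -z only checks that the TCP port accepts connections.
    if nc -z "${node%:*}" "${node#*:}"; then
        echo "zookeeper reachable: ${node}"
    else
        echo "zookeeper unreachable: ${node}"
    fi
done
```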

40
docker/docker-compose.yml

@@ -1,40 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
version: '2'
services:
zookeeper:
image: zookeeper
restart: always
container_name: zookeeper
ports:
- "2181:2181"
environment:
ZOO_MY_ID: 1
ZOO_4LW_COMMANDS_WHITELIST: srvr,ruok,wchs,cons
db:
image: postgres
container_name: postgres
environment:
- POSTGRES_USER=test
- POSTGRES_PASSWORD=test
- POSTGRES_DB=dolphinscheduler
ports:
- "5432:5432"
volumes:
- pgdata:/var/lib/postgresql/data
- ./postgres/docker-entrypoint-initdb:/docker-entrypoint-initdb.d
volumes:
pgdata:

2
dockerfile/hooks/check → docker/docker-swarm/check

@@ -17,7 +17,7 @@
#
echo "------ dolphinscheduler check - server - status -------"
sleep 60
server_num=$(docker top `docker container list | grep '/sbin/tini' | awk '{print $1}'`| grep java | grep "dolphinscheduler" | awk -F 'classpath ' '{print $2}' | awk '{print $2}' | sort | uniq -c | wc -l)
server_num=$(docker-compose -f $(pwd)/docker/docker-swarm/docker-compose.yml top | grep java | grep "dolphinscheduler" | awk -F 'classpath ' '{print $2}' | awk '{print $2}' | sort | uniq -c | wc -l)
if [ $server_num -eq 5 ]
then
echo "Server all start successfully"

47
docker/docker-swarm/docker-compose.yml

@@ -30,6 +30,7 @@ services:
POSTGRESQL_DATABASE: dolphinscheduler
volumes:
- dolphinscheduler-postgresql:/bitnami/postgresql
- dolphinscheduler-postgresql-initdb:/docker-entrypoint-initdb.d
networks:
- dolphinscheduler
@@ -41,13 +42,14 @@ services:
environment:
TZ: Asia/Shanghai
ALLOW_ANONYMOUS_LOGIN: "yes"
ZOO_4LW_COMMANDS_WHITELIST: srvr,ruok,wchs,cons
volumes:
- dolphinscheduler-zookeeper:/bitnami/zookeeper
networks:
- dolphinscheduler
dolphinscheduler-api:
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
image: apache/dolphinscheduler:latest
container_name: dolphinscheduler-api
command: ["api-server"]
ports:
@@ -70,12 +72,12 @@ services:
- dolphinscheduler-postgresql
- dolphinscheduler-zookeeper
volumes:
- dolphinscheduler-api:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler
dolphinscheduler-frontend:
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
image: apache/dolphinscheduler:latest
container_name: dolphinscheduler-frontend
command: ["frontend"]
ports:
@@ -93,12 +95,12 @@ services:
depends_on:
- dolphinscheduler-api
volumes:
- dolphinscheduler-frontend:/var/log/nginx
- dolphinscheduler-logs:/var/log/nginx
networks:
- dolphinscheduler
dolphinscheduler-alert:
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
image: apache/dolphinscheduler:latest
container_name: dolphinscheduler-alert
command: ["alert-server"]
environment:
@@ -130,18 +132,18 @@ services:
start_period: 30s
depends_on:
- dolphinscheduler-postgresql
volumes:
- dolphinscheduler-alert:/opt/dolphinscheduler/logs
networks:
volumes:
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler
dolphinscheduler-master:
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
image: apache/dolphinscheduler:latest
container_name: dolphinscheduler-master
command: ["master-server"]
ports:
ports:
- 5678:5678
environment:
environment:
TZ: Asia/Shanghai
MASTER_EXEC_THREADS: "100"
MASTER_EXEC_TASK_NUM: "20"
@@ -162,22 +164,22 @@ services:
timeout: 5s
retries: 3
start_period: 30s
depends_on:
depends_on:
- dolphinscheduler-postgresql
- dolphinscheduler-zookeeper
volumes:
- dolphinscheduler-master:/opt/dolphinscheduler/logs
volumes:
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler
dolphinscheduler-worker:
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
image: apache/dolphinscheduler:latest
container_name: dolphinscheduler-worker
command: ["worker-server"]
ports:
ports:
- 1234:1234
- 50051:50051
environment:
environment:
TZ: Asia/Shanghai
WORKER_EXEC_THREADS: "100"
WORKER_HEARTBEAT_INTERVAL: "10"
@@ -209,7 +211,7 @@ services:
source: dolphinscheduler-worker-data
target: /tmp/dolphinscheduler
- type: volume
source: dolphinscheduler-worker-logs
source: dolphinscheduler-logs
target: /opt/dolphinscheduler/logs
networks:
- dolphinscheduler
@@ -220,13 +222,10 @@ networks:
volumes:
dolphinscheduler-postgresql:
dolphinscheduler-postgresql-initdb:
dolphinscheduler-zookeeper:
dolphinscheduler-api:
dolphinscheduler-frontend:
dolphinscheduler-alert:
dolphinscheduler-master:
dolphinscheduler-worker-data:
dolphinscheduler-worker-logs:
dolphinscheduler-logs:
configs:
dolphinscheduler-worker-task-env:

27
docker/docker-swarm/docker-stack.yml

@@ -42,6 +42,7 @@ services:
environment:
TZ: Asia/Shanghai
ALLOW_ANONYMOUS_LOGIN: "yes"
ZOO_4LW_COMMANDS_WHITELIST: srvr,ruok,wchs,cons
volumes:
- dolphinscheduler-zookeeper:/bitnami/zookeeper
networks:
@@ -51,7 +52,7 @@ services:
replicas: 1
dolphinscheduler-api:
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
image: apache/dolphinscheduler:latest
command: ["api-server"]
ports:
- 12345:12345
@@ -70,7 +71,7 @@ services:
retries: 3
start_period: 30s
volumes:
- dolphinscheduler-api:/opt/dolphinscheduler/logs
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler
deploy:
@@ -78,7 +79,7 @@ services:
replicas: 1
dolphinscheduler-frontend:
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
image: apache/dolphinscheduler:latest
command: ["frontend"]
ports:
- 8888:8888
@@ -93,7 +94,7 @@ services:
retries: 3
start_period: 30s
volumes:
- dolphinscheduler-frontend:/var/log/nginx
- dolphinscheduler-logs:/var/log/nginx
networks:
- dolphinscheduler
deploy:
@@ -101,7 +102,7 @@ services:
replicas: 1
dolphinscheduler-alert:
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
image: apache/dolphinscheduler:latest
command: ["alert-server"]
environment:
TZ: Asia/Shanghai
@@ -131,7 +132,7 @@ services:
retries: 3
start_period: 30s
volumes:
- dolphinscheduler-alert:/opt/dolphinscheduler/logs
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler
deploy:
@@ -139,7 +140,7 @@ services:
replicas: 1
dolphinscheduler-master:
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
image: apache/dolphinscheduler:latest
command: ["master-server"]
ports:
- 5678:5678
@@ -165,7 +166,7 @@ services:
retries: 3
start_period: 30s
volumes:
- dolphinscheduler-master:/opt/dolphinscheduler/logs
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler
deploy:
@@ -173,7 +174,7 @@ services:
replicas: 1
dolphinscheduler-worker:
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
image: apache/dolphinscheduler:latest
command: ["worker-server"]
ports:
- 1234:1234
@@ -201,7 +202,7 @@ services:
start_period: 30s
volumes:
- dolphinscheduler-worker-data:/tmp/dolphinscheduler
- dolphinscheduler-worker-logs:/opt/dolphinscheduler/logs
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
configs:
- source: dolphinscheduler-worker-task-env
target: /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh
@@ -218,12 +219,8 @@ networks:
volumes:
dolphinscheduler-postgresql:
dolphinscheduler-zookeeper:
dolphinscheduler-api:
dolphinscheduler-frontend:
dolphinscheduler-alert:
dolphinscheduler-master:
dolphinscheduler-worker-data:
dolphinscheduler-worker-logs:
dolphinscheduler-logs:
configs:
dolphinscheduler-worker-task-env:

6
docker/kubernetes/dolphinscheduler/values.yaml

@@ -25,9 +25,9 @@ fullnameOverride: ""
timezone: "Asia/Shanghai"
image:
registry: "docker.io"
registry: "apache"
repository: "dolphinscheduler"
tag: "1.3.0"
tag: "latest"
pullPolicy: "IfNotPresent"
imagePullSecrets: []
@@ -56,6 +56,8 @@ externalDatabase:
zookeeper:
enabled: true
taskQueue: "zookeeper"
config:
ZOO_4LW_COMMANDS_WHITELIST: srvr,ruok,wchs,cons
service:
port: "2181"
persistence:
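
For completeness, a hedged sketch of installing the chart with the new image coordinates overridden on the command line (the release name and chart path are assumptions based on this repository layout, not part of the PR):

```bash
# Helm 3 style install from the repository root.
helm install dolphinscheduler ./docker/kubernetes/dolphinscheduler \
  --set image.registry=apache \
  --set image.repository=dolphinscheduler \
  --set image.tag=latest
```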

763
docker/postgres/docker-entrypoint-initdb/init.sql

@@ -1,763 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
DROP TABLE IF EXISTS QRTZ_FIRED_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_PAUSED_TRIGGER_GRPS;
DROP TABLE IF EXISTS QRTZ_SCHEDULER_STATE;
DROP TABLE IF EXISTS QRTZ_LOCKS;
DROP TABLE IF EXISTS QRTZ_SIMPLE_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_SIMPROP_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_CRON_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_BLOB_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_JOB_DETAILS;
DROP TABLE IF EXISTS QRTZ_CALENDARS;
CREATE TABLE QRTZ_JOB_DETAILS(
SCHED_NAME character varying(120) NOT NULL,
JOB_NAME character varying(200) NOT NULL,
JOB_GROUP character varying(200) NOT NULL,
DESCRIPTION character varying(250) NULL,
JOB_CLASS_NAME character varying(250) NOT NULL,
IS_DURABLE boolean NOT NULL,
IS_NONCONCURRENT boolean NOT NULL,
IS_UPDATE_DATA boolean NOT NULL,
REQUESTS_RECOVERY boolean NOT NULL,
JOB_DATA bytea NULL);
alter table QRTZ_JOB_DETAILS add primary key(SCHED_NAME,JOB_NAME,JOB_GROUP);
CREATE TABLE QRTZ_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
JOB_NAME character varying(200) NOT NULL,
JOB_GROUP character varying(200) NOT NULL,
DESCRIPTION character varying(250) NULL,
NEXT_FIRE_TIME BIGINT NULL,
PREV_FIRE_TIME BIGINT NULL,
PRIORITY INTEGER NULL,
TRIGGER_STATE character varying(16) NOT NULL,
TRIGGER_TYPE character varying(8) NOT NULL,
START_TIME BIGINT NOT NULL,
END_TIME BIGINT NULL,
CALENDAR_NAME character varying(200) NULL,
MISFIRE_INSTR SMALLINT NULL,
JOB_DATA bytea NULL) ;
alter table QRTZ_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_SIMPLE_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
REPEAT_COUNT BIGINT NOT NULL,
REPEAT_INTERVAL BIGINT NOT NULL,
TIMES_TRIGGERED BIGINT NOT NULL) ;
alter table QRTZ_SIMPLE_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_CRON_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
CRON_EXPRESSION character varying(120) NOT NULL,
TIME_ZONE_ID character varying(80)) ;
alter table QRTZ_CRON_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_SIMPROP_TRIGGERS
(
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
STR_PROP_1 character varying(512) NULL,
STR_PROP_2 character varying(512) NULL,
STR_PROP_3 character varying(512) NULL,
INT_PROP_1 INT NULL,
INT_PROP_2 INT NULL,
LONG_PROP_1 BIGINT NULL,
LONG_PROP_2 BIGINT NULL,
DEC_PROP_1 NUMERIC(13,4) NULL,
DEC_PROP_2 NUMERIC(13,4) NULL,
BOOL_PROP_1 boolean NULL,
BOOL_PROP_2 boolean NULL) ;
alter table QRTZ_SIMPROP_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_BLOB_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
BLOB_DATA bytea NULL) ;
alter table QRTZ_BLOB_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_CALENDARS (
SCHED_NAME character varying(120) NOT NULL,
CALENDAR_NAME character varying(200) NOT NULL,
CALENDAR bytea NOT NULL) ;
alter table QRTZ_CALENDARS add primary key(SCHED_NAME,CALENDAR_NAME);
CREATE TABLE QRTZ_PAUSED_TRIGGER_GRPS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL) ;
alter table QRTZ_PAUSED_TRIGGER_GRPS add primary key(SCHED_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_FIRED_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
ENTRY_ID character varying(95) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
INSTANCE_NAME character varying(200) NOT NULL,
FIRED_TIME BIGINT NOT NULL,
SCHED_TIME BIGINT NOT NULL,
PRIORITY INTEGER NOT NULL,
STATE character varying(16) NOT NULL,
JOB_NAME character varying(200) NULL,
JOB_GROUP character varying(200) NULL,
IS_NONCONCURRENT boolean NULL,
REQUESTS_RECOVERY boolean NULL) ;
alter table QRTZ_FIRED_TRIGGERS add primary key(SCHED_NAME,ENTRY_ID);
CREATE TABLE QRTZ_SCHEDULER_STATE (
SCHED_NAME character varying(120) NOT NULL,
INSTANCE_NAME character varying(200) NOT NULL,
LAST_CHECKIN_TIME BIGINT NOT NULL,
CHECKIN_INTERVAL BIGINT NOT NULL) ;
alter table QRTZ_SCHEDULER_STATE add primary key(SCHED_NAME,INSTANCE_NAME);
CREATE TABLE QRTZ_LOCKS (
SCHED_NAME character varying(120) NOT NULL,
LOCK_NAME character varying(40) NOT NULL) ;
alter table QRTZ_LOCKS add primary key(SCHED_NAME,LOCK_NAME);
CREATE INDEX IDX_QRTZ_J_REQ_RECOVERY ON QRTZ_JOB_DETAILS(SCHED_NAME,REQUESTS_RECOVERY);
CREATE INDEX IDX_QRTZ_J_GRP ON QRTZ_JOB_DETAILS(SCHED_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_J ON QRTZ_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_JG ON QRTZ_TRIGGERS(SCHED_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_C ON QRTZ_TRIGGERS(SCHED_NAME,CALENDAR_NAME);
CREATE INDEX IDX_QRTZ_T_G ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP);
CREATE INDEX IDX_QRTZ_T_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_N_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_N_G_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_NEXT_FIRE_TIME ON QRTZ_TRIGGERS(SCHED_NAME,NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_ST ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE,NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE_GRP ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_FT_TRIG_INST_NAME ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME);
CREATE INDEX IDX_QRTZ_FT_INST_JOB_REQ_RCVRY ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME,REQUESTS_RECOVERY);
CREATE INDEX IDX_QRTZ_FT_J_G ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_FT_JG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_FT_T_G ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE INDEX IDX_QRTZ_FT_TG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_GROUP);
--
-- Table structure for table t_ds_access_token
--
DROP TABLE IF EXISTS t_ds_access_token;
CREATE TABLE t_ds_access_token (
id int NOT NULL ,
user_id int DEFAULT NULL ,
token varchar(64) DEFAULT NULL ,
expire_time timestamp DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_alert
--
DROP TABLE IF EXISTS t_ds_alert;
CREATE TABLE t_ds_alert (
id int NOT NULL ,
title varchar(64) DEFAULT NULL ,
show_type int DEFAULT NULL ,
content text ,
alert_type int DEFAULT NULL ,
alert_status int DEFAULT '0' ,
`log` text ,
alertgroup_id int DEFAULT NULL ,
receivers text ,
receivers_cc text ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_alertgroup
--
DROP TABLE IF EXISTS t_ds_alertgroup;
CREATE TABLE t_ds_alertgroup (
id int NOT NULL ,
group_name varchar(255) DEFAULT NULL ,
group_type int DEFAULT NULL ,
description varchar(255) DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_command
--
DROP TABLE IF EXISTS t_ds_command;
CREATE TABLE t_ds_command (
id int NOT NULL ,
command_type int DEFAULT NULL ,
process_definition_id int DEFAULT NULL ,
command_param text ,
task_depend_type int DEFAULT NULL ,
failure_strategy int DEFAULT '0' ,
warning_type int DEFAULT '0' ,
warning_group_id int DEFAULT NULL ,
schedule_time timestamp DEFAULT NULL ,
start_time timestamp DEFAULT NULL ,
executor_id int DEFAULT NULL ,
dependence varchar(255) DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
process_instance_priority int DEFAULT NULL ,
worker_group varchar(64),
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_datasource
--
DROP TABLE IF EXISTS t_ds_datasource;
CREATE TABLE t_ds_datasource (
id int NOT NULL ,
name varchar(64) NOT NULL ,
note varchar(256) DEFAULT NULL ,
type int NOT NULL ,
user_id int NOT NULL ,
connection_params text NOT NULL ,
create_time timestamp NOT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_error_command
--
DROP TABLE IF EXISTS t_ds_error_command;
CREATE TABLE t_ds_error_command (
id int NOT NULL ,
command_type int DEFAULT NULL ,
executor_id int DEFAULT NULL ,
process_definition_id int DEFAULT NULL ,
command_param text ,
task_depend_type int DEFAULT NULL ,
failure_strategy int DEFAULT '0' ,
warning_type int DEFAULT '0' ,
warning_group_id int DEFAULT NULL ,
schedule_time timestamp DEFAULT NULL ,
start_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
dependence text ,
process_instance_priority int DEFAULT NULL ,
worker_group varchar(64),
message text ,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_master_server
--
--
-- Table structure for table t_ds_process_definition
--
DROP TABLE IF EXISTS t_ds_process_definition;
CREATE TABLE t_ds_process_definition (
id int NOT NULL ,
name varchar(255) DEFAULT NULL ,
version int DEFAULT NULL ,
release_state int DEFAULT NULL ,
project_id int DEFAULT NULL ,
user_id int DEFAULT NULL ,
process_definition_json text ,
description text ,
global_params text ,
flag int DEFAULT NULL ,
locations text ,
connects text ,
receivers text ,
receivers_cc text ,
create_time timestamp DEFAULT NULL ,
timeout int DEFAULT '0' ,
tenant_id int NOT NULL DEFAULT '-1' ,
update_time timestamp DEFAULT NULL ,
modify_by varchar(36) DEFAULT '' ,
resource_ids varchar(64),
PRIMARY KEY (id)
) ;
create index process_definition_index on t_ds_process_definition (project_id,id);
--
-- Table structure for table t_ds_process_instance
--
DROP TABLE IF EXISTS t_ds_process_instance;
CREATE TABLE t_ds_process_instance (
id int NOT NULL ,
name varchar(255) DEFAULT NULL ,
process_definition_id int DEFAULT NULL ,
state int DEFAULT NULL ,
recovery int DEFAULT NULL ,
start_time timestamp DEFAULT NULL ,
end_time timestamp DEFAULT NULL ,
run_times int DEFAULT NULL ,
host varchar(45) DEFAULT NULL ,
command_type int DEFAULT NULL ,
command_param text ,
task_depend_type int DEFAULT NULL ,
max_try_times int DEFAULT '0' ,
failure_strategy int DEFAULT '0' ,
warning_type int DEFAULT '0' ,
warning_group_id int DEFAULT NULL ,
schedule_time timestamp DEFAULT NULL ,
command_start_time timestamp DEFAULT NULL ,
global_params text ,
process_instance_json text ,
flag int DEFAULT '1' ,
update_time timestamp NULL ,
is_sub_process int DEFAULT '0' ,
executor_id int NOT NULL ,
locations text ,
connects text ,
history_cmd text ,
dependence_schedule_times text ,
process_instance_priority int DEFAULT NULL ,
worker_group varchar(64) ,
timeout int DEFAULT '0' ,
tenant_id int NOT NULL DEFAULT '-1' ,
PRIMARY KEY (id)
) ;
create index process_instance_index on t_ds_process_instance (process_definition_id,id);
create index start_time_index on t_ds_process_instance (start_time);
--
-- Table structure for table t_ds_project
--
DROP TABLE IF EXISTS t_ds_project;
CREATE TABLE t_ds_project (
id int NOT NULL ,
name varchar(100) DEFAULT NULL ,
description varchar(200) DEFAULT NULL ,
user_id int DEFAULT NULL ,
flag int DEFAULT '1' ,
create_time timestamp DEFAULT CURRENT_TIMESTAMP ,
update_time timestamp DEFAULT CURRENT_TIMESTAMP ,
PRIMARY KEY (id)
) ;
create index user_id_index on t_ds_project (user_id);
--
-- Table structure for table t_ds_queue
--
DROP TABLE IF EXISTS t_ds_queue;
CREATE TABLE t_ds_queue (
id int NOT NULL ,
queue_name varchar(64) DEFAULT NULL ,
queue varchar(64) DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_relation_datasource_user
--
DROP TABLE IF EXISTS t_ds_relation_datasource_user;
CREATE TABLE t_ds_relation_datasource_user (
id int NOT NULL ,
user_id int NOT NULL ,
datasource_id int DEFAULT NULL ,
perm int DEFAULT '1' ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
;
--
-- Table structure for table t_ds_relation_process_instance
--
DROP TABLE IF EXISTS t_ds_relation_process_instance;
CREATE TABLE t_ds_relation_process_instance (
id int NOT NULL ,
parent_process_instance_id int DEFAULT NULL ,
parent_task_instance_id int DEFAULT NULL ,
process_instance_id int DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_relation_project_user
--
DROP TABLE IF EXISTS t_ds_relation_project_user;
CREATE TABLE t_ds_relation_project_user (
id int NOT NULL ,
user_id int NOT NULL ,
project_id int DEFAULT NULL ,
perm int DEFAULT '1' ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
create index relation_project_user_id_index on t_ds_relation_project_user (user_id);
--
-- Table structure for table t_ds_relation_resources_user
--
DROP TABLE IF EXISTS t_ds_relation_resources_user;
CREATE TABLE t_ds_relation_resources_user (
id int NOT NULL ,
user_id int NOT NULL ,
resources_id int DEFAULT NULL ,
perm int DEFAULT '1' ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_relation_udfs_user
--
DROP TABLE IF EXISTS t_ds_relation_udfs_user;
CREATE TABLE t_ds_relation_udfs_user (
id int NOT NULL ,
user_id int NOT NULL ,
udf_id int DEFAULT NULL ,
perm int DEFAULT '1' ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
;
--
-- Table structure for table t_ds_relation_user_alertgroup
--
DROP TABLE IF EXISTS t_ds_relation_user_alertgroup;
CREATE TABLE t_ds_relation_user_alertgroup (
id int NOT NULL,
alertgroup_id int DEFAULT NULL,
user_id int DEFAULT NULL,
create_time timestamp DEFAULT NULL,
update_time timestamp DEFAULT NULL,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_resources
--
DROP TABLE IF EXISTS t_ds_resources;
CREATE TABLE t_ds_resources (
id int NOT NULL ,
alias varchar(64) DEFAULT NULL ,
file_name varchar(64) DEFAULT NULL ,
description varchar(256) DEFAULT NULL ,
user_id int DEFAULT NULL ,
type int DEFAULT NULL ,
size bigint DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
pid int,
full_name varchar(64),
is_directory int,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_schedules
--
DROP TABLE IF EXISTS t_ds_schedules;
CREATE TABLE t_ds_schedules (
id int NOT NULL ,
process_definition_id int NOT NULL ,
start_time timestamp NOT NULL ,
end_time timestamp NOT NULL ,
crontab varchar(256) NOT NULL ,
failure_strategy int NOT NULL ,
user_id int NOT NULL ,
release_state int NOT NULL ,
warning_type int NOT NULL ,
warning_group_id int DEFAULT NULL ,
process_instance_priority int DEFAULT NULL ,
worker_group varchar(64),
create_time timestamp NOT NULL ,
update_time timestamp NOT NULL ,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_session
--
DROP TABLE IF EXISTS t_ds_session;
CREATE TABLE t_ds_session (
id varchar(64) NOT NULL ,
user_id int DEFAULT NULL ,
ip varchar(45) DEFAULT NULL ,
last_login_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_task_instance
--
DROP TABLE IF EXISTS t_ds_task_instance;
CREATE TABLE t_ds_task_instance (
id int NOT NULL ,
name varchar(255) DEFAULT NULL ,
task_type varchar(64) DEFAULT NULL ,
process_definition_id int DEFAULT NULL ,
process_instance_id int DEFAULT NULL ,
task_json text ,
state int DEFAULT NULL ,
submit_time timestamp DEFAULT NULL ,
start_time timestamp DEFAULT NULL ,
end_time timestamp DEFAULT NULL ,
host varchar(45) DEFAULT NULL ,
execute_path varchar(200) DEFAULT NULL ,
log_path varchar(200) DEFAULT NULL ,
alert_flag int DEFAULT NULL ,
retry_times int DEFAULT '0' ,
pid int DEFAULT NULL ,
app_link varchar(255) DEFAULT NULL ,
flag int DEFAULT '1' ,
retry_interval int DEFAULT NULL ,
max_retry_times int DEFAULT NULL ,
task_instance_priority int DEFAULT NULL ,
worker_group varchar(64),
executor_id int DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_tenant
--
DROP TABLE IF EXISTS t_ds_tenant;
CREATE TABLE t_ds_tenant (
id int NOT NULL ,
tenant_code varchar(64) DEFAULT NULL ,
tenant_name varchar(64) DEFAULT NULL ,
description varchar(256) DEFAULT NULL ,
queue_id int DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_udfs
--
DROP TABLE IF EXISTS t_ds_udfs;
CREATE TABLE t_ds_udfs (
id int NOT NULL ,
user_id int NOT NULL ,
func_name varchar(100) NOT NULL ,
class_name varchar(255) NOT NULL ,
type int NOT NULL ,
arg_types varchar(255) DEFAULT NULL ,
database varchar(255) DEFAULT NULL ,
description varchar(255) DEFAULT NULL ,
resource_id int NOT NULL ,
resource_name varchar(255) NOT NULL ,
create_time timestamp NOT NULL ,
update_time timestamp NOT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_user
--
DROP TABLE IF EXISTS t_ds_user;
CREATE TABLE t_ds_user (
id int NOT NULL ,
user_name varchar(64) DEFAULT NULL ,
user_password varchar(64) DEFAULT NULL ,
user_type int DEFAULT NULL ,
email varchar(64) DEFAULT NULL ,
phone varchar(11) DEFAULT NULL ,
tenant_id int DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
queue varchar(64) DEFAULT NULL ,
state int DEFAULT 1 ,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_version
--
DROP TABLE IF EXISTS t_ds_version;
CREATE TABLE t_ds_version (
id int NOT NULL ,
version varchar(200) NOT NULL,
PRIMARY KEY (id)
) ;
create index version_index on t_ds_version(version);
--
-- Table structure for table t_ds_worker_group
--
DROP TABLE IF EXISTS t_ds_worker_group;
CREATE TABLE t_ds_worker_group (
id bigint NOT NULL ,
name varchar(256) DEFAULT NULL ,
ip_list varchar(256) DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_worker_server
--
DROP TABLE IF EXISTS t_ds_worker_server;
CREATE TABLE t_ds_worker_server (
id int NOT NULL ,
host varchar(45) DEFAULT NULL ,
port int DEFAULT NULL ,
zk_directory varchar(64) DEFAULT NULL ,
res_info varchar(255) DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
last_heartbeat_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
DROP SEQUENCE IF EXISTS t_ds_access_token_id_sequence;
CREATE SEQUENCE t_ds_access_token_id_sequence;
ALTER TABLE t_ds_access_token ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_access_token_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_alert_id_sequence;
CREATE SEQUENCE t_ds_alert_id_sequence;
ALTER TABLE t_ds_alert ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_alert_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_alertgroup_id_sequence;
CREATE SEQUENCE t_ds_alertgroup_id_sequence;
ALTER TABLE t_ds_alertgroup ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_alertgroup_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_command_id_sequence;
CREATE SEQUENCE t_ds_command_id_sequence;
ALTER TABLE t_ds_command ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_command_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_datasource_id_sequence;
CREATE SEQUENCE t_ds_datasource_id_sequence;
ALTER TABLE t_ds_datasource ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_datasource_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_process_definition_id_sequence;
CREATE SEQUENCE t_ds_process_definition_id_sequence;
ALTER TABLE t_ds_process_definition ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_process_instance_id_sequence;
CREATE SEQUENCE t_ds_process_instance_id_sequence;
ALTER TABLE t_ds_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_instance_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_project_id_sequence;
CREATE SEQUENCE t_ds_project_id_sequence;
ALTER TABLE t_ds_project ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_project_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_queue_id_sequence;
CREATE SEQUENCE t_ds_queue_id_sequence;
ALTER TABLE t_ds_queue ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_queue_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_datasource_user_id_sequence;
CREATE SEQUENCE t_ds_relation_datasource_user_id_sequence;
ALTER TABLE t_ds_relation_datasource_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_datasource_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_process_instance_id_sequence;
CREATE SEQUENCE t_ds_relation_process_instance_id_sequence;
ALTER TABLE t_ds_relation_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_process_instance_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_project_user_id_sequence;
CREATE SEQUENCE t_ds_relation_project_user_id_sequence;
ALTER TABLE t_ds_relation_project_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_project_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_resources_user_id_sequence;
CREATE SEQUENCE t_ds_relation_resources_user_id_sequence;
ALTER TABLE t_ds_relation_resources_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_resources_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_udfs_user_id_sequence;
CREATE SEQUENCE t_ds_relation_udfs_user_id_sequence;
ALTER TABLE t_ds_relation_udfs_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_udfs_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_user_alertgroup_id_sequence;
CREATE SEQUENCE t_ds_relation_user_alertgroup_id_sequence;
ALTER TABLE t_ds_relation_user_alertgroup ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_user_alertgroup_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_resources_id_sequence;
CREATE SEQUENCE t_ds_resources_id_sequence;
ALTER TABLE t_ds_resources ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_resources_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_schedules_id_sequence;
CREATE SEQUENCE t_ds_schedules_id_sequence;
ALTER TABLE t_ds_schedules ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_schedules_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_task_instance_id_sequence;
CREATE SEQUENCE t_ds_task_instance_id_sequence;
ALTER TABLE t_ds_task_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_task_instance_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_tenant_id_sequence;
CREATE SEQUENCE t_ds_tenant_id_sequence;
ALTER TABLE t_ds_tenant ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_tenant_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_udfs_id_sequence;
CREATE SEQUENCE t_ds_udfs_id_sequence;
ALTER TABLE t_ds_udfs ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_udfs_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_user_id_sequence;
CREATE SEQUENCE t_ds_user_id_sequence;
ALTER TABLE t_ds_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_version_id_sequence;
CREATE SEQUENCE t_ds_version_id_sequence;
ALTER TABLE t_ds_version ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_version_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_worker_group_id_sequence;
CREATE SEQUENCE t_ds_worker_group_id_sequence;
ALTER TABLE t_ds_worker_group ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_worker_group_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_worker_server_id_sequence;
CREATE SEQUENCE t_ds_worker_server_id_sequence;
ALTER TABLE t_ds_worker_server ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_worker_server_id_sequence');
-- Records of t_ds_user, user : admin , password : dolphinscheduler123
INSERT INTO t_ds_user(user_name,user_password,user_type,email,phone,state,tenant_id,create_time,update_time) VALUES ('admin', '7ad2410b2f4c074479a8937a28a22b8f', '0', 'xxx@qq.com', 'xx', 1, '0', '2018-03-27 15:48:50', '2018-10-24 17:40:22');
-- Records of t_ds_alertgroup,dolphinscheduler warning group
INSERT INTO t_ds_alertgroup(group_name,group_type,description,create_time,update_time) VALUES ('dolphinscheduler warning group', '0', 'dolphinscheduler warning group','2018-11-29 10:20:39', '2018-11-29 10:20:39');
INSERT INTO t_ds_relation_user_alertgroup(alertgroup_id,user_id,create_time,update_time) VALUES ( '1', '1', '2018-11-29 10:22:33', '2018-11-29 10:22:33');
-- Records of t_ds_queue,default queue name : default
INSERT INTO t_ds_queue(queue_name,queue,create_time,update_time) VALUES ('default', 'default','2018-11-29 10:22:33', '2018-11-29 10:22:33');
-- Records of t_ds_queue,default queue name : default
INSERT INTO t_ds_version(version) VALUES ('2.0.0');
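
With this bundled init.sql removed, the schema used by the compose setup comes from sql/dolphinscheduler-postgre.sql instead (see the ci_ut.yml change above, which copies it into the initdb volume). A hedged sketch of applying that file manually to an already-running database, with placeholder connection parameters:

```bash
# Load the maintained PostgreSQL schema into an existing "dolphinscheduler" database.
psql -h 127.0.0.1 -p 5432 -U test -d dolphinscheduler -f sql/dolphinscheduler-postgre.sql
```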

2
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/plugin/EmailAlertPlugin.java

@@ -56,7 +56,7 @@ public class EmailAlertPlugin implements AlertPlugin {
@Override
public String getId() {
return Constants.PLUGIN_DEFAULT_EMAIL;
return Constants.PLUGIN_DEFAULT_EMAIL_ID;
}
@Override

2
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/runner/AlertSender.java

@@ -80,7 +80,7 @@ public class AlertSender {
alertInfo.addProp("receivers", receviersList);
AlertPlugin emailPlugin = pluginManager.findOne(Constants.PLUGIN_DEFAULT_EMAIL);
AlertPlugin emailPlugin = pluginManager.findOne(Constants.PLUGIN_DEFAULT_EMAIL_ID);
retMaps = emailPlugin.process(alertInfo);
if (retMaps == null) {

2
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/Constants.java

@@ -179,7 +179,7 @@ public class Constants {
*/
public static final String PLUGIN_DIR = "plugin.dir";
public static final String PLUGIN_DEFAULT_EMAIL = "email";
public static final String PLUGIN_DEFAULT_EMAIL_ID = "email";
public static final String PLUGIN_DEFAULT_EMAIL_CH = "邮件";

2
dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/plugin/EmailAlertPluginTest.java

@@ -47,7 +47,7 @@ public class EmailAlertPluginTest {
@Test
public void getId() {
String id = plugin.getId();
assertEquals(Constants.PLUGIN_DEFAULT_EMAIL, id);
assertEquals(Constants.PLUGIN_DEFAULT_EMAIL_ID, id);
}
@Test

14
dolphinscheduler-common/pom.xml

@@ -594,5 +594,19 @@
<artifactId>janino</artifactId>
<version>${codehaus.janino.version}</version>
</dependency>
<dependency>
<groupId>com.github.rholder</groupId>
<artifactId>guava-retrying</artifactId>
<exclusions>
<exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.code.findbugs</groupId>
<artifactId>jsr305</artifactId>
</exclusion>
</exclusions>
</dependency>
</dependencies>
</project>

4
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/plugin/FilePluginManager.java

@@ -90,8 +90,8 @@ public class FilePluginManager implements PluginManager {
}
@Override
public AlertPlugin findOne(String name) {
return pluginMap.get(name);
public AlertPlugin findOne(String pluginId) {
return pluginMap.get(pluginId);
}
@Override

138
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/RetryerUtils.java

@@ -0,0 +1,138 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.utils;
import com.github.rholder.retry.*;
import org.apache.dolphinscheduler.common.Constants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
/**
* The Retryer util.
*/
public class RetryerUtils {
private static final Logger logger = LoggerFactory.getLogger(RetryerUtils.class);
private static Retryer<Boolean> defaultRetryerResultCheck;
private static Retryer<Boolean> defaultRetryerResultNoCheck;
private RetryerUtils() {
}
private static Retryer<Boolean> getDefaultRetryerResultNoCheck() {
if (defaultRetryerResultNoCheck == null) {
defaultRetryerResultNoCheck = RetryerBuilder
.<Boolean>newBuilder()
.retryIfException()
.withWaitStrategy(WaitStrategies.fixedWait(Constants.SLEEP_TIME_MILLIS, TimeUnit.MILLISECONDS))
.withStopStrategy(StopStrategies.stopAfterAttempt(3))
.build();
}
return defaultRetryerResultNoCheck;
}
/**
* Gets default retryer.
* the retryer will retry up to 3 times if an exception is thrown
* and wait 1 second between attempts
*
* @param checkResult true means the callable is retried until it returns true (or the attempts are exhausted);
*                    false means the callable is retried only when it throws an exception
* @return the default retryer
*/
public static Retryer<Boolean> getDefaultRetryer(boolean checkResult) {
return checkResult ? getDefaultRetryer() : getDefaultRetryerResultNoCheck();
}
/**
* Gets default retryer.
* the retryer will retry up to 3 times if an exception is thrown
* and wait 1 second between attempts
*
* @return the default retryer
*/
public static Retryer<Boolean> getDefaultRetryer() {
if (defaultRetryerResultCheck == null) {
defaultRetryerResultCheck = RetryerBuilder
.<Boolean>newBuilder()
.retryIfResult(Boolean.FALSE::equals)
.retryIfException()
.withWaitStrategy(WaitStrategies.fixedWait(Constants.SLEEP_TIME_MILLIS, TimeUnit.MILLISECONDS))
.withStopStrategy(StopStrategies.stopAfterAttempt(3))
.build();
}
return defaultRetryerResultCheck;
}
/**
* Use RETRYER to invoke the Callable
*
* @param callable the callable
* @param checkResult true means the callable is retried until it returns true;
*                    false means the callable is retried only when it throws an exception
* @return the final result of callable
* @throws ExecutionException the execution exception
* @throws RetryException the retry exception
*/
public static Boolean retryCall(final Callable<Boolean> callable, boolean checkResult) throws ExecutionException, RetryException {
return getDefaultRetryer(checkResult).call(callable);
}
/**
* Use the default retryer to invoke the callable, retrying until it returns true
* or the attempts are exhausted
*
* @param callable the callable
* @return the final result of the callable
* @throws ExecutionException the execution exception
* @throws RetryException the retry exception
*/
public static Boolean retryCall(final Callable<Boolean> callable) throws ExecutionException, RetryException {
return retryCall(callable, true);
}
/**
* Retry the call silently, without propagating exceptions
*
* @param callable the callable
* @param checkResult whether to also retry while the callable returns false
* @return the result returned by the callable if no exception escapes the retryer, otherwise false
*/
public static boolean retryCallSilent(final Callable<Boolean> callable, boolean checkResult) {
boolean result = false;
try {
result = getDefaultRetryer(checkResult).call(callable);
} catch (ExecutionException | RetryException e) {
logger.warn("Retry call {} failed {}", callable, e.getMessage(), e);
}
return result;
}
/**
* Retry the call silently, without propagating exceptions
*
* @param callable the callable
* @return the result returned by the callable if no exception escapes the retryer, otherwise false
*/
public static boolean retryCallSilent(final Callable<Boolean> callable) {
return retryCallSilent(callable, true);
}
}

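For orientation, a minimal usage sketch of the new utility (not part of the patch; the pingService check and class name are made up for illustration). retryCall retries on exceptions and, by default, on a false result, up to three attempts one second apart; retryCallSilent swallows the retry exceptions and simply reports false.

import com.github.rholder.retry.RetryException;
import org.apache.dolphinscheduler.common.utils.RetryerUtils;
import java.util.concurrent.ExecutionException;

public class RetryerUtilsUsageSketch {
    public static void main(String[] args) {
        try {
            // retried until it returns true, up to the 3-attempt limit;
            // throws RetryException if all attempts fail
            boolean ok = RetryerUtils.retryCall(() -> pingService());
            System.out.println("service reachable: " + ok);
        } catch (ExecutionException | RetryException e) {
            System.err.println("still failing after retries: " + e.getMessage());
        }
        // silent variant with checkResult=false: a false result is returned as-is,
        // only exceptions trigger retries, and nothing is thrown to the caller
        boolean okSilent = RetryerUtils.retryCallSilent(() -> pingService(), false);
        System.out.println("single check result: " + okSilent);
    }

    // hypothetical flaky check used only for illustration
    private static boolean pingService() {
        return System.currentTimeMillis() % 2 == 0;
    }
}
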
325
dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/RetryerUtilsTest.java

@@ -0,0 +1,325 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.utils;
import com.github.rholder.retry.RetryException;
import com.github.rholder.retry.Retryer;
import org.junit.Assert;
import org.junit.Test;
import java.util.concurrent.ExecutionException;
public class RetryerUtilsTest {
@Test
public void testDefaultRetryer() {
Retryer<Boolean> retryer = RetryerUtils.getDefaultRetryer();
Assert.assertNotNull(retryer);
try {
boolean result = retryer.call(() -> true);
Assert.assertTrue(result);
} catch (ExecutionException | RetryException e) {
Assert.fail("Retry call failed " + e.getMessage());
}
Retryer<Boolean> retryer1 = RetryerUtils.getDefaultRetryer(true);
Assert.assertEquals(retryer, retryer1);
}
@Test
public void testDefaultRetryerResultCheck() {
Retryer<Boolean> retryer = RetryerUtils.getDefaultRetryer();
Assert.assertNotNull(retryer);
try {
for (int execTarget = 1; execTarget <= 3; execTarget++) {
int finalExecTarget = execTarget;
int[] execTime = {0};
boolean result = retryer.call(() -> {
execTime[0]++;
return execTime[0] == finalExecTarget;
});
Assert.assertEquals(finalExecTarget, execTime[0]);
Assert.assertTrue(result);
}
} catch (ExecutionException | RetryException e) {
Assert.fail("Retry call failed " + e.getMessage());
}
int[] execTime = {0};
try {
retryer.call(() -> {
execTime[0]++;
return execTime[0] == 4;
});
Assert.fail("Retry times not reached");
} catch (RetryException e) {
Assert.assertEquals(3, e.getNumberOfFailedAttempts());
Assert.assertEquals(3, execTime[0]);
} catch (ExecutionException e) {
Assert.fail("Retry call failed " + e.getMessage());
}
}
@Test
public void testDefaultRetryerResultNoCheck() {
Retryer<Boolean> retryer = RetryerUtils.getDefaultRetryer(false);
Assert.assertNotNull(retryer);
try {
for (int execTarget = 1; execTarget <= 5; execTarget++) {
int[] execTime = {0};
boolean result = retryer.call(() -> {
execTime[0]++;
return execTime[0] > 1;
});
Assert.assertEquals(1, execTime[0]);
Assert.assertFalse(result);
}
} catch (ExecutionException | RetryException e) {
Assert.fail("Retry call failed " + e.getMessage());
}
}
@Test
public void testRecallResultCheck() {
try {
for (int execTarget = 1; execTarget <= 3; execTarget++) {
int finalExecTarget = execTarget;
int[] execTime = {0};
boolean result = RetryerUtils.retryCall(() -> {
execTime[0]++;
return execTime[0] == finalExecTarget;
});
Assert.assertEquals(finalExecTarget, execTime[0]);
Assert.assertTrue(result);
}
} catch (ExecutionException | RetryException e) {
Assert.fail("Retry call failed " + e.getMessage());
}
int[] execTime = {0};
try {
RetryerUtils.retryCall(() -> {
execTime[0]++;
return execTime[0] == 4;
});
Assert.fail("Recall times not reached");
} catch (RetryException e) {
Assert.assertEquals(3, e.getNumberOfFailedAttempts());
Assert.assertEquals(3, execTime[0]);
} catch (ExecutionException e) {
Assert.fail("Retry call failed " + e.getMessage());
}
}
@Test
public void testRecallResultCheckWithPara() {
try {
for (int execTarget = 1; execTarget <= 3; execTarget++) {
int finalExecTarget = execTarget;
int[] execTime = {0};
boolean result = RetryerUtils.retryCall(() -> {
execTime[0]++;
return execTime[0] == finalExecTarget;
}, true);
Assert.assertEquals(finalExecTarget, execTime[0]);
Assert.assertTrue(result);
}
} catch (ExecutionException | RetryException e) {
Assert.fail("Retry call failed " + e.getMessage());
}
int[] execTime = {0};
try {
RetryerUtils.retryCall(() -> {
execTime[0]++;
return execTime[0] == 4;
}, true);
Assert.fail("Recall times not reached");
} catch (RetryException e) {
Assert.assertEquals(3, e.getNumberOfFailedAttempts());
Assert.assertEquals(3, execTime[0]);
} catch (ExecutionException e) {
Assert.fail("Retry call failed " + e.getMessage());
}
}
@Test
public void testRecallResultNoCheck() {
try {
for (int execTarget = 1; execTarget <= 5; execTarget++) {
int[] execTime = {0};
boolean result = RetryerUtils.retryCall(() -> {
execTime[0]++;
return execTime[0] > 1;
}, false);
Assert.assertEquals(1, execTime[0]);
Assert.assertFalse(result);
}
} catch (ExecutionException | RetryException e) {
Assert.fail("Retry call failed " + e.getMessage());
}
}
private void testRetryExceptionWithPara(boolean checkResult) {
try {
for (int execTarget = 1; execTarget <= 3; execTarget++) {
int finalExecTarget = execTarget;
int[] execTime = {0};
boolean result = RetryerUtils.retryCall(() -> {
execTime[0]++;
if (execTime[0] != finalExecTarget) {
throw new IllegalArgumentException(String.valueOf(execTime[0]));
}
return true;
}, checkResult);
Assert.assertEquals(finalExecTarget, execTime[0]);
Assert.assertTrue(result);
}
} catch (ExecutionException | RetryException e) {
Assert.fail("Retry call failed " + e.getMessage());
}
int[] execTime = {0};
try {
RetryerUtils.retryCall(() -> {
execTime[0]++;
if (execTime[0] != 4) {
throw new IllegalArgumentException(String.valueOf(execTime[0]));
}
return true;
}, checkResult);
Assert.fail("Recall times not reached");
} catch (RetryException e) {
Assert.assertEquals(3, e.getNumberOfFailedAttempts());
Assert.assertEquals(3, execTime[0]);
Assert.assertNotNull(e.getCause());
Assert.assertEquals(3, Integer.parseInt(e.getCause().getMessage()));
} catch (ExecutionException e) {
Assert.fail("Retry call failed " + e.getMessage());
}
}
@Test
public void testRetryException() {
testRetryExceptionWithPara(true);
testRetryExceptionWithPara(false);
}
@Test
public void testRetrySilent() {
try {
for (int execTarget = 1; execTarget <= 3; execTarget++) {
int finalExecTarget = execTarget;
int[] execTime = {0};
boolean result = RetryerUtils.retryCallSilent(() -> {
execTime[0]++;
return execTime[0] == finalExecTarget;
});
Assert.assertEquals(finalExecTarget, execTime[0]);
Assert.assertTrue(result);
}
} catch (Exception e) {
Assert.fail("Unexpected exception " + e.getMessage());
}
int[] execTime = {0};
try {
boolean result = RetryerUtils.retryCallSilent(() -> {
execTime[0]++;
return execTime[0] == 4;
});
Assert.assertFalse(result);
} catch (Exception e) {
Assert.fail("Unexpected exception " + e.getMessage());
}
}
@Test
public void testRetrySilentWithPara() {
try {
for (int execTarget = 1; execTarget <= 3; execTarget++) {
int finalExecTarget = execTarget;
int[] execTime = {0};
boolean result = RetryerUtils.retryCallSilent(() -> {
execTime[0]++;
return execTime[0] == finalExecTarget;
}, true);
Assert.assertEquals(finalExecTarget, execTime[0]);
Assert.assertTrue(result);
}
} catch (Exception e) {
Assert.fail("Unexpected exception " + e.getMessage());
}
int[] execTime = {0};
try {
boolean result = RetryerUtils.retryCallSilent(() -> {
execTime[0]++;
return execTime[0] == 4;
}, true);
Assert.assertFalse(result);
} catch (Exception e) {
Assert.fail("Unexpected exception " + e.getMessage());
}
}
@Test
public void testRetrySilentNoCheckResult() {
try {
for (int execTarget = 1; execTarget <= 5; execTarget++) {
int[] execTime = {0};
boolean result = RetryerUtils.retryCallSilent(() -> {
execTime[0]++;
return execTime[0] > 1;
}, false);
Assert.assertEquals(1, execTime[0]);
Assert.assertFalse(result);
}
} catch (Exception e) {
Assert.fail("Unexpected exception " + e.getMessage());
}
}
private void testRetrySilentExceptionWithPara(boolean checkResult) {
try {
for (int execTarget = 1; execTarget <= 3; execTarget++) {
int finalExecTarget = execTarget;
int[] execTime = {0};
boolean result = RetryerUtils.retryCallSilent(() -> {
execTime[0]++;
if (execTime[0] != finalExecTarget) {
throw new IllegalArgumentException(String.valueOf(execTime[0]));
}
return true;
}, checkResult);
Assert.assertEquals(finalExecTarget, execTime[0]);
Assert.assertTrue(result);
}
} catch (Exception e) {
Assert.fail("Unexpected exception " + e.getMessage());
}
int[] execTime = {0};
try {
boolean result = RetryerUtils.retryCallSilent(() -> {
execTime[0]++;
if (execTime[0] != 4) {
throw new IllegalArgumentException(String.valueOf(execTime[0]));
}
return true;
}, checkResult);
Assert.assertFalse(result);
} catch (Exception e) {
Assert.fail("Unexpected exception " + e.getMessage());
}
}
@Test
public void testRetrySilentException() {
testRetrySilentExceptionWithPara(true);
testRetrySilentExceptionWithPara(false);
}
}

54
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/dispatch/executor/NettyExecutorManager.java

@@ -17,7 +17,6 @@
package org.apache.dolphinscheduler.server.master.dispatch.executor;
import org.apache.commons.collections.CollectionUtils;
import org.apache.dolphinscheduler.remote.NettyRemotingClient;
import org.apache.dolphinscheduler.remote.command.Command;
import org.apache.dolphinscheduler.remote.command.CommandType;
@@ -36,10 +35,7 @@ import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import javax.annotation.PostConstruct;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.*;
/**
* netty executor manager
@@ -87,17 +83,11 @@ public class NettyExecutorManager extends AbstractExecutorManager<Boolean>{
*/
@Override
public Boolean execute(ExecutionContext context) throws ExecuteException {
/**
* all nodes
*/
Set<String> allNodes = getAllNodes(context);
/**
* fail nodes
*/
Set<String> failNodeSet = new HashSet<>();
LinkedList<String> allNodes = new LinkedList<>();
Set<String> nodes = getAllNodes(context);
if (nodes != null) {
allNodes.addAll(nodes);
}
/**
* build command according to the executeContext
*/
@@ -106,31 +96,27 @@ public class NettyExecutorManager extends AbstractExecutorManager<Boolean>{
/**
* execute task host
*/
Host host = context.getHost();
String startHostAddress = context.getHost().getAddress();
// remove start host address and add it to head
allNodes.remove(startHostAddress);
allNodes.addFirst(startHostAddress);
boolean success = false;
while (!success) {
for (String address : allNodes) {
try {
doExecute(host,command);
Host host = Host.of(address);
doExecute(host, command);
success = true;
context.setHost(host);
break;
} catch (ExecuteException ex) {
logger.error(String.format("execute command : %s error", command), ex);
try {
failNodeSet.add(host.getAddress());
Set<String> tmpAllIps = new HashSet<>(allNodes);
Collection<String> remained = CollectionUtils.subtract(tmpAllIps, failNodeSet);
if (remained != null && remained.size() > 0) {
host = Host.of(remained.iterator().next());
logger.error("retry execute command : {} host : {}", command, host);
} else {
throw new ExecuteException("fail after try all nodes");
}
} catch (Throwable t) {
throw new ExecuteException("fail after try all nodes");
}
logger.error("retry execute command : {} host : {}", command, address);
}
}
if (!success) {
throw new ExecuteException("fail after try all nodes");
}
return success;
}

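The rewritten dispatch logic above reduces to a host-failover pattern: move the preferred host to the head of the candidate list, try each host in turn, remember the failure, and give up only after every candidate has been tried. A standalone sketch of that pattern, assuming hypothetical send/dispatch names and sample addresses:

import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.Set;

public class FailoverDispatchSketch {
    /** Try the preferred host first, then every other candidate; throw if all fail. */
    static String dispatch(String preferredHost, Set<String> allHosts) throws Exception {
        LinkedList<String> candidates = new LinkedList<>(new LinkedHashSet<>(allHosts));
        candidates.remove(preferredHost);
        candidates.addFirst(preferredHost);
        Exception last = null;
        for (String host : candidates) {
            try {
                send(host);            // illustrative network call
                return host;           // success: report which host actually handled the work
            } catch (Exception ex) {
                last = ex;             // remember the failure and fall through to the next host
            }
        }
        throw new Exception("fail after try all nodes", last);
    }

    // hypothetical transport call that may fail for some hosts
    private static void send(String host) throws Exception {
        if (host.isEmpty()) {
            throw new Exception("unreachable host " + host);
        }
    }

    public static void main(String[] args) throws Exception {
        Set<String> hosts = new LinkedHashSet<>();
        hosts.add("192.168.0.2:1234");
        hosts.add("192.168.0.3:1234");
        System.out.println("handled by " + dispatch("192.168.0.3:1234", hosts));
    }
}

As in the patch, the caller keeps track of which host actually handled the command (context.setHost in the diff) so downstream logic talks to the right worker.
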
27
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskExecuteProcessor.java

@@ -20,11 +20,13 @@ package org.apache.dolphinscheduler.server.worker.processor;
import ch.qos.logback.classic.LoggerContext;
import ch.qos.logback.classic.sift.SiftingAppender;
import com.alibaba.fastjson.JSONObject;
import com.github.rholder.retry.RetryException;
import io.netty.channel.Channel;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.TaskType;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.common.utils.RetryerUtils;
import org.apache.dolphinscheduler.server.log.TaskLogDiscriminator;
import org.apache.dolphinscheduler.common.thread.ThreadUtils;
import org.apache.dolphinscheduler.common.utils.FileUtils;
@@ -43,6 +45,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Date;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
/**
@@ -101,21 +104,19 @@ public class TaskExecuteProcessor implements NettyRequestProcessor {
taskCallbackService.addRemoteChannel(taskExecutionContext.getTaskInstanceId(),
new NettyRemoteChannel(channel, command.getOpaque()));
// tell master that task is in executing
final Command ackCommand = buildAckCommand(taskExecutionContext).convert2Command();
try {
this.doAck(taskExecutionContext);
}catch (Exception e){
ThreadUtils.sleep(Constants.SLEEP_TIME_MILLIS);
this.doAck(taskExecutionContext);
RetryerUtils.retryCall(() -> {
taskCallbackService.sendAck(taskExecutionContext.getTaskInstanceId(),ackCommand);
return Boolean.TRUE;
});
// submit task
workerExecService.submit(new TaskExecuteThread(taskExecutionContext, taskCallbackService));
} catch (ExecutionException | RetryException e) {
logger.error(e.getMessage(), e);
}
// submit task
workerExecService.submit(new TaskExecuteThread(taskExecutionContext, taskCallbackService));
}
private void doAck(TaskExecutionContext taskExecutionContext){
// tell master that task is in executing
TaskExecuteAckCommand ackCommand = buildAckCommand(taskExecutionContext);
taskCallbackService.sendAck(taskExecutionContext.getTaskInstanceId(), ackCommand.convert2Command());
}
/**

7
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/ZookeeperOperator.java

@@ -20,6 +20,7 @@ import org.apache.commons.lang.StringUtils;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.api.ACLProvider;
import org.apache.curator.framework.api.transaction.CuratorOp;
import org.apache.curator.framework.state.ConnectionState;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.curator.utils.CloseableUtils;
@@ -171,7 +172,11 @@ public class ZookeeperOperator implements InitializingBean {
public void update(final String key, final String value) {
try {
zkClient.inTransaction().check().forPath(key).and().setData().forPath(key, value.getBytes(StandardCharsets.UTF_8)).and().commit();
CuratorOp check = zkClient.transactionOp().check().forPath(key);
CuratorOp setData = zkClient.transactionOp().setData().forPath(key, value.getBytes(StandardCharsets.UTF_8));
zkClient.transaction().forOperations(check, setData);
} catch (Exception ex) {
logger.error("update key : {} , value : {}", key, value, ex);
}

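The update above replaces the deprecated inTransaction() fluent chain with the Curator 4.x operation-based transaction API: build each CuratorOp, then commit them atomically with transaction().forOperations(). A minimal, self-contained sketch of that API; the connection string, path, and class name are placeholders:

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.api.transaction.CuratorOp;
import org.apache.curator.framework.api.transaction.CuratorTransactionResult;
import org.apache.curator.retry.ExponentialBackoffRetry;

import java.nio.charset.StandardCharsets;
import java.util.List;

public class ZkTransactionSketch {
    public static void main(String[] args) throws Exception {
        // placeholder connection string; the node at key must already exist for check() to pass
        CuratorFramework client = CuratorFrameworkFactory.newClient(
                "localhost:2181", new ExponentialBackoffRetry(1000, 3));
        client.start();
        String key = "/dolphinscheduler/demo";

        // Curator 4.x style: build each operation, then commit them atomically
        CuratorOp check = client.transactionOp().check().forPath(key);
        CuratorOp setData = client.transactionOp().setData()
                .forPath(key, "new-value".getBytes(StandardCharsets.UTF_8));
        List<CuratorTransactionResult> results = client.transaction().forOperations(check, setData);

        results.forEach(r -> System.out.println(r.getForPath() + " -> " + r.getType()));
        client.close();
    }
}
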
8
pom.xml

@@ -118,6 +118,7 @@
<servlet-api.version>2.5</servlet-api.version>
<swagger.version>1.9.3</swagger.version>
<springfox.version>2.9.2</springfox.version>
<guava-retry.version>2.0.0</guava-retry.version>
</properties>
<dependencyManagement>
@@ -544,6 +545,12 @@
<artifactId>swagger-bootstrap-ui</artifactId>
<version>${swagger.version}</version>
</dependency>
<dependency>
<groupId>com.github.rholder</groupId>
<artifactId>guava-retrying</artifactId>
<version>${guava-retry.version}</version>
</dependency>
</dependencies>
</dependencyManagement>
@@ -771,6 +778,7 @@
<include>**/common/utils/HttpUtilsTest.java</include>
<include>**/common/ConstantsTest.java</include>
<include>**/common/utils/HadoopUtils.java</include>
<include>**/common/utils/RetryerUtilsTest.java</include>
<include>**/common/plugin/FilePluginManagerTest</include>
<include>**/common/plugin/PluginClassLoaderTest</include>
<include>**/dao/mapper/AccessTokenMapperTest.java</include>

11
script/scp-hosts.sh

@@ -20,6 +20,12 @@ workDir=`dirname $0`
workDir=`cd ${workDir};pwd`
source $workDir/../conf/config/install_config.conf
txt=""
if [[ "$OSTYPE" == "darwin"* ]]; then
# Mac OSX
txt="''"
fi
hostsArr=(${ips//,/ })
for host in ${hostsArr[@]}
do
@@ -33,6 +39,11 @@ do
for dsDir in bin conf lib script sql ui install.sh
do
# if worker in workersGroup
if [[ "${map[${host}]}" ]] && [[ "${dsDir}" -eq "conf" ]]; then
sed -i ${txt} "s#worker.group.*#worker.group=${map[${host}]}#g" $workDir/../conf/worker.properties
fi
echo "start to scp $dsDir to $host/$installPath"
scp -P $sshPort -r $workDir/../$dsDir $host:$installPath
done

3
script/start-all.sh

@@ -28,8 +28,7 @@ do
done
workersHost=(${workers//,/ })
for worker in ${workersHost[@]}
for worker in ${!workersGroup[*]}
do
echo "$worker worker server is starting"

3
script/stop-all.sh

@@ -29,8 +29,7 @@ do
done
workersHost=(${workers//,/ })
for worker in ${workersHost[@]}
for worker in ${!workersGroup[*]}
do
echo "$worker worker server is stopping"
ssh -p $sshPort $worker "cd $installPath/; sh bin/dolphinscheduler-daemon.sh stop worker-server;"

3
sql/dolphinscheduler-postgre.sql

@@ -623,6 +623,7 @@ CREATE TABLE t_ds_user (
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
queue varchar(64) DEFAULT NULL ,
state int DEFAULT 1 ,
PRIMARY KEY (id)
);
@@ -759,4 +760,4 @@ INSERT INTO t_ds_relation_user_alertgroup(alertgroup_id,user_id,create_time,upda
INSERT INTO t_ds_queue(queue_name,queue,create_time,update_time) VALUES ('default', 'default','2018-11-29 10:22:33', '2018-11-29 10:22:33');
-- Records of t_ds_queue,default queue name : default
INSERT INTO t_ds_version(version) VALUES ('2.0.0');
INSERT INTO t_ds_version(version) VALUES ('2.0.0');

1
sql/dolphinscheduler_mysql.sql

@@ -775,6 +775,7 @@ CREATE TABLE `t_ds_user` (
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
`queue` varchar(64) DEFAULT NULL COMMENT 'queue',
`state` int(1) DEFAULT 1 COMMENT 'state 0:disable 1:enable',
PRIMARY KEY (`id`),
UNIQUE KEY `user_name_unique` (`user_name`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

1
tools/dependencies/known-dependencies.txt

@@ -209,3 +209,4 @@ xml-apis-1.4.01.jar
xmlenc-0.52.jar
xz-1.0.jar
zookeeper-3.4.14.jar
guava-retrying-2.0.0.jar
