
Merge branch 'dev' into dev

pull/3/MERGE
dailidong committed via GitHub
commit d9f8a5c67c
43 changed files:

  13  .github/workflows/ci_e2e.yml
  10  .github/workflows/ci_ut.yml
  36  docker/build/Dockerfile
   6  docker/build/README.md
   6  docker/build/README_zh_CN.md
   0  docker/build/checkpoint.sh
   0  docker/build/conf/dolphinscheduler/alert.properties.tpl
   0  docker/build/conf/dolphinscheduler/application-api.properties.tpl
   0  docker/build/conf/dolphinscheduler/common.properties.tpl
   0  docker/build/conf/dolphinscheduler/datasource.properties.tpl
   0  docker/build/conf/dolphinscheduler/env/dolphinscheduler_env.sh
   0  docker/build/conf/dolphinscheduler/logback/logback-alert.xml
   0  docker/build/conf/dolphinscheduler/logback/logback-api.xml
   0  docker/build/conf/dolphinscheduler/logback/logback-master.xml
   0  docker/build/conf/dolphinscheduler/logback/logback-worker.xml
   0  docker/build/conf/dolphinscheduler/master.properties.tpl
   0  docker/build/conf/dolphinscheduler/quartz.properties.tpl
   0  docker/build/conf/dolphinscheduler/worker.properties.tpl
   0  docker/build/conf/dolphinscheduler/zookeeper.properties.tpl
   0  docker/build/conf/nginx/dolphinscheduler.conf
   0  docker/build/conf/zookeeper/zoo.cfg
  14  docker/build/hooks/build
  12  docker/build/hooks/build.bat
   0  docker/build/hooks/push
   0  docker/build/hooks/push.bat
   0  docker/build/startup-init-conf.sh
  53  docker/build/startup.sh
  40  docker/docker-compose.yml
   2  docker/docker-swarm/check
  47  docker/docker-swarm/docker-compose.yml
  27  docker/docker-swarm/docker-stack.yml
   6  docker/kubernetes/dolphinscheduler/values.yaml
 763  docker/postgres/docker-entrypoint-initdb/init.sql
  53  dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/manager/DingTalkManager.java
   8  dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/plugin/EmailAlertPlugin.java
  18  dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/Constants.java
 136  dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/DingTalkUtils.java
  11  dolphinscheduler-alert/src/main/resources/alert.properties
 125  dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/utils/DingTalkUtilsTest.java
   1  pom.xml
   2  script/scp-hosts.sh
   3  sql/dolphinscheduler-postgre.sql
   1  sql/dolphinscheduler_mysql.sql

13
.github/workflows/ci_e2e.yml

@@ -44,15 +44,14 @@ jobs:
${{ runner.os }}-maven-
- name: Build Image
run: |
export VERSION=`cat $(pwd)/pom.xml| grep "SNAPSHOT</version>" | awk -F "-SNAPSHOT" '{print $1}' | awk -F ">" '{print $2}'`
sh ./dockerfile/hooks/build
sh ./docker/build/hooks/build
- name: Docker Run
run: |
VERSION=`cat $(pwd)/pom.xml| grep "SNAPSHOT</version>" | awk -F "-SNAPSHOT" '{print $1}' | awk -F ">" '{print $2}'`
mkdir -p /tmp/logs
docker run -dit -e POSTGRESQL_USERNAME=test -e POSTGRESQL_PASSWORD=test -v /tmp/logs:/opt/dolphinscheduler/logs -p 8888:8888 dolphinscheduler:$VERSION all
export VERSION=$(cat $(pwd)/pom.xml | grep '<version>' -m 1 | awk '{print $1}' | sed 's/<version>//' | sed 's/<\/version>//')
sed -i "s/apache\/dolphinscheduler:latest/apache\/dolphinscheduler:${VERSION}/g" $(pwd)/docker/docker-swarm/docker-compose.yml
docker-compose -f $(pwd)/docker/docker-swarm/docker-compose.yml up -d
- name: Check Server Status
run: sh ./dockerfile/hooks/check
run: sh $(pwd)/docker/docker-swarm/check
- name: Prepare e2e env
run: |
sudo apt-get install -y libxss1 libappindicator1 libindicator7 xvfb unzip libgbm1
@@ -70,6 +69,6 @@ jobs:
uses: actions/upload-artifact@v1
with:
name: dslogs
path: /tmp/logs
path: /var/lib/docker/volumes/docker-swarm_dolphinscheduler-logs/_data
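A note on the `Build Image` step above: the image tag is now derived from the project POM rather than a separate `soft_version` file. A minimal sketch of that extraction pipeline in isolation, run against an illustrative pom.xml fragment (the real command reads the repository's root pom.xml):

```bash
# illustrative pom.xml; the real command reads the repository root pom.xml
cat > /tmp/pom.xml <<'EOF'
<project>
    <version>1.3.0-SNAPSHOT</version>
</project>
EOF

# take the first <version> element, strip the surrounding whitespace with awk,
# then peel off the opening and closing tags with sed
VERSION=$(cat /tmp/pom.xml | grep '<version>' -m 1 \
    | awk '{print $1}' | sed 's/<version>//' | sed 's/<\/version>//')
echo "$VERSION"   # -> 1.3.0-SNAPSHOT
```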

10
.github/workflows/ci_ut.yml

@@ -21,7 +21,6 @@ on:
branches:
- dev
env:
DOCKER_DIR: ./docker
LOG_DIR: /tmp/dolphinscheduler
name: Unit Test
@@ -47,7 +46,11 @@ jobs:
restore-keys: |
${{ runner.os }}-maven-
- name: Bootstrap database
run: cd ${DOCKER_DIR} && docker-compose up -d
run: |
sed -i "s/: root/: test/g" $(pwd)/docker/docker-swarm/docker-compose.yml
docker-compose -f $(pwd)/docker/docker-swarm/docker-compose.yml create --force-recreate dolphinscheduler-zookeeper dolphinscheduler-postgresql
sudo cp $(pwd)/sql/dolphinscheduler-postgre.sql $(docker volume inspect docker-swarm_dolphinscheduler-postgresql-initdb | grep "Mountpoint" | awk -F "\"" '{print $4}')
docker-compose -f $(pwd)/docker/docker-swarm/docker-compose.yml up -d dolphinscheduler-zookeeper dolphinscheduler-postgresql
- name: Set up JDK 1.8
uses: actions/setup-java@v1
with:
@@ -82,6 +85,5 @@ jobs:
- name: Collect logs
run: |
mkdir -p ${LOG_DIR}
cd ${DOCKER_DIR}
docker-compose logs db > ${LOG_DIR}/db.txt
docker-compose -f $(pwd)/docker/docker-swarm/docker-compose.yml logs dolphinscheduler-postgresql > ${LOG_DIR}/db.txt
continue-on-error: true
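The `Bootstrap database` step above locates the host path of the postgres init volume by scraping the JSON from `docker volume inspect` with grep and awk. A sketch of an equivalent lookup using the command's built-in `--format` Go template (volume name taken from the workflow itself):

```bash
# resolve the host mountpoint of the initdb volume directly
MOUNTPOINT=$(docker volume inspect --format '{{ .Mountpoint }}' \
    docker-swarm_dolphinscheduler-postgresql-initdb)
sudo cp sql/dolphinscheduler-postgre.sql "$MOUNTPOINT"
```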

36
dockerfile/Dockerfile → docker/build/Dockerfile

@@ -37,32 +37,19 @@ RUN apk add openjdk8
ENV JAVA_HOME /usr/lib/jvm/java-1.8-openjdk
ENV PATH $JAVA_HOME/bin:$PATH
#3. install zk
RUN cd /opt && \
wget https://downloads.apache.org/zookeeper/zookeeper-3.5.7/apache-zookeeper-3.5.7-bin.tar.gz && \
tar -zxvf apache-zookeeper-3.5.7-bin.tar.gz && \
mv apache-zookeeper-3.5.7-bin zookeeper && \
mkdir -p /tmp/zookeeper && \
rm -rf ./zookeeper-*tar.gz && \
rm -rf /opt/zookeeper/conf/zoo_sample.cfg
ADD ./conf/zookeeper/zoo.cfg /opt/zookeeper/conf
ENV ZK_HOME /opt/zookeeper
ENV PATH $ZK_HOME/bin:$PATH
#3. add dolphinscheduler
ADD ./apache-dolphinscheduler-incubating-${VERSION}-dolphinscheduler-bin.tar.gz /opt/
RUN mv /opt/apache-dolphinscheduler-incubating-${VERSION}-dolphinscheduler-bin/ /opt/dolphinscheduler/
ENV DOLPHINSCHEDULER_HOME /opt/dolphinscheduler
#4. install pg
RUN apk add postgresql postgresql-contrib
#5. add dolphinscheduler
ADD ./apache-dolphinscheduler-incubating-${VERSION}-SNAPSHOT-dolphinscheduler-bin.tar.gz /opt/
RUN mv /opt/apache-dolphinscheduler-incubating-${VERSION}-SNAPSHOT-dolphinscheduler-bin/ /opt/dolphinscheduler/
ENV DOLPHINSCHEDULER_HOME /opt/dolphinscheduler
#6. modify nginx
#5. modify nginx
RUN echo "daemon off;" >> /etc/nginx/nginx.conf && \
rm -rf /etc/nginx/conf.d/*
ADD ./conf/nginx/dolphinscheduler.conf /etc/nginx/conf.d
#7. add configuration and modify permissions and set soft links
#6. add configuration and modify permissions and set soft links
ADD ./checkpoint.sh /root/checkpoint.sh
ADD ./startup-init-conf.sh /root/startup-init-conf.sh
ADD ./startup.sh /root/startup.sh
@@ -75,22 +62,21 @@ RUN chmod +x /root/checkpoint.sh && \
chmod +x /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh && \
chmod +x /opt/dolphinscheduler/script/*.sh && \
chmod +x /opt/dolphinscheduler/bin/*.sh && \
chmod +x /opt/zookeeper/bin/*.sh && \
dos2unix /root/checkpoint.sh && \
dos2unix /root/startup-init-conf.sh && \
dos2unix /root/startup.sh && \
dos2unix /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh && \
dos2unix /opt/dolphinscheduler/script/*.sh && \
dos2unix /opt/dolphinscheduler/bin/*.sh && \
dos2unix /opt/zookeeper/bin/*.sh && \
rm -rf /bin/sh && \
ln -s /bin/bash /bin/sh && \
mkdir -p /tmp/xls
#8. remove apk index cache
RUN rm -rf /var/cache/apk/*
#7. remove apk index cache and disable coredump for sudo
RUN rm -rf /var/cache/apk/* && \
echo "Set disable_coredump false" >> /etc/sudo.conf
#9. expose port
#8. expose port
EXPOSE 2181 2888 3888 5432 5678 1234 12345 50051 8888
ENTRYPOINT ["/sbin/tini", "--", "/root/startup.sh"]
ENTRYPOINT ["/sbin/tini", "--", "/root/startup.sh"]

6
dockerfile/README.md → docker/build/README.md

@@ -109,16 +109,16 @@ In Unix-Like, Example:
```bash
$ cd path/incubator-dolphinscheduler
$ sh ./dockerfile/hooks/build
$ sh ./docker/build/hooks/build
```
In Windows, Example:
```bat
c:\incubator-dolphinscheduler>.\dockerfile\hooks\build.bat
c:\incubator-dolphinscheduler>.\docker\build\hooks\build.bat
```
Please read `./dockerfile/hooks/build` `./dockerfile/hooks/build.bat` script files if you don't understand
Please read `./docker/build/hooks/build` `./docker/build/hooks/build.bat` script files if you don't understand
## Environment Variables

6
dockerfile/README_zh_CN.md → docker/build/README_zh_CN.md

@@ -109,16 +109,16 @@ dolphinscheduler frontend
```bash
$ cd path/incubator-dolphinscheduler
$ sh ./dockerfile/hooks/build
$ sh ./docker/build/hooks/build
```
On Windows, for example:
```bat
c:\incubator-dolphinscheduler>.\dockerfile\hooks\build.bat
c:\incubator-dolphinscheduler>.\docker\build\hooks\build.bat
```
If you don't understand these scripts `./dockerfile/hooks/build` `./dockerfile/hooks/build.bat`, please read their contents.
If you don't understand these scripts `./docker/build/hooks/build` `./docker/build/hooks/build.bat`, please read their contents.
## Environment Variables

0
dockerfile/checkpoint.sh → docker/build/checkpoint.sh

0
dockerfile/conf/dolphinscheduler/alert.properties.tpl → docker/build/conf/dolphinscheduler/alert.properties.tpl

0
dockerfile/conf/dolphinscheduler/application-api.properties.tpl → docker/build/conf/dolphinscheduler/application-api.properties.tpl

0
dockerfile/conf/dolphinscheduler/common.properties.tpl → docker/build/conf/dolphinscheduler/common.properties.tpl

0
dockerfile/conf/dolphinscheduler/datasource.properties.tpl → docker/build/conf/dolphinscheduler/datasource.properties.tpl

0
dockerfile/conf/dolphinscheduler/env/dolphinscheduler_env.sh → docker/build/conf/dolphinscheduler/env/dolphinscheduler_env.sh

0
dockerfile/conf/dolphinscheduler/logback/logback-alert.xml → docker/build/conf/dolphinscheduler/logback/logback-alert.xml

0
dockerfile/conf/dolphinscheduler/logback/logback-api.xml → docker/build/conf/dolphinscheduler/logback/logback-api.xml

0
dockerfile/conf/dolphinscheduler/logback/logback-master.xml → docker/build/conf/dolphinscheduler/logback/logback-master.xml

0
dockerfile/conf/dolphinscheduler/logback/logback-worker.xml → docker/build/conf/dolphinscheduler/logback/logback-worker.xml

0
dockerfile/conf/dolphinscheduler/master.properties.tpl → docker/build/conf/dolphinscheduler/master.properties.tpl

0
dockerfile/conf/dolphinscheduler/quartz.properties.tpl → docker/build/conf/dolphinscheduler/quartz.properties.tpl

0
dockerfile/conf/dolphinscheduler/worker.properties.tpl → docker/build/conf/dolphinscheduler/worker.properties.tpl

0
dockerfile/conf/dolphinscheduler/zookeeper.properties.tpl → docker/build/conf/dolphinscheduler/zookeeper.properties.tpl

0
dockerfile/conf/nginx/dolphinscheduler.conf → docker/build/conf/nginx/dolphinscheduler.conf

0
dockerfile/conf/zookeeper/zoo.cfg → docker/build/conf/zookeeper/zoo.cfg

14
dockerfile/hooks/build → docker/build/hooks/build

@@ -24,13 +24,13 @@ printenv
if [ -z "${VERSION}" ]
then
echo "set default environment variable [VERSION]"
VERSION=$(cat $(pwd)/sql/soft_version)
export VERSION=$(cat $(pwd)/pom.xml | grep '<version>' -m 1 | awk '{print $1}' | sed 's/<version>//' | sed 's/<\/version>//')
fi
if [ "${DOCKER_REPO}x" = "x" ]
then
echo "set default environment variable [DOCKER_REPO]"
DOCKER_REPO='dolphinscheduler'
export DOCKER_REPO='apache/dolphinscheduler'
fi
echo "Version: $VERSION"
@@ -42,12 +42,12 @@ echo -e "Current Directory is $(pwd)\n"
echo -e "mvn -B clean compile package -Prelease -Dmaven.test.skip=true"
mvn -B clean compile package -Prelease -Dmaven.test.skip=true
# mv dolphinscheduler-bin.tar.gz file to dockerfile directory
echo -e "mv $(pwd)/dolphinscheduler-dist/target/apache-dolphinscheduler-incubating-${VERSION}-SNAPSHOT-dolphinscheduler-bin.tar.gz $(pwd)/dockerfile/\n"
mv $(pwd)/dolphinscheduler-dist/target/apache-dolphinscheduler-incubating-${VERSION}-SNAPSHOT-dolphinscheduler-bin.tar.gz $(pwd)/dockerfile/
# mv dolphinscheduler-bin.tar.gz file to docker/build directory
echo -e "mv $(pwd)/dolphinscheduler-dist/target/apache-dolphinscheduler-incubating-${VERSION}-dolphinscheduler-bin.tar.gz $(pwd)/docker/build/\n"
mv $(pwd)/dolphinscheduler-dist/target/apache-dolphinscheduler-incubating-${VERSION}-dolphinscheduler-bin.tar.gz $(pwd)/docker/build/
# docker build
echo -e "docker build --build-arg VERSION=${VERSION} -t $DOCKER_REPO:${VERSION} $(pwd)/dockerfile/\n"
docker build --build-arg VERSION=${VERSION} -t $DOCKER_REPO:${VERSION} $(pwd)/dockerfile/
echo -e "docker build --build-arg VERSION=${VERSION} -t $DOCKER_REPO:${VERSION} $(pwd)/docker/build/\n"
sudo docker build --build-arg VERSION=${VERSION} -t $DOCKER_REPO:${VERSION} $(pwd)/docker/build/
echo "------ dolphinscheduler end - build -------"

12
dockerfile/hooks/build.bat → docker/build/hooks/build.bat

@@ -38,13 +38,13 @@ echo "call mvn clean compile package -Prelease"
call mvn clean compile package -Prelease -DskipTests=true
if "%errorlevel%"=="1" goto :mvnFailed
:: move dolphinscheduler-bin.tar.gz file to dockerfile directory
echo "move %cd%\dolphinscheduler-dist\target\apache-dolphinscheduler-incubating-%VERSION%-SNAPSHOT-dolphinscheduler-bin.tar.gz %cd%\dockerfile\"
move %cd%\dolphinscheduler-dist\target\apache-dolphinscheduler-incubating-%VERSION%-SNAPSHOT-dolphinscheduler-bin.tar.gz %cd%\dockerfile\
:: move dolphinscheduler-bin.tar.gz file to docker/build directory
echo "move %cd%\dolphinscheduler-dist\target\apache-dolphinscheduler-incubating-%VERSION%-SNAPSHOT-dolphinscheduler-bin.tar.gz %cd%\docker\build\"
move %cd%\dolphinscheduler-dist\target\apache-dolphinscheduler-incubating-%VERSION%-SNAPSHOT-dolphinscheduler-bin.tar.gz %cd%\docker\build\
:: docker build
echo "docker build --build-arg VERSION=%VERSION% -t %DOCKER_REPO%:%VERSION% %cd%\dockerfile\"
docker build --build-arg VERSION=%VERSION% -t %DOCKER_REPO%:%VERSION% %cd%\dockerfile\
echo "docker build --build-arg VERSION=%VERSION% -t %DOCKER_REPO%:%VERSION% %cd%\docker\build\"
docker build --build-arg VERSION=%VERSION% -t %DOCKER_REPO%:%VERSION% %cd%\docker\build\
if "%errorlevel%"=="1" goto :dockerBuildFailed
echo "------ dolphinscheduler end - build -------"
@@ -53,4 +53,4 @@ echo "------ dolphinscheduler end - build -------"
echo "MAVEN PACKAGE FAILED!"
:dockerBuildFailed
echo "DOCKER BUILD FAILED!"
echo "DOCKER BUILD FAILED!"

0
dockerfile/hooks/push → docker/build/hooks/push

0
dockerfile/hooks/push.bat → docker/build/hooks/push.bat

0
dockerfile/startup-init-conf.sh → docker/build/startup-init-conf.sh

53
dockerfile/startup.sh → docker/build/startup.sh

@@ -24,31 +24,6 @@ DOLPHINSCHEDULER_LOGS=${DOLPHINSCHEDULER_HOME}/logs
# start postgresql
initPostgreSQL() {
echo "checking postgresql"
if [[ "${POSTGRESQL_HOST}" = "127.0.0.1" || "${POSTGRESQL_HOST}" = "localhost" ]]; then
export PGPORT=${POSTGRESQL_PORT}
echo "start postgresql service"
rc-service postgresql restart
# role if not exists, create
flag=$(sudo -u postgres psql -tAc "SELECT 1 FROM pg_roles WHERE rolname='${POSTGRESQL_USERNAME}'")
if [ -z "${flag}" ]; then
echo "create user"
sudo -u postgres psql -tAc "create user ${POSTGRESQL_USERNAME} with password '${POSTGRESQL_PASSWORD}'"
fi
# database if not exists, create
flag=$(sudo -u postgres psql -tAc "select 1 from pg_database where datname='dolphinscheduler'")
if [ -z "${flag}" ]; then
echo "init db"
sudo -u postgres psql -tAc "create database dolphinscheduler owner ${POSTGRESQL_USERNAME}"
fi
# grant
sudo -u postgres psql -tAc "grant all privileges on database dolphinscheduler to ${POSTGRESQL_USERNAME}"
fi
echo "test postgresql service"
while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
counter=$((counter+1))
@@ -73,24 +48,18 @@ initPostgreSQL() {
# start zk
initZK() {
echo -e "checking zookeeper"
if [[ "${ZOOKEEPER_QUORUM}" = "127.0.0.1:2181" || "${ZOOKEEPER_QUORUM}" = "localhost:2181" ]]; then
echo "start local zookeeper"
/opt/zookeeper/bin/zkServer.sh restart
else
echo "connect remote zookeeper"
echo "${ZOOKEEPER_QUORUM}" | awk -F ',' 'BEGIN{ i=1 }{ while( i <= NF ){ print $i; i++ } }' | while read line; do
while ! nc -z ${line%:*} ${line#*:}; do
counter=$((counter+1))
if [ $counter == 30 ]; then
echo "Error: Couldn't connect to zookeeper."
exit 1
fi
echo "Trying to connect to zookeeper at ${line}. Attempt $counter."
sleep 5
done
echo "connect remote zookeeper"
echo "${ZOOKEEPER_QUORUM}" | awk -F ',' 'BEGIN{ i=1 }{ while( i <= NF ){ print $i; i++ } }' | while read line; do
while ! nc -z ${line%:*} ${line#*:}; do
counter=$((counter+1))
if [ $counter == 30 ]; then
echo "Error: Couldn't connect to zookeeper."
exit 1
fi
echo "Trying to connect to zookeeper at ${line}. Attempt $counter."
sleep 5
done
fi
done
}
# start nginx
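For reference, the rewritten `initZK` above splits each `host:port` entry of `ZOOKEEPER_QUORUM` with bash parameter expansion before probing it with `nc`. A short sketch of those expansions (the sample value is illustrative):

```bash
line="zk1.example.com:2181"   # one illustrative quorum entry
echo "${line%:*}"             # drops the shortest ':*' suffix -> host: zk1.example.com
echo "${line#*:}"             # drops the shortest '*:' prefix -> port: 2181
nc -z "${line%:*}" "${line#*:}" && echo "zookeeper reachable"
```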

40
docker/docker-compose.yml

@@ -1,40 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
version: '2'
services:
zookeeper:
image: zookeeper
restart: always
container_name: zookeeper
ports:
- "2181:2181"
environment:
ZOO_MY_ID: 1
ZOO_4LW_COMMANDS_WHITELIST: srvr,ruok,wchs,cons
db:
image: postgres
container_name: postgres
environment:
- POSTGRES_USER=test
- POSTGRES_PASSWORD=test
- POSTGRES_DB=dolphinscheduler
ports:
- "5432:5432"
volumes:
- pgdata:/var/lib/postgresql/data
- ./postgres/docker-entrypoint-initdb:/docker-entrypoint-initdb.d
volumes:
pgdata:

2
dockerfile/hooks/check → docker/docker-swarm/check

@@ -17,7 +17,7 @@
#
echo "------ dolphinscheduler check - server - status -------"
sleep 60
server_num=$(docker top `docker container list | grep '/sbin/tini' | awk '{print $1}'`| grep java | grep "dolphinscheduler" | awk -F 'classpath ' '{print $2}' | awk '{print $2}' | sort | uniq -c | wc -l)
server_num=$(docker-compose -f $(pwd)/docker/docker-swarm/docker-compose.yml top | grep java | grep "dolphinscheduler" | awk -F 'classpath ' '{print $2}' | awk '{print $2}' | sort | uniq -c | wc -l)
if [ $server_num -eq 5 ]
then
echo "Server all start successfully"

47
docker/docker-swarm/docker-compose.yml

@@ -30,6 +30,7 @@ services:
POSTGRESQL_DATABASE: dolphinscheduler
volumes:
- dolphinscheduler-postgresql:/bitnami/postgresql
- dolphinscheduler-postgresql-initdb:/docker-entrypoint-initdb.d
networks:
- dolphinscheduler
@@ -41,13 +42,14 @@ services:
environment:
TZ: Asia/Shanghai
ALLOW_ANONYMOUS_LOGIN: "yes"
ZOO_4LW_COMMANDS_WHITELIST: srvr,ruok,wchs,cons
volumes:
- dolphinscheduler-zookeeper:/bitnami/zookeeper
networks:
- dolphinscheduler
dolphinscheduler-api:
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
image: apache/dolphinscheduler:latest
container_name: dolphinscheduler-api
command: ["api-server"]
ports:
@@ -70,12 +72,12 @@ services:
- dolphinscheduler-postgresql
- dolphinscheduler-zookeeper
volumes:
- dolphinscheduler-api:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler
dolphinscheduler-frontend:
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
image: apache/dolphinscheduler:latest
container_name: dolphinscheduler-frontend
command: ["frontend"]
ports:
@@ -93,12 +95,12 @@ services:
depends_on:
- dolphinscheduler-api
volumes:
- dolphinscheduler-frontend:/var/log/nginx
- dolphinscheduler-logs:/var/log/nginx
networks:
- dolphinscheduler
dolphinscheduler-alert:
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
image: apache/dolphinscheduler:latest
container_name: dolphinscheduler-alert
command: ["alert-server"]
environment:
@@ -130,18 +132,18 @@ services:
start_period: 30s
depends_on:
- dolphinscheduler-postgresql
volumes:
- dolphinscheduler-alert:/opt/dolphinscheduler/logs
networks:
volumes:
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler
dolphinscheduler-master:
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
image: apache/dolphinscheduler:latest
container_name: dolphinscheduler-master
command: ["master-server"]
ports:
ports:
- 5678:5678
environment:
environment:
TZ: Asia/Shanghai
MASTER_EXEC_THREADS: "100"
MASTER_EXEC_TASK_NUM: "20"
@@ -162,22 +164,22 @@ services:
timeout: 5s
retries: 3
start_period: 30s
depends_on:
depends_on:
- dolphinscheduler-postgresql
- dolphinscheduler-zookeeper
volumes:
- dolphinscheduler-master:/opt/dolphinscheduler/logs
volumes:
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler
dolphinscheduler-worker:
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
image: apache/dolphinscheduler:latest
container_name: dolphinscheduler-worker
command: ["worker-server"]
ports:
ports:
- 1234:1234
- 50051:50051
environment:
environment:
TZ: Asia/Shanghai
WORKER_EXEC_THREADS: "100"
WORKER_HEARTBEAT_INTERVAL: "10"
@@ -209,7 +211,7 @@ services:
source: dolphinscheduler-worker-data
target: /tmp/dolphinscheduler
- type: volume
source: dolphinscheduler-worker-logs
source: dolphinscheduler-logs
target: /opt/dolphinscheduler/logs
networks:
- dolphinscheduler
@@ -220,13 +222,10 @@ networks:
volumes:
dolphinscheduler-postgresql:
dolphinscheduler-postgresql-initdb:
dolphinscheduler-zookeeper:
dolphinscheduler-api:
dolphinscheduler-frontend:
dolphinscheduler-alert:
dolphinscheduler-master:
dolphinscheduler-worker-data:
dolphinscheduler-worker-logs:
dolphinscheduler-logs:
configs:
dolphinscheduler-worker-task-env:

27
docker/docker-swarm/docker-stack.yml

@@ -42,6 +42,7 @@ services:
environment:
TZ: Asia/Shanghai
ALLOW_ANONYMOUS_LOGIN: "yes"
ZOO_4LW_COMMANDS_WHITELIST: srvr,ruok,wchs,cons
volumes:
- dolphinscheduler-zookeeper:/bitnami/zookeeper
networks:
@@ -51,7 +52,7 @@ services:
replicas: 1
dolphinscheduler-api:
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
image: apache/dolphinscheduler:latest
command: ["api-server"]
ports:
- 12345:12345
@@ -70,7 +71,7 @@ services:
retries: 3
start_period: 30s
volumes:
- dolphinscheduler-api:/opt/dolphinscheduler/logs
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler
deploy:
@@ -78,7 +79,7 @@ services:
replicas: 1
dolphinscheduler-frontend:
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
image: apache/dolphinscheduler:latest
command: ["frontend"]
ports:
- 8888:8888
@@ -93,7 +94,7 @@ services:
retries: 3
start_period: 30s
volumes:
- dolphinscheduler-frontend:/var/log/nginx
- dolphinscheduler-logs:/var/log/nginx
networks:
- dolphinscheduler
deploy:
@@ -101,7 +102,7 @@ services:
replicas: 1
dolphinscheduler-alert:
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
image: apache/dolphinscheduler:latest
command: ["alert-server"]
environment:
TZ: Asia/Shanghai
@@ -131,7 +132,7 @@ services:
retries: 3
start_period: 30s
volumes:
- dolphinscheduler-alert:/opt/dolphinscheduler/logs
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler
deploy:
@@ -139,7 +140,7 @@ services:
replicas: 1
dolphinscheduler-master:
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
image: apache/dolphinscheduler:latest
command: ["master-server"]
ports:
- 5678:5678
@@ -165,7 +166,7 @@ services:
retries: 3
start_period: 30s
volumes:
- dolphinscheduler-master:/opt/dolphinscheduler/logs
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler
deploy:
@@ -173,7 +174,7 @@ services:
replicas: 1
dolphinscheduler-worker:
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
image: apache/dolphinscheduler:latest
command: ["worker-server"]
ports:
- 1234:1234
@@ -201,7 +202,7 @@ services:
start_period: 30s
volumes:
- dolphinscheduler-worker-data:/tmp/dolphinscheduler
- dolphinscheduler-worker-logs:/opt/dolphinscheduler/logs
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
configs:
- source: dolphinscheduler-worker-task-env
target: /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh
@@ -218,12 +219,8 @@ networks:
volumes:
dolphinscheduler-postgresql:
dolphinscheduler-zookeeper:
dolphinscheduler-api:
dolphinscheduler-frontend:
dolphinscheduler-alert:
dolphinscheduler-master:
dolphinscheduler-worker-data:
dolphinscheduler-worker-logs:
dolphinscheduler-logs:
configs:
dolphinscheduler-worker-task-env:

6
docker/kubernetes/dolphinscheduler/values.yaml

@@ -25,9 +25,9 @@ fullnameOverride: ""
timezone: "Asia/Shanghai"
image:
registry: "docker.io"
registry: "apache"
repository: "dolphinscheduler"
tag: "1.3.0"
tag: "latest"
pullPolicy: "IfNotPresent"
imagePullSecrets: []
@@ -56,6 +56,8 @@ externalDatabase:
zookeeper:
enabled: true
taskQueue: "zookeeper"
config:
ZOO_4LW_COMMANDS_WHITELIST: srvr,ruok,wchs,cons
service:
port: "2181"
persistence:

763
docker/postgres/docker-entrypoint-initdb/init.sql

@@ -1,763 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
DROP TABLE IF EXISTS QRTZ_FIRED_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_PAUSED_TRIGGER_GRPS;
DROP TABLE IF EXISTS QRTZ_SCHEDULER_STATE;
DROP TABLE IF EXISTS QRTZ_LOCKS;
DROP TABLE IF EXISTS QRTZ_SIMPLE_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_SIMPROP_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_CRON_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_BLOB_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_JOB_DETAILS;
DROP TABLE IF EXISTS QRTZ_CALENDARS;
CREATE TABLE QRTZ_JOB_DETAILS(
SCHED_NAME character varying(120) NOT NULL,
JOB_NAME character varying(200) NOT NULL,
JOB_GROUP character varying(200) NOT NULL,
DESCRIPTION character varying(250) NULL,
JOB_CLASS_NAME character varying(250) NOT NULL,
IS_DURABLE boolean NOT NULL,
IS_NONCONCURRENT boolean NOT NULL,
IS_UPDATE_DATA boolean NOT NULL,
REQUESTS_RECOVERY boolean NOT NULL,
JOB_DATA bytea NULL);
alter table QRTZ_JOB_DETAILS add primary key(SCHED_NAME,JOB_NAME,JOB_GROUP);
CREATE TABLE QRTZ_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
JOB_NAME character varying(200) NOT NULL,
JOB_GROUP character varying(200) NOT NULL,
DESCRIPTION character varying(250) NULL,
NEXT_FIRE_TIME BIGINT NULL,
PREV_FIRE_TIME BIGINT NULL,
PRIORITY INTEGER NULL,
TRIGGER_STATE character varying(16) NOT NULL,
TRIGGER_TYPE character varying(8) NOT NULL,
START_TIME BIGINT NOT NULL,
END_TIME BIGINT NULL,
CALENDAR_NAME character varying(200) NULL,
MISFIRE_INSTR SMALLINT NULL,
JOB_DATA bytea NULL) ;
alter table QRTZ_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_SIMPLE_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
REPEAT_COUNT BIGINT NOT NULL,
REPEAT_INTERVAL BIGINT NOT NULL,
TIMES_TRIGGERED BIGINT NOT NULL) ;
alter table QRTZ_SIMPLE_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_CRON_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
CRON_EXPRESSION character varying(120) NOT NULL,
TIME_ZONE_ID character varying(80)) ;
alter table QRTZ_CRON_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_SIMPROP_TRIGGERS
(
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
STR_PROP_1 character varying(512) NULL,
STR_PROP_2 character varying(512) NULL,
STR_PROP_3 character varying(512) NULL,
INT_PROP_1 INT NULL,
INT_PROP_2 INT NULL,
LONG_PROP_1 BIGINT NULL,
LONG_PROP_2 BIGINT NULL,
DEC_PROP_1 NUMERIC(13,4) NULL,
DEC_PROP_2 NUMERIC(13,4) NULL,
BOOL_PROP_1 boolean NULL,
BOOL_PROP_2 boolean NULL) ;
alter table QRTZ_SIMPROP_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_BLOB_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
BLOB_DATA bytea NULL) ;
alter table QRTZ_BLOB_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_CALENDARS (
SCHED_NAME character varying(120) NOT NULL,
CALENDAR_NAME character varying(200) NOT NULL,
CALENDAR bytea NOT NULL) ;
alter table QRTZ_CALENDARS add primary key(SCHED_NAME,CALENDAR_NAME);
CREATE TABLE QRTZ_PAUSED_TRIGGER_GRPS (
SCHED_NAME character varying(120) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL) ;
alter table QRTZ_PAUSED_TRIGGER_GRPS add primary key(SCHED_NAME,TRIGGER_GROUP);
CREATE TABLE QRTZ_FIRED_TRIGGERS (
SCHED_NAME character varying(120) NOT NULL,
ENTRY_ID character varying(95) NOT NULL,
TRIGGER_NAME character varying(200) NOT NULL,
TRIGGER_GROUP character varying(200) NOT NULL,
INSTANCE_NAME character varying(200) NOT NULL,
FIRED_TIME BIGINT NOT NULL,
SCHED_TIME BIGINT NOT NULL,
PRIORITY INTEGER NOT NULL,
STATE character varying(16) NOT NULL,
JOB_NAME character varying(200) NULL,
JOB_GROUP character varying(200) NULL,
IS_NONCONCURRENT boolean NULL,
REQUESTS_RECOVERY boolean NULL) ;
alter table QRTZ_FIRED_TRIGGERS add primary key(SCHED_NAME,ENTRY_ID);
CREATE TABLE QRTZ_SCHEDULER_STATE (
SCHED_NAME character varying(120) NOT NULL,
INSTANCE_NAME character varying(200) NOT NULL,
LAST_CHECKIN_TIME BIGINT NOT NULL,
CHECKIN_INTERVAL BIGINT NOT NULL) ;
alter table QRTZ_SCHEDULER_STATE add primary key(SCHED_NAME,INSTANCE_NAME);
CREATE TABLE QRTZ_LOCKS (
SCHED_NAME character varying(120) NOT NULL,
LOCK_NAME character varying(40) NOT NULL) ;
alter table QRTZ_LOCKS add primary key(SCHED_NAME,LOCK_NAME);
CREATE INDEX IDX_QRTZ_J_REQ_RECOVERY ON QRTZ_JOB_DETAILS(SCHED_NAME,REQUESTS_RECOVERY);
CREATE INDEX IDX_QRTZ_J_GRP ON QRTZ_JOB_DETAILS(SCHED_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_J ON QRTZ_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_JG ON QRTZ_TRIGGERS(SCHED_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_C ON QRTZ_TRIGGERS(SCHED_NAME,CALENDAR_NAME);
CREATE INDEX IDX_QRTZ_T_G ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP);
CREATE INDEX IDX_QRTZ_T_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_N_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_N_G_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_NEXT_FIRE_TIME ON QRTZ_TRIGGERS(SCHED_NAME,NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_ST ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE,NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE_GRP ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_FT_TRIG_INST_NAME ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME);
CREATE INDEX IDX_QRTZ_FT_INST_JOB_REQ_RCVRY ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME,REQUESTS_RECOVERY);
CREATE INDEX IDX_QRTZ_FT_J_G ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_FT_JG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_FT_T_G ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE INDEX IDX_QRTZ_FT_TG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_GROUP);
--
-- Table structure for table t_ds_access_token
--
DROP TABLE IF EXISTS t_ds_access_token;
CREATE TABLE t_ds_access_token (
id int NOT NULL ,
user_id int DEFAULT NULL ,
token varchar(64) DEFAULT NULL ,
expire_time timestamp DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_alert
--
DROP TABLE IF EXISTS t_ds_alert;
CREATE TABLE t_ds_alert (
id int NOT NULL ,
title varchar(64) DEFAULT NULL ,
show_type int DEFAULT NULL ,
content text ,
alert_type int DEFAULT NULL ,
alert_status int DEFAULT '0' ,
log text ,
alertgroup_id int DEFAULT NULL ,
receivers text ,
receivers_cc text ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_alertgroup
--
DROP TABLE IF EXISTS t_ds_alertgroup;
CREATE TABLE t_ds_alertgroup (
id int NOT NULL ,
group_name varchar(255) DEFAULT NULL ,
group_type int DEFAULT NULL ,
description varchar(255) DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_command
--
DROP TABLE IF EXISTS t_ds_command;
CREATE TABLE t_ds_command (
id int NOT NULL ,
command_type int DEFAULT NULL ,
process_definition_id int DEFAULT NULL ,
command_param text ,
task_depend_type int DEFAULT NULL ,
failure_strategy int DEFAULT '0' ,
warning_type int DEFAULT '0' ,
warning_group_id int DEFAULT NULL ,
schedule_time timestamp DEFAULT NULL ,
start_time timestamp DEFAULT NULL ,
executor_id int DEFAULT NULL ,
dependence varchar(255) DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
process_instance_priority int DEFAULT NULL ,
worker_group varchar(64),
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_datasource
--
DROP TABLE IF EXISTS t_ds_datasource;
CREATE TABLE t_ds_datasource (
id int NOT NULL ,
name varchar(64) NOT NULL ,
note varchar(256) DEFAULT NULL ,
type int NOT NULL ,
user_id int NOT NULL ,
connection_params text NOT NULL ,
create_time timestamp NOT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_error_command
--
DROP TABLE IF EXISTS t_ds_error_command;
CREATE TABLE t_ds_error_command (
id int NOT NULL ,
command_type int DEFAULT NULL ,
executor_id int DEFAULT NULL ,
process_definition_id int DEFAULT NULL ,
command_param text ,
task_depend_type int DEFAULT NULL ,
failure_strategy int DEFAULT '0' ,
warning_type int DEFAULT '0' ,
warning_group_id int DEFAULT NULL ,
schedule_time timestamp DEFAULT NULL ,
start_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
dependence text ,
process_instance_priority int DEFAULT NULL ,
worker_group varchar(64),
message text ,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_master_server
--
--
-- Table structure for table t_ds_process_definition
--
DROP TABLE IF EXISTS t_ds_process_definition;
CREATE TABLE t_ds_process_definition (
id int NOT NULL ,
name varchar(255) DEFAULT NULL ,
version int DEFAULT NULL ,
release_state int DEFAULT NULL ,
project_id int DEFAULT NULL ,
user_id int DEFAULT NULL ,
process_definition_json text ,
description text ,
global_params text ,
flag int DEFAULT NULL ,
locations text ,
connects text ,
receivers text ,
receivers_cc text ,
create_time timestamp DEFAULT NULL ,
timeout int DEFAULT '0' ,
tenant_id int NOT NULL DEFAULT '-1' ,
update_time timestamp DEFAULT NULL ,
modify_by varchar(36) DEFAULT '' ,
resource_ids varchar(64),
PRIMARY KEY (id)
) ;
create index process_definition_index on t_ds_process_definition (project_id,id);
--
-- Table structure for table t_ds_process_instance
--
DROP TABLE IF EXISTS t_ds_process_instance;
CREATE TABLE t_ds_process_instance (
id int NOT NULL ,
name varchar(255) DEFAULT NULL ,
process_definition_id int DEFAULT NULL ,
state int DEFAULT NULL ,
recovery int DEFAULT NULL ,
start_time timestamp DEFAULT NULL ,
end_time timestamp DEFAULT NULL ,
run_times int DEFAULT NULL ,
host varchar(45) DEFAULT NULL ,
command_type int DEFAULT NULL ,
command_param text ,
task_depend_type int DEFAULT NULL ,
max_try_times int DEFAULT '0' ,
failure_strategy int DEFAULT '0' ,
warning_type int DEFAULT '0' ,
warning_group_id int DEFAULT NULL ,
schedule_time timestamp DEFAULT NULL ,
command_start_time timestamp DEFAULT NULL ,
global_params text ,
process_instance_json text ,
flag int DEFAULT '1' ,
update_time timestamp NULL ,
is_sub_process int DEFAULT '0' ,
executor_id int NOT NULL ,
locations text ,
connects text ,
history_cmd text ,
dependence_schedule_times text ,
process_instance_priority int DEFAULT NULL ,
worker_group varchar(64) ,
timeout int DEFAULT '0' ,
tenant_id int NOT NULL DEFAULT '-1' ,
PRIMARY KEY (id)
) ;
create index process_instance_index on t_ds_process_instance (process_definition_id,id);
create index start_time_index on t_ds_process_instance (start_time);
--
-- Table structure for table t_ds_project
--
DROP TABLE IF EXISTS t_ds_project;
CREATE TABLE t_ds_project (
id int NOT NULL ,
name varchar(100) DEFAULT NULL ,
description varchar(200) DEFAULT NULL ,
user_id int DEFAULT NULL ,
flag int DEFAULT '1' ,
create_time timestamp DEFAULT CURRENT_TIMESTAMP ,
update_time timestamp DEFAULT CURRENT_TIMESTAMP ,
PRIMARY KEY (id)
) ;
create index user_id_index on t_ds_project (user_id);
--
-- Table structure for table t_ds_queue
--
DROP TABLE IF EXISTS t_ds_queue;
CREATE TABLE t_ds_queue (
id int NOT NULL ,
queue_name varchar(64) DEFAULT NULL ,
queue varchar(64) DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_relation_datasource_user
--
DROP TABLE IF EXISTS t_ds_relation_datasource_user;
CREATE TABLE t_ds_relation_datasource_user (
id int NOT NULL ,
user_id int NOT NULL ,
datasource_id int DEFAULT NULL ,
perm int DEFAULT '1' ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
;
--
-- Table structure for table t_ds_relation_process_instance
--
DROP TABLE IF EXISTS t_ds_relation_process_instance;
CREATE TABLE t_ds_relation_process_instance (
id int NOT NULL ,
parent_process_instance_id int DEFAULT NULL ,
parent_task_instance_id int DEFAULT NULL ,
process_instance_id int DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_relation_project_user
--
DROP TABLE IF EXISTS t_ds_relation_project_user;
CREATE TABLE t_ds_relation_project_user (
id int NOT NULL ,
user_id int NOT NULL ,
project_id int DEFAULT NULL ,
perm int DEFAULT '1' ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
create index relation_project_user_id_index on t_ds_relation_project_user (user_id);
--
-- Table structure for table t_ds_relation_resources_user
--
DROP TABLE IF EXISTS t_ds_relation_resources_user;
CREATE TABLE t_ds_relation_resources_user (
id int NOT NULL ,
user_id int NOT NULL ,
resources_id int DEFAULT NULL ,
perm int DEFAULT '1' ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_relation_udfs_user
--
DROP TABLE IF EXISTS t_ds_relation_udfs_user;
CREATE TABLE t_ds_relation_udfs_user (
id int NOT NULL ,
user_id int NOT NULL ,
udf_id int DEFAULT NULL ,
perm int DEFAULT '1' ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
;
--
-- Table structure for table t_ds_relation_user_alertgroup
--
DROP TABLE IF EXISTS t_ds_relation_user_alertgroup;
CREATE TABLE t_ds_relation_user_alertgroup (
id int NOT NULL,
alertgroup_id int DEFAULT NULL,
user_id int DEFAULT NULL,
create_time timestamp DEFAULT NULL,
update_time timestamp DEFAULT NULL,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_resources
--
DROP TABLE IF EXISTS t_ds_resources;
CREATE TABLE t_ds_resources (
id int NOT NULL ,
alias varchar(64) DEFAULT NULL ,
file_name varchar(64) DEFAULT NULL ,
description varchar(256) DEFAULT NULL ,
user_id int DEFAULT NULL ,
type int DEFAULT NULL ,
size bigint DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
pid int,
full_name varchar(64),
is_directory int,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_schedules
--
DROP TABLE IF EXISTS t_ds_schedules;
CREATE TABLE t_ds_schedules (
id int NOT NULL ,
process_definition_id int NOT NULL ,
start_time timestamp NOT NULL ,
end_time timestamp NOT NULL ,
crontab varchar(256) NOT NULL ,
failure_strategy int NOT NULL ,
user_id int NOT NULL ,
release_state int NOT NULL ,
warning_type int NOT NULL ,
warning_group_id int DEFAULT NULL ,
process_instance_priority int DEFAULT NULL ,
worker_group varchar(64),
create_time timestamp NOT NULL ,
update_time timestamp NOT NULL ,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_session
--
DROP TABLE IF EXISTS t_ds_session;
CREATE TABLE t_ds_session (
id varchar(64) NOT NULL ,
user_id int DEFAULT NULL ,
ip varchar(45) DEFAULT NULL ,
last_login_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_task_instance
--
DROP TABLE IF EXISTS t_ds_task_instance;
CREATE TABLE t_ds_task_instance (
id int NOT NULL ,
name varchar(255) DEFAULT NULL ,
task_type varchar(64) DEFAULT NULL ,
process_definition_id int DEFAULT NULL ,
process_instance_id int DEFAULT NULL ,
task_json text ,
state int DEFAULT NULL ,
submit_time timestamp DEFAULT NULL ,
start_time timestamp DEFAULT NULL ,
end_time timestamp DEFAULT NULL ,
host varchar(45) DEFAULT NULL ,
execute_path varchar(200) DEFAULT NULL ,
log_path varchar(200) DEFAULT NULL ,
alert_flag int DEFAULT NULL ,
retry_times int DEFAULT '0' ,
pid int DEFAULT NULL ,
app_link varchar(255) DEFAULT NULL ,
flag int DEFAULT '1' ,
retry_interval int DEFAULT NULL ,
max_retry_times int DEFAULT NULL ,
task_instance_priority int DEFAULT NULL ,
worker_group varchar(64),
executor_id int DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_tenant
--
DROP TABLE IF EXISTS t_ds_tenant;
CREATE TABLE t_ds_tenant (
id int NOT NULL ,
tenant_code varchar(64) DEFAULT NULL ,
tenant_name varchar(64) DEFAULT NULL ,
description varchar(256) DEFAULT NULL ,
queue_id int DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_udfs
--
DROP TABLE IF EXISTS t_ds_udfs;
CREATE TABLE t_ds_udfs (
id int NOT NULL ,
user_id int NOT NULL ,
func_name varchar(100) NOT NULL ,
class_name varchar(255) NOT NULL ,
type int NOT NULL ,
arg_types varchar(255) DEFAULT NULL ,
database varchar(255) DEFAULT NULL ,
description varchar(255) DEFAULT NULL ,
resource_id int NOT NULL ,
resource_name varchar(255) NOT NULL ,
create_time timestamp NOT NULL ,
update_time timestamp NOT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_user
--
DROP TABLE IF EXISTS t_ds_user;
CREATE TABLE t_ds_user (
id int NOT NULL ,
user_name varchar(64) DEFAULT NULL ,
user_password varchar(64) DEFAULT NULL ,
user_type int DEFAULT NULL ,
email varchar(64) DEFAULT NULL ,
phone varchar(11) DEFAULT NULL ,
tenant_id int DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
queue varchar(64) DEFAULT NULL ,
state int DEFAULT 1 ,
PRIMARY KEY (id)
);
--
-- Table structure for table t_ds_version
--
DROP TABLE IF EXISTS t_ds_version;
CREATE TABLE t_ds_version (
id int NOT NULL ,
version varchar(200) NOT NULL,
PRIMARY KEY (id)
) ;
create index version_index on t_ds_version(version);
--
-- Table structure for table t_ds_worker_group
--
DROP TABLE IF EXISTS t_ds_worker_group;
CREATE TABLE t_ds_worker_group (
id bigint NOT NULL ,
name varchar(256) DEFAULT NULL ,
ip_list varchar(256) DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_worker_server
--
DROP TABLE IF EXISTS t_ds_worker_server;
CREATE TABLE t_ds_worker_server (
id int NOT NULL ,
host varchar(45) DEFAULT NULL ,
port int DEFAULT NULL ,
zk_directory varchar(64) DEFAULT NULL ,
res_info varchar(255) DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
last_heartbeat_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
DROP SEQUENCE IF EXISTS t_ds_access_token_id_sequence;
CREATE SEQUENCE t_ds_access_token_id_sequence;
ALTER TABLE t_ds_access_token ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_access_token_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_alert_id_sequence;
CREATE SEQUENCE t_ds_alert_id_sequence;
ALTER TABLE t_ds_alert ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_alert_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_alertgroup_id_sequence;
CREATE SEQUENCE t_ds_alertgroup_id_sequence;
ALTER TABLE t_ds_alertgroup ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_alertgroup_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_command_id_sequence;
CREATE SEQUENCE t_ds_command_id_sequence;
ALTER TABLE t_ds_command ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_command_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_datasource_id_sequence;
CREATE SEQUENCE t_ds_datasource_id_sequence;
ALTER TABLE t_ds_datasource ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_datasource_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_process_definition_id_sequence;
CREATE SEQUENCE t_ds_process_definition_id_sequence;
ALTER TABLE t_ds_process_definition ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_process_instance_id_sequence;
CREATE SEQUENCE t_ds_process_instance_id_sequence;
ALTER TABLE t_ds_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_instance_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_project_id_sequence;
CREATE SEQUENCE t_ds_project_id_sequence;
ALTER TABLE t_ds_project ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_project_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_queue_id_sequence;
CREATE SEQUENCE t_ds_queue_id_sequence;
ALTER TABLE t_ds_queue ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_queue_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_datasource_user_id_sequence;
CREATE SEQUENCE t_ds_relation_datasource_user_id_sequence;
ALTER TABLE t_ds_relation_datasource_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_datasource_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_process_instance_id_sequence;
CREATE SEQUENCE t_ds_relation_process_instance_id_sequence;
ALTER TABLE t_ds_relation_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_process_instance_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_project_user_id_sequence;
CREATE SEQUENCE t_ds_relation_project_user_id_sequence;
ALTER TABLE t_ds_relation_project_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_project_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_resources_user_id_sequence;
CREATE SEQUENCE t_ds_relation_resources_user_id_sequence;
ALTER TABLE t_ds_relation_resources_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_resources_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_udfs_user_id_sequence;
CREATE SEQUENCE t_ds_relation_udfs_user_id_sequence;
ALTER TABLE t_ds_relation_udfs_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_udfs_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_relation_user_alertgroup_id_sequence;
CREATE SEQUENCE t_ds_relation_user_alertgroup_id_sequence;
ALTER TABLE t_ds_relation_user_alertgroup ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_user_alertgroup_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_resources_id_sequence;
CREATE SEQUENCE t_ds_resources_id_sequence;
ALTER TABLE t_ds_resources ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_resources_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_schedules_id_sequence;
CREATE SEQUENCE t_ds_schedules_id_sequence;
ALTER TABLE t_ds_schedules ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_schedules_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_task_instance_id_sequence;
CREATE SEQUENCE t_ds_task_instance_id_sequence;
ALTER TABLE t_ds_task_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_task_instance_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_tenant_id_sequence;
CREATE SEQUENCE t_ds_tenant_id_sequence;
ALTER TABLE t_ds_tenant ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_tenant_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_udfs_id_sequence;
CREATE SEQUENCE t_ds_udfs_id_sequence;
ALTER TABLE t_ds_udfs ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_udfs_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_user_id_sequence;
CREATE SEQUENCE t_ds_user_id_sequence;
ALTER TABLE t_ds_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_user_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_version_id_sequence;
CREATE SEQUENCE t_ds_version_id_sequence;
ALTER TABLE t_ds_version ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_version_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_worker_group_id_sequence;
CREATE SEQUENCE t_ds_worker_group_id_sequence;
ALTER TABLE t_ds_worker_group ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_worker_group_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_worker_server_id_sequence;
CREATE SEQUENCE t_ds_worker_server_id_sequence;
ALTER TABLE t_ds_worker_server ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_worker_server_id_sequence');
-- Records of t_ds_user, user : admin , password : dolphinscheduler123
INSERT INTO t_ds_user(user_name,user_password,user_type,email,phone,state,tenant_id,create_time,update_time) VALUES ('admin', '7ad2410b2f4c074479a8937a28a22b8f', '0', 'xxx@qq.com', 'xx', 1, '0', '2018-03-27 15:48:50', '2018-10-24 17:40:22');
-- Records of t_ds_alertgroup,dolphinscheduler warning group
INSERT INTO t_ds_alertgroup(group_name,group_type,description,create_time,update_time) VALUES ('dolphinscheduler warning group', '0', 'dolphinscheduler warning group','2018-11-29 10:20:39', '2018-11-29 10:20:39');
INSERT INTO t_ds_relation_user_alertgroup(alertgroup_id,user_id,create_time,update_time) VALUES ( '1', '1', '2018-11-29 10:22:33', '2018-11-29 10:22:33');
-- Records of t_ds_queue,default queue name : default
INSERT INTO t_ds_queue(queue_name,queue,create_time,update_time) VALUES ('default', 'default','2018-11-29 10:22:33', '2018-11-29 10:22:33');
-- Records of t_ds_version
INSERT INTO t_ds_version(version) VALUES ('2.0.0');

53
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/manager/DingTalkManager.java

@@ -0,0 +1,53 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.alert.manager;
import org.apache.dolphinscheduler.alert.utils.Constants;
import org.apache.dolphinscheduler.alert.utils.DingTalkUtils;
import org.apache.dolphinscheduler.plugin.model.AlertInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
/**
* Ding Talk Manager
*/
public class DingTalkManager {
private static final Logger logger = LoggerFactory.getLogger(DingTalkManager.class);
public Map<String,Object> send(AlertInfo alert) {
Map<String,Object> retMap = new HashMap<>();
retMap.put(Constants.STATUS, false);
logger.info("send message {}", alert.getAlertData().getTitle());
try {
String msg = buildMessage(alert);
DingTalkUtils.sendDingTalkMsg(msg, Constants.UTF_8);
// mark success only after the message was actually sent
retMap.put(Constants.STATUS, true);
} catch (IOException e) {
logger.error(e.getMessage(), e);
}
return retMap;
}
private String buildMessage(AlertInfo alert) {
String msg = alert.getAlertData().getContent();
return msg;
}
}

8
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/plugin/EmailAlertPlugin.java

@@ -16,9 +16,11 @@
*/
package org.apache.dolphinscheduler.alert.plugin;
import org.apache.dolphinscheduler.alert.manager.DingTalkManager;
import org.apache.dolphinscheduler.alert.manager.EmailManager;
import org.apache.dolphinscheduler.alert.manager.EnterpriseWeChatManager;
import org.apache.dolphinscheduler.alert.utils.Constants;
import org.apache.dolphinscheduler.alert.utils.DingTalkUtils;
import org.apache.dolphinscheduler.alert.utils.EnterpriseWeChatUtils;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
@@ -44,6 +46,7 @@ public class EmailAlertPlugin implements AlertPlugin {
private static final EmailManager emailManager = new EmailManager();
private static final EnterpriseWeChatManager weChatManager = new EnterpriseWeChatManager();
private static final DingTalkManager dingTalkManager = new DingTalkManager();
public EmailAlertPlugin() {
this.pluginName = new PluginName();
@@ -121,6 +124,11 @@ public class EmailAlertPlugin implements AlertPlugin {
logger.error(e.getMessage(), e);
}
}
if (DingTalkUtils.isEnableDingTalk) {
logger.info("Ding Talk is enable.");
dingTalkManager.send(info);
}
} else {
retMaps.put(Constants.MESSAGE, "alert send error.");

18
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/Constants.java

@@ -156,6 +156,23 @@ public class Constants {
public static final String ENTERPRISE_WECHAT_AGENT_ID = "enterprise.wechat.agent.id";
public static final String ENTERPRISE_WECHAT_USERS = "enterprise.wechat.users";
public static final String DINGTALK_WEBHOOK = "dingtalk.webhook";
public static final String DINGTALK_KEYWORD = "dingtalk.keyword";
public static final String DINGTALK_PROXY_ENABLE = "dingtalk.isEnableProxy";
public static final String DINGTALK_PROXY = "dingtalk.proxy";
public static final String DINGTALK_PORT = "dingtalk.port";
public static final String DINGTALK_USER = "dingtalk.user";
public static final String DINGTALK_PASSWORD = "dingtalk.password";
public static final String DINGTALK_ENABLE = "dingtalk.isEnable";
/**
* plugin config
@@ -173,4 +190,5 @@ public class Constants {
public static final String PLUGIN_DEFAULT_EMAIL_RECEIVERCCS = "receiverCcs";
public static final String RETMAP_MSG = "msg";
}

136
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/DingTalkUtils.java

@@ -0,0 +1,136 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.alert.utils;
import com.alibaba.fastjson.JSON;
import org.apache.commons.codec.binary.StringUtils;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
/**
* DingTalkUtils
* Supports sending messages to DingTalk via its robot message-push function.
* Supports proxy settings.
*/
public class DingTalkUtils {
public static final Logger logger = LoggerFactory.getLogger(DingTalkUtils.class);
public static final boolean isEnableDingTalk = PropertyUtils.getBoolean(Constants.DINGTALK_ENABLE);
private static final String dingTaskUrl = PropertyUtils.getString(Constants.DINGTALK_WEBHOOK);
private static final String keyword = PropertyUtils.getString(Constants.DINGTALK_KEYWORD);
private static final Boolean isEnableProxy = PropertyUtils.getBoolean(Constants.DINGTALK_PROXY_ENABLE);
private static final String proxy = PropertyUtils.getString(Constants.DINGTALK_PROXY);
private static final String user = PropertyUtils.getString(Constants.DINGTALK_USER);
private static final String passwd = PropertyUtils.getString(Constants.DINGTALK_PASSWORD);
private static final Integer port = PropertyUtils.getInt(Constants.DINGTALK_PORT);
/**
* Send message interface.
* Only the text message format is supported for now.
* @param msg message context to send
* @param charset charset type
* @return result of sending msg
* @throws IOException the IOException
*/
public static String sendDingTalkMsg(String msg, String charset) throws IOException {
String msgToJson = textToJsonString(msg + "#" + keyword);
HttpPost httpPost = constructHttpPost(msgToJson, charset);
CloseableHttpClient httpClient;
if (isEnableProxy) {
httpClient = getProxyClient();
RequestConfig rcf = getProxyConfig();
httpPost.setConfig(rcf);
} else {
httpClient = getDefaultClient();
}
try {
CloseableHttpResponse response = httpClient.execute(httpPost);
String resp;
try {
HttpEntity entity = response.getEntity();
resp = EntityUtils.toString(entity, charset);
EntityUtils.consume(entity);
} finally {
response.close();
}
logger.info("Ding Talk send [{}], resp:{%s}", msg, resp);
return resp;
} finally {
httpClient.close();
}
}
public static HttpPost constructHttpPost(String msg, String charset) {
HttpPost post = new HttpPost(dingTaskUrl);
StringEntity entity = new StringEntity(msg, charset);
post.setEntity(entity);
post.addHeader("Content-Type", "application/json; charset=utf-8");
return post;
}
public static CloseableHttpClient getProxyClient() {
HttpHost httpProxy = new HttpHost(proxy, port);
CredentialsProvider provider = new BasicCredentialsProvider();
provider.setCredentials(new AuthScope(httpProxy), new UsernamePasswordCredentials(user, passwd));
CloseableHttpClient httpClient = HttpClients.custom().setDefaultCredentialsProvider(provider).build();
return httpClient;
}
public static CloseableHttpClient getDefaultClient() {
return HttpClients.createDefault();
}
public static RequestConfig getProxyConfig() {
HttpHost httpProxy = new HttpHost(proxy, port);
return RequestConfig.custom().setProxy(httpProxy).build();
}
public static String textToJsonString(String text) {
Map<String, Object> items = new HashMap<String, Object>();
items.put("msgtype", "text");
Map<String, String> textContent = new HashMap<String, String>();
byte[] byt = StringUtils.getBytesUtf8(text);
String txt = StringUtils.newStringUtf8(byt);
textContent.put("content", txt);
items.put("text", textContent);
return JSON.toJSONString(items);
}
}
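A hedged usage sketch of the utility above: send a message and check DingTalk's errmsg field in the response, as the (disabled) test further down also does. The class and property names come from this diff; the surrounding main class is illustrative only. DingTalk robots answer with {"errcode":0,"errmsg":"ok"} on success.

```java
import com.alibaba.fastjson.JSON;
import org.apache.dolphinscheduler.alert.utils.DingTalkUtils;

import java.io.IOException;

public class DingTalkSendExample {
    public static void main(String[] args) throws IOException {
        // Assumes dingtalk.isEnable=true and a valid dingtalk.webhook in alert.properties.
        if (DingTalkUtils.isEnableDingTalk) {
            String resp = DingTalkUtils.sendDingTalkMsg("workflow failed: task_x", "UTF-8");
            String errmsg = JSON.parseObject(resp).getString("errmsg");
            System.out.println("send result: " + errmsg); // "ok" on success
        }
    }
}
```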

11
dolphinscheduler-alert/src/main/resources/alert.properties

@@ -36,6 +36,7 @@ mail.smtp.ssl.trust=xxx.xxx.com
# Enterprise WeChat configuration
enterprise.wechat.enable=false
#enterprise.wechat.corp.id=xxxxxxx
#enterprise.wechat.secret=xxxxxxx
#enterprise.wechat.agent.id=xxxxxxx
@@ -47,3 +48,13 @@ enterprise.wechat.enable=false
plugin.dir=/Users/xx/your/path/to/plugin/dir
# DingTalk configuration
dingtalk.isEnable=false
dingtalk.webhook=https://oapi.dingtalk.com/robot/send?access_token=xxxxx
dingtalk.keyword=
dingtalk.proxy=
dingtalk.port=80
dingtalk.user=
dingtalk.password=
dingtalk.isEnableProxy=false
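Since dingtalk.webhook must be set whenever dingtalk.isEnable is true (and dingtalk.proxy whenever dingtalk.isEnableProxy is true), a small startup guard can fail fast on misconfiguration. This is an illustrative sketch, not part of the commit:

```java
package org.apache.dolphinscheduler.alert.utils;

// Illustrative sketch only: validates the DingTalk settings above at startup.
public class DingTalkConfigCheck {
    public static void main(String[] args) {
        boolean enabled = PropertyUtils.getBoolean(Constants.DINGTALK_ENABLE);
        String webhook = PropertyUtils.getString(Constants.DINGTALK_WEBHOOK);
        if (enabled && (webhook == null || webhook.isEmpty())) {
            throw new IllegalStateException("dingtalk.isEnable=true but dingtalk.webhook is empty");
        }
        boolean proxyEnabled = PropertyUtils.getBoolean(Constants.DINGTALK_PROXY_ENABLE);
        String proxy = PropertyUtils.getString(Constants.DINGTALK_PROXY);
        if (enabled && proxyEnabled && (proxy == null || proxy.isEmpty())) {
            throw new IllegalStateException("dingtalk.isEnableProxy=true but dingtalk.proxy is empty");
        }
    }
}
```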

125
dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/utils/DingTalkUtilsTest.java

@@ -0,0 +1,125 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.alert.utils;
import com.alibaba.fastjson.JSON;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.impl.client.CloseableHttpClient;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mockito;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PowerMockIgnore;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import static org.junit.Assert.*;
@PrepareForTest(PropertyUtils.class)
@RunWith(PowerMockRunner.class)
@PowerMockIgnore("javax.net.ssl.*")
public class DingTalkUtilsTest {
Logger logger = LoggerFactory.getLogger(DingTalkUtilsTest.class);
private static final String mockUrl = "https://oapi.dingtalk.com/robot/send?access_token=test";
private static final String mockKeyWords = "onway";
private static final String msg = "ding talk test";
@Before
public void init(){
PowerMockito.mockStatic(PropertyUtils.class);
Mockito.when(PropertyUtils.getString(Constants.DINGTALK_WEBHOOK)).thenReturn(mockUrl);
Mockito.when(PropertyUtils.getString(Constants.DINGTALK_KEYWORD)).thenReturn(mockKeyWords);
Mockito.when(PropertyUtils.getBoolean(Constants.DINGTALK_PROXY_ENABLE)).thenReturn(true);
Mockito.when(PropertyUtils.getString(Constants.DINGTALK_PROXY)).thenReturn("proxy.com.cn");
Mockito.when(PropertyUtils.getString(Constants.DINGTALK_USER)).thenReturn("user");
Mockito.when(PropertyUtils.getString(Constants.DINGTALK_PASSWORD)).thenReturn("pswd");
Mockito.when(PropertyUtils.getInt(Constants.DINGTALK_PORT)).thenReturn(80);
}
// @Test
// @Ignore
// public void testSendMsg() {
// try {
// String msgTosend = "msg to send";
// logger.info(PropertyUtils.getString(Constants.DINGTALK_WEBHOOK));
// String rsp = DingTalkUtils.sendDingTalkMsg(msgTosend, Constants.UTF_8);
// logger.info("send msg result:{}",rsp);
// String errmsg = JSON.parseObject(rsp).getString("errmsg");
// Assert.assertEquals("ok", errmsg);
// } catch (Exception e) {
// e.printStackTrace();
// }
// }
@Test
public void testCreateDefaultClient() {
CloseableHttpClient client = DingTalkUtils.getDefaultClient();
try {
Assert.assertNotNull(client);
client.close();
} catch (IOException ex) {
logger.info("close exception",ex.getMessage());
new Throwable();
}
}
@Test
public void testCreateProxyClient() {
CloseableHttpClient client = DingTalkUtils.getProxyClient();
try {
Assert.assertNotNull(client);
client.close();
} catch (IOException ex) {
logger.info("close exception",ex.getMessage());
new Throwable();
}
}
@Test
public void testProxyConfig() {
RequestConfig rc = DingTalkUtils.getProxyConfig();
Assert.assertEquals(rc.getProxy().getPort(), 80);
Assert.assertEquals(rc.getProxy().getHostName(), "proxy.com.cn");
}
@Test
public void testDingTalkMsgToJson() {
String jsonString = DingTalkUtils.textToJsonString("this is test");
logger.info(jsonString);
String expect = "{\"text\":{\"content\":\"this is test\"},\"msgtype\":\"text\"}";
Assert.assertEquals(expect, jsonString);
}
@Test
public void testDingTalkMsgUtf8() {
String msg = DingTalkUtils.textToJsonString("this is test:中文");
logger.info("test support utf8, actual:" + msg);
logger.info("test support utf8, actual:" + DingTalkUtils.isEnableDingTalk);
String expect = "{\"text\":{\"content\":\"this is test:中文\"},\"msgtype\":\"text\"}";
Assert.assertEquals(expect, msg);
}
}

1
pom.xml

@@ -687,6 +687,7 @@
<version>${maven-surefire-plugin.version}</version>
<configuration>
<includes>
<include>**/alert/utils/DingTalkUtilsTest.java</include>
<include>**/alert/template/AlertTemplateFactoryTest.java</include>
<include>**/alert/template/impl/DefaultHTMLTemplateTest.java</include>
<include>**/alert/utils/EnterpriseWeChatUtilsTest.java</include>

2
script/scp-hosts.sh

@@ -49,4 +49,4 @@ do
done
echo "scp dirs to $host/$installPath complete"
done
done

3
sql/dolphinscheduler-postgre.sql

@@ -623,6 +623,7 @@ CREATE TABLE t_ds_user (
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
queue varchar(64) DEFAULT NULL ,
state int DEFAULT 1 ,
PRIMARY KEY (id)
);
@@ -759,4 +760,4 @@ INSERT INTO t_ds_relation_user_alertgroup(alertgroup_id,user_id,create_time,upda
INSERT INTO t_ds_queue(queue_name,queue,create_time,update_time) VALUES ('default', 'default','2018-11-29 10:22:33', '2018-11-29 10:22:33');
-- Records of t_ds_queue,default queue name : default
INSERT INTO t_ds_version(version) VALUES ('2.0.0');
INSERT INTO t_ds_version(version) VALUES ('2.0.0');

1
sql/dolphinscheduler_mysql.sql

@@ -775,6 +775,7 @@ CREATE TABLE `t_ds_user` (
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
`queue` varchar(64) DEFAULT NULL COMMENT 'queue',
`state` int(1) DEFAULT 1 COMMENT 'state 0:disable 1:enable',
PRIMARY KEY (`id`),
UNIQUE KEY `user_name_unique` (`user_name`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
